Diffstat (limited to 'tools')
-rwxr-xr-x  tools/adbs                                  10
-rw-r--r--  tools/apicheck/Android.mk                    2
-rwxr-xr-x  tools/check_radio_versions.py               79
-rwxr-xr-x  tools/generate-notice-files.py             188
-rwxr-xr-x  tools/java-event-log-tags.py                 5
-rwxr-xr-x  tools/releasetools/build_image.py          145
-rw-r--r--  tools/releasetools/common.py                43
-rw-r--r--  tools/releasetools/edify_generator.py       14
-rwxr-xr-x  tools/releasetools/img_from_target_files   110
-rwxr-xr-x  tools/releasetools/ota_from_target_files    58
-rw-r--r--  tools/zipalign/ZipEntry.cpp                 68
-rw-r--r--  tools/zipalign/ZipFile.cpp                  96
-rw-r--r--  tools/zipalign/ZipFile.h                     2
13 files changed, 604 insertions, 216 deletions
diff --git a/tools/adbs b/tools/adbs
index 961169b..598af85 100755
--- a/tools/adbs
+++ b/tools/adbs
@@ -137,11 +137,7 @@ def SetupToolsPath():
uname = os.uname()[0]
if uname == "Darwin":
- proc = os.uname()[-1]
- if proc == "i386":
- uname = "darwin-x86"
- else:
- uname = "darwin-ppc"
+ uname = "darwin-x86"
elif uname == "Linux":
uname = "linux-x86"
prefix = "./prebuilts/gcc/" + uname + "/arm/arm-linux-androideabi-4.6/bin/"
@@ -149,8 +145,8 @@ def SetupToolsPath():
if (not os.path.exists(addr2line_cmd)):
try:
- prefix = os.environ['ANDROID_BUILD_TOP'] + "/prebuilts/gcc/" + uname + \
- "/arm/arm-linux-androideabi-4.6/bin/"
+ prefix = os.environ['ANDROID_BUILD_TOP'] + "/prebuilts/gcc/" + \
+ uname + "/arm/arm-linux-androideabi-4.6/bin/"
except:
prefix = "";
diff --git a/tools/apicheck/Android.mk b/tools/apicheck/Android.mk
index 24f697c..1674a17 100644
--- a/tools/apicheck/Android.mk
+++ b/tools/apicheck/Android.mk
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+ifneq ($(TARGET_BUILD_PDK),true)
LOCAL_PATH := $(call my-dir)
# We use copy-file-to-new-target so that the installed
@@ -36,3 +37,4 @@ $(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/etc/apicheck | $(ACP)
$(hide) chmod 755 $@
# Apicheck is now part of Doclava -- See external/doclava.
+endif
diff --git a/tools/check_radio_versions.py b/tools/check_radio_versions.py
new file mode 100755
index 0000000..ebe621f
--- /dev/null
+++ b/tools/check_radio_versions.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import os
+
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import sha as sha1
+
+if len(sys.argv) < 2:
+ sys.exit(0)
+
+build_info = {}
+f = open(sys.argv[1])
+for line in f:
+ line = line.strip()
+ if line.startswith("require"):
+ key, value = line.split()[1].split("=", 1)
+ build_info[key] = value
+f.close()
+
+bad = False
+
+for item in sys.argv[2:]:
+ key, fn = item.split(":", 1)
+
+ values = build_info.get(key, None)
+ if not values:
+ continue
+ values = values.split("|")
+
+ f = open(fn, "rb")
+ digest = sha1(f.read()).hexdigest()
+ f.close()
+
+ versions = {}
+ try:
+ f = open(fn + ".sha1")
+ except IOError:
+ if not bad: print
+ print "*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key)
+ bad = True
+ continue
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith("#"): continue
+ h, v = line.split()
+ versions[h] = v
+
+ if digest not in versions:
+ if not bad: print
+ print "*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn)
+ bad = True
+ continue
+
+ if versions[digest] not in values:
+ if not bad: print
+ print "*** \"%s\" is version %s; not any %s allowed by \"%s\"." % (
+ fn, versions[digest], key, sys.argv[1])
+ bad = True
+
+if bad:
+ print
+ sys.exit(1)
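
Note: check_radio_versions.py takes the board info file plus "key:image" pairs, and expects each radio image to sit next to a companion "<image>.sha1" file whose lines map a SHA-1 digest to a version string; the key is then matched against a "require <key>=<value>|<value>" line in the board info file. A minimal sketch of producing such a companion entry (file names and the version string are illustrative, not taken from this change):

    #!/usr/bin/env python
    # Append a "<sha1-hex> <version>" line to radio.img.sha1 so that
    # check_radio_versions.py can map the image back to a version string.
    import sys
    try:
        from hashlib import sha1
    except ImportError:
        from sha import sha as sha1

    image, version = sys.argv[1], sys.argv[2]   # e.g. radio.img M8960A-2.0.25
    digest = sha1(open(image, "rb").read()).hexdigest()
    open(image + ".sha1", "a").write("%s %s\n" % (digest, version))
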
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
new file mode 100755
index 0000000..4571b70
--- /dev/null
+++ b/tools/generate-notice-files.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage: generate-notice-files [plain text output file] [html output file] [file title] [directory of notices]
+
+Generate the Android notice files, including both text and html files.
+
+-h to display this usage message and exit.
+"""
+from collections import defaultdict
+import getopt
+import hashlib
+import itertools
+import os
+import os.path
+import re
+import sys
+
+MD5_BLOCKSIZE = 1024 * 1024
+HTML_ESCAPE_TABLE = {
+ "&": "&amp;",
+ '"': "&quot;",
+ "'": "&apos;",
+ ">": "&gt;",
+ "<": "&lt;",
+ }
+
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "h")
+except getopt.GetoptError, err:
+ print str(err)
+ print __doc__
+ sys.exit(2)
+
+for o, a in opts:
+ if o == "-h":
+ print __doc__
+ sys.exit(2)
+ else:
+ print >> sys.stderr, "unhandled option %s" % (o,)
+
+if len(args) != 4:
+ print """need exactly four arguments, the two output files, the file title
+ and the directory containing notices, not %d""" % (len(args),)
+ print __doc__
+ sys.exit(1)
+
+def hexify(s):
+ return ("%02x"*len(s)) % tuple(map(ord, s))
+
+def md5sum(filename):
+ """Calculate an MD5 of the file given by FILENAME,
+ and return hex digest as a string.
+ Output should be compatible with md5sum command"""
+
+ f = open(filename, "rb")
+ sum = hashlib.md5()
+ while 1:
+ block = f.read(MD5_BLOCKSIZE)
+ if not block:
+ break
+ sum.update(block)
+ f.close()
+ return hexify(sum.digest())
+
+
+def html_escape(text):
+ """Produce entities within text."""
+ return "".join(HTML_ESCAPE_TABLE.get(c,c) for c in text)
+
+HTML_OUTPUT_CSS="""
+<style type="text/css">
+body { padding: 0; font-family: sans-serif; }
+.same-license { background-color: #eeeeee; border-top: 20px solid white; padding: 10px; }
+.label { font-weight: bold; }
+.file-list { margin-left: 1em; color: blue; }
+</style>
+"""
+
+def combine_notice_files_html(file_hash, input_dir, output_filename):
+ """Combine notice files in FILE_HASH and output a HTML version to OUTPUT_FILENAME."""
+
+ SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")
+
+ # Set up a filename to row id table (anchors inside tables don't work in
+ # most browsers, but href's to table row ids do)
+ id_table = {}
+ id_count = 0
+ for value in file_hash.values():
+ for filename in value:
+ id_table[filename] = id_count
+ id_count += 1
+
+ # Open the output file, and output the header pieces
+ output_file = open(output_filename, "wb")
+
+ print >> output_file, "<html><head>"
+ print >> output_file, HTML_OUTPUT_CSS
+ print >> output_file, '</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">'
+
+ # Output our table of contents
+ print >> output_file, '<div class="toc">'
+ print >> output_file, "<ul>"
+
+ # Flatten the list of lists into a single list of filenames
+ sorted_filenames = sorted(itertools.chain.from_iterable(file_hash.values()))
+
+ # Print out a nice table of contents
+ for filename in sorted_filenames:
+ stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
+ print >> output_file, '<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename)
+
+ print >> output_file, "</ul>"
+ print >> output_file, "</div><!-- table of contents -->"
+ # Output the individual notice file lists
+ print >>output_file, '<table cellpadding="0" cellspacing="0" border="0">'
+ for value in file_hash.values():
+ print >> output_file, '<tr id="id%d"><td class="same-license">' % id_table.get(value[0])
+ print >> output_file, '<div class="label">Notices for file(s):</div>'
+ print >> output_file, '<div class="file-list">'
+ for filename in sorted(value):
+ print >> output_file, "%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename))
+ print >> output_file, "</div><!-- file-list -->"
+ print >> output_file
+ print >> output_file, '<pre class="license-text">'
+ print >> output_file, html_escape(open(value[0]).read())
+ print >> output_file, "</pre><!-- license-text -->"
+ print >> output_file, "</td></tr><!-- same-license -->"
+ print >> output_file
+ print >> output_file
+ print >> output_file
+
+ # Finish off the file output
+ print >> output_file, "</table>"
+ print >> output_file, "</body></html>"
+ output_file.close()
+
+def combine_notice_files_text(file_hash, input_dir, output_filename, file_title):
+ """Combine notice files in FILE_HASH and output a text version to OUTPUT_FILENAME."""
+
+ SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")
+ output_file = open(output_filename, "wb")
+ print >> output_file, file_title
+ for value in file_hash.values():
+ print >> output_file, "============================================================"
+ print >> output_file, "Notices for file(s):"
+ for filename in sorted(value):
+ print >> output_file, SRC_DIR_STRIP_RE.sub(r"\1", filename)
+ print >> output_file, "------------------------------------------------------------"
+ print >> output_file, open(value[0]).read()
+ output_file.close()
+
+def main(args):
+ txt_output_file = args[0]
+ html_output_file = args[1]
+ file_title = args[2]
+
+ # Find all the notice files and md5 them
+ input_dir = os.path.normpath(args[3])
+ files_with_same_hash = defaultdict(list)
+ for root, dir, files in os.walk(input_dir):
+ for file in files:
+ if file.endswith(".txt"):
+ filename = os.path.join(root, file)
+ file_md5sum = md5sum(filename)
+ files_with_same_hash[file_md5sum].append(filename)
+
+
+ print "Combining NOTICE files into HTML"
+ combine_notice_files_html(files_with_same_hash, input_dir, html_output_file)
+ print "Combining NOTICE files into text"
+ combine_notice_files_text(files_with_same_hash, input_dir, txt_output_file, file_title)
+
+if __name__ == "__main__":
+ main(args)
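
Note: as the usage string above indicates, the script takes exactly four arguments: the text output file, the HTML output file, a title line, and the directory tree of *.txt notice files to merge. A typical invocation (paths and title are illustrative) would be:

    generate-notice-files.py NOTICE.txt NOTICE.html "Notices for files:" out/target/product/<device>/obj/NOTICE_FILES/src
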
diff --git a/tools/java-event-log-tags.py b/tools/java-event-log-tags.py
index c63fa20..846d9cf 100755
--- a/tools/java-event-log-tags.py
+++ b/tools/java-event-log-tags.py
@@ -26,6 +26,7 @@ tags in the given input file.
import cStringIO
import getopt
import os
+import os.path
import re
import sys
@@ -144,4 +145,8 @@ for t in tagfile.tags:
buffer.write("}\n");
+output_dir = os.path.dirname(output_file)
+if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
event_log_tags.WriteOutput(output_file, buffer)
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
new file mode 100755
index 0000000..a615d1a
--- /dev/null
+++ b/tools/releasetools/build_image.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Build image output_image_file from input_directory and properties_file.
+
+Usage: build_image input_directory properties_file output_image_file
+
+"""
+import os
+import subprocess
+import sys
+
+
+def BuildImage(in_dir, prop_dict, out_file):
+ """Build an image to out_file from in_dir with property prop_dict.
+
+ Args:
+ in_dir: path of input directory.
+ prop_dict: property dictionary.
+ out_file: path of the output image file.
+
+ Returns:
+ True iff the image is built successfully.
+ """
+ build_command = []
+ fs_type = prop_dict.get("fs_type", "")
+ if fs_type.startswith("ext"):
+ build_command = ["mkuserimg.sh"]
+ if "extfs_sparse_flag" in prop_dict:
+ build_command.append(prop_dict["extfs_sparse_flag"])
+ build_command.extend([in_dir, out_file, fs_type,
+ prop_dict["mount_point"]])
+ if "partition_size" in prop_dict:
+ build_command.append(prop_dict["partition_size"])
+ if "selinux_fc" in prop_dict:
+ build_command.append(prop_dict["selinux_fc"])
+ else:
+ build_command = ["mkyaffs2image", "-f"]
+ if prop_dict.get("mkyaffs2_extra_flags", None):
+ build_command.extend(prop_dict["mkyaffs2_extra_flags"].split())
+ build_command.append(in_dir)
+ build_command.append(out_file)
+ if "selinux_fc" in prop_dict:
+ build_command.append(prop_dict["selinux_fc"])
+ build_command.append(prop_dict["mount_point"])
+
+ print "Running: ", " ".join(build_command)
+ p = subprocess.Popen(build_command);
+ p.communicate()
+ return p.returncode == 0
+
+
+def ImagePropFromGlobalDict(glob_dict, mount_point):
+ """Build an image property dictionary from the global dictionary.
+
+ Args:
+ glob_dict: the global dictionary from the build system.
+ mount_point: such as "system", "data" etc.
+ """
+ d = {}
+
+ def copy_prop(src_p, dest_p):
+ if src_p in glob_dict:
+ d[dest_p] = str(glob_dict[src_p])
+
+ common_props = (
+ "extfs_sparse_flag",
+ "mkyaffs2_extra_flags",
+ "selinux_fc",
+ )
+ for p in common_props:
+ copy_prop(p, p)
+
+ d["mount_point"] = mount_point
+ if mount_point == "system":
+ copy_prop("fs_type", "fs_type")
+ copy_prop("system_size", "partition_size")
+ elif mount_point == "data":
+ copy_prop("fs_type", "fs_type")
+ copy_prop("userdata_size", "partition_size")
+ elif mount_point == "cache":
+ copy_prop("cache_fs_type", "fs_type")
+ copy_prop("cache_size", "partition_size")
+
+ return d
+
+
+def LoadGlobalDict(filename):
+ """Load "name=value" pairs from filename"""
+ d = {}
+ f = open(filename)
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ k, v = line.split("=", 1)
+ d[k] = v
+ f.close()
+ return d
+
+
+def main(argv):
+ if len(argv) != 3:
+ print __doc__
+ sys.exit(1)
+
+ in_dir = argv[0]
+ glob_dict_file = argv[1]
+ out_file = argv[2]
+
+ glob_dict = LoadGlobalDict(glob_dict_file)
+ image_filename = os.path.basename(out_file)
+ mount_point = ""
+ if image_filename == "system.img":
+ mount_point = "system"
+ elif image_filename == "userdata.img":
+ mount_point = "data"
+ elif image_filename == "cache.img":
+ mount_point = "cache"
+ else:
+ print >> sys.stderr, "error: unknown image file name ", image_filename
+ exit(1)
+
+ image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
+ if not BuildImage(in_dir, image_properties, out_file):
+ print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir)
+ exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
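
Note: the properties file read by LoadGlobalDict is a plain name=value list; the keys ImagePropFromGlobalDict consumes are fs_type, system_size, userdata_size, cache_fs_type, cache_size, extfs_sparse_flag, mkyaffs2_extra_flags and selinux_fc, and the mount point is inferred from the output file name. A minimal example for system.img (file names and values are illustrative, not taken from this change):

    fs_type=ext4
    extfs_sparse_flag=-s
    system_size=536870912
    selinux_fc=out/target/product/<device>/root/file_contexts

    build_image.py out/target/product/<device>/system system_image_info.txt system.img
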
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 4957354..8196b3c 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -138,6 +138,7 @@ def LoadInfoDict(zip):
makeint("blocksize")
makeint("system_size")
makeint("userdata_size")
+ makeint("cache_size")
makeint("recovery_size")
makeint("boot_size")
@@ -389,24 +390,27 @@ def CheckSize(data, target, info_dict):
if mount_point == "/userdata": mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
- limit = info_dict.get(p.device + "_size", None)
+ device = p.device
+ if "/" in device:
+ device = device[device.rfind("/")+1:]
+ limit = info_dict.get(device + "_size", None)
if not fs_type or not limit: return
if fs_type == "yaffs2":
# image size should be increased by 1/64th to account for the
# spare area (64 bytes per 2k page)
limit = limit / 2048 * (2048+64)
- size = len(data)
- pct = float(size) * 100.0 / limit
- msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
- if pct >= 99.0:
- raise ExternalError(msg)
- elif pct >= 95.0:
- print
- print " WARNING: ", msg
- print
- elif OPTIONS.verbose:
- print " ", msg
+ size = len(data)
+ pct = float(size) * 100.0 / limit
+ msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
+ if pct >= 99.0:
+ raise ExternalError(msg)
+ elif pct >= 95.0:
+ print
+ print " WARNING: ", msg
+ print
+ elif OPTIONS.verbose:
+ print " ", msg
def ReadApkCerts(tf_zip):
@@ -659,6 +663,10 @@ class DeviceSpecificParams(object):
assertions they like."""
return self._DoCall("FullOTA_Assertions")
+ def FullOTA_InstallBegin(self):
+ """Called at the start of full OTA installation."""
+ return self._DoCall("FullOTA_InstallBegin")
+
def FullOTA_InstallEnd(self):
"""Called at the end of full OTA installation; typically this is
used to install the image for the device's baseband processor."""
@@ -670,12 +678,23 @@ class DeviceSpecificParams(object):
additional assertions they like."""
return self._DoCall("IncrementalOTA_Assertions")
+ def IncrementalOTA_VerifyBegin(self):
+ """Called at the start of the verification phase of incremental
+ OTA installation; additional checks can be placed here to abort
+ the script before any changes are made."""
+ return self._DoCall("IncrementalOTA_VerifyBegin")
+
def IncrementalOTA_VerifyEnd(self):
"""Called at the end of the verification phase of incremental OTA
installation; additional checks can be placed here to abort the
script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyEnd")
+ def IncrementalOTA_InstallBegin(self):
+ """Called at the start of incremental OTA installation (after
+ verification is complete)."""
+ return self._DoCall("IncrementalOTA_InstallBegin")
+
def IncrementalOTA_InstallEnd(self):
"""Called at the end of incremental OTA installation; typically
this is used to install the image for the device's baseband
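
Note: the new DeviceSpecificParams hooks added above (FullOTA_InstallBegin, IncrementalOTA_VerifyBegin and IncrementalOTA_InstallBegin) are dispatched through _DoCall to the device-specific releasetools extension, in the same way as the existing *_Assertions and *_InstallEnd hooks. A minimal sketch of an extension using them (the module path and the fields on the info argument are assumptions beyond what this diff shows):

    # device/<vendor>/<name>/releasetools.py (sketch)
    def FullOTA_InstallBegin(info):
      # Runs before the full OTA writes anything; info.script is assumed to be
      # the edify generator built by ota_from_target_files.
      info.script.Print("Preparing device-specific partitions...")

    def IncrementalOTA_VerifyBegin(info):
      # Runs before the verification phase; a natural place to abort early
      # on an incompatible baseband.
      info.script.Print("Verifying baseband compatibility...")
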
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 12bb48b..5672b5a 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -228,20 +228,6 @@ class EdifyGenerator(object):
",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
self.script.append(self._WordWrap(cmd))
- def RetouchBinaries(self, file_list):
- """Execute the retouch instructions in files listed."""
- cmd = ('retouch_binaries(' +
- ', '.join(['"' + i[0] + '", "' + i[1] + '"' for i in file_list]) +
- ');')
- self.script.append(self._WordWrap(cmd))
-
- def UndoRetouchBinaries(self, file_list):
- """Undo the retouching (retouch to zero offset)."""
- cmd = ('undo_retouch_binaries(' +
- ', '.join(['"' + i[0] + '", "' + i[1] + '"' for i in file_list]) +
- ');')
- self.script.append(self._WordWrap(cmd))
-
def AppendExtra(self, extra):
"""Append text verbatim to the output script."""
self.script.append(extra)
diff --git a/tools/releasetools/img_from_target_files b/tools/releasetools/img_from_target_files
index ad03398..002e6e6 100755
--- a/tools/releasetools/img_from_target_files
+++ b/tools/releasetools/img_from_target_files
@@ -27,10 +27,6 @@ Usage: img_from_target_files [flags] input_target_files output_image_zip
Include only the bootable images (eg 'boot' and 'recovery') in
the output.
- -S (--file_context) <file>
- the file contexts configuration used to assign SELinux file
- context attributes.
-
"""
import sys
@@ -51,10 +47,10 @@ import zipfile
if not hasattr(os, "SEEK_SET"):
os.SEEK_SET = 0
+import build_image
import common
OPTIONS = common.OPTIONS
-OPTIONS.selinux_fc = None
def AddUserdata(output_zip):
"""Create an empty userdata image and store it in output_zip."""
@@ -69,32 +65,13 @@ def AddUserdata(output_zip):
os.mkdir(user_dir)
img = tempfile.NamedTemporaryFile()
- build_command = []
+ image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
+ "data")
fstab = OPTIONS.info_dict["fstab"]
- if fstab and fstab["/data"].fs_type.startswith("ext"):
- build_command = ["mkuserimg.sh"]
- if "extfs_sparse_flag" in OPTIONS.info_dict:
- build_command.append(OPTIONS.info_dict["extfs_sparse_flag"])
- build_command.extend([user_dir, img.name,
- fstab["/data"].fs_type, "data"])
- if "userdata_size" in OPTIONS.info_dict:
- build_command.append(str(OPTIONS.info_dict["userdata_size"]))
- if OPTIONS.selinux_fc is not None:
- build_command.append(OPTIONS.selinux_fc)
- else:
- build_command = ["mkyaffs2image", "-f"]
- extra = OPTIONS.info_dict.get("mkyaffs2_extra_flags", None)
- if extra:
- build_command.extend(extra.split())
- build_command.append(user_dir)
- build_command.append(img.name)
- if OPTIONS.selinux_fc is not None:
- build_command.append(OPTIONS.selinux_fc)
- build_command.append("/data")
-
- p = common.Run(build_command);
- p.communicate()
- assert p.returncode == 0, "build userdata.img image failed"
+ if fstab:
+ image_props["fs_type" ] = fstab["/data"].fs_type
+ succ = build_image.BuildImage(user_dir, image_props, img.name)
+ assert succ, "build userdata.img image failed"
common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
output_zip.write(img.name, "userdata.img")
@@ -103,6 +80,38 @@ def AddUserdata(output_zip):
os.rmdir(temp_dir)
+def AddCache(output_zip):
+ """Create an empty cache image and store it in output_zip."""
+
+ image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
+ "cache")
+ # The build system has to explicitly request for cache.img.
+ if "fs_type" not in image_props:
+ return
+
+ print "creating cache.img..."
+
+ # The name of the directory it is making an image out of matters to
+ # mkyaffs2image. So we create a temp dir, and within it we create an
+ # empty dir named "cache", and build the image from that.
+ temp_dir = tempfile.mkdtemp()
+ user_dir = os.path.join(temp_dir, "cache")
+ os.mkdir(user_dir)
+ img = tempfile.NamedTemporaryFile()
+
+ fstab = OPTIONS.info_dict["fstab"]
+ if fstab:
+ image_props["fs_type" ] = fstab["/cache"].fs_type
+ succ = build_image.BuildImage(user_dir, image_props, img.name)
+ assert succ, "build cache.img image failed"
+
+ common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
+ output_zip.write(img.name, "cache.img")
+ img.close()
+ os.rmdir(user_dir)
+ os.rmdir(temp_dir)
+
+
def AddSystem(output_zip):
"""Turn the contents of SYSTEM into a system image and store it in
output_zip."""
@@ -125,33 +134,14 @@ def AddSystem(output_zip):
if (e.errno == errno.EEXIST):
pass
- build_command = []
+ image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
+ "system")
fstab = OPTIONS.info_dict["fstab"]
- if fstab and fstab["/system"].fs_type.startswith("ext"):
-
- build_command = ["mkuserimg.sh"]
- if "extfs_sparse_flag" in OPTIONS.info_dict:
- build_command.append(OPTIONS.info_dict["extfs_sparse_flag"])
- build_command.extend([os.path.join(OPTIONS.input_tmp, "system"), img.name,
- fstab["/system"].fs_type, "system"])
- if "system_size" in OPTIONS.info_dict:
- build_command.append(str(OPTIONS.info_dict["system_size"]))
- if OPTIONS.selinux_fc is not None:
- build_command.append(OPTIONS.selinux_fc)
- else:
- build_command = ["mkyaffs2image", "-f"]
- extra = OPTIONS.info_dict.get("mkyaffs2_extra_flags", None)
- if extra:
- build_command.extend(extra.split())
- build_command.append(os.path.join(OPTIONS.input_tmp, "system"))
- build_command.append(img.name)
- if OPTIONS.selinux_fc is not None:
- build_command.append(OPTIONS.selinux_fc)
- build_command.append("/system")
-
- p = common.Run(build_command)
- p.communicate()
- assert p.returncode == 0, "build system.img image failed"
+ if fstab:
+ image_props["fs_type" ] = fstab["/system"].fs_type
+ succ = build_image.BuildImage(os.path.join(OPTIONS.input_tmp, "system"),
+ image_props, img.name)
+ assert succ, "build system.img image failed"
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -175,17 +165,14 @@ def main(argv):
pass # deprecated
if o in ("-z", "--bootable_zip"):
bootable_only[0] = True
- if o in ("-S", "--file_context"):
- OPTIONS.selinux_fc = a
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
- extra_opts="b:zS:",
+ extra_opts="b:z",
extra_long_opts=["board_config=",
- "bootable_zip",
- "file_context="],
+ "bootable_zip"],
extra_option_handler=option_handler)
bootable_only = bootable_only[0]
@@ -208,6 +195,7 @@ def main(argv):
if not bootable_only:
AddSystem(output_zip)
AddUserdata(output_zip)
+ AddCache(output_zip)
CopyInfo(output_zip)
print "cleaning up..."
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
index f838c22..3dcfbee 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files
@@ -52,10 +52,6 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
-a (--aslr_mode) <on|off>
Specify whether to turn on ASLR for the package (on by default).
- -S (--file_context) <file>
- the file contexts configuration used to assign SELinux file
- context attributes
-
"""
import sys
@@ -92,7 +88,6 @@ OPTIONS.omit_prereq = False
OPTIONS.extra_script = None
OPTIONS.aslr_mode = True
OPTIONS.worker_threads = 3
-OPTIONS.selinux_fc = None
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
@@ -266,15 +261,13 @@ def CopySystemFiles(input_zip, output_zip=None,
substitute=None):
"""Copies files underneath system/ in the input zip to the output
zip. Populates the Item class with their metadata, and returns a
- list of symlinks as well as a list of files that will be retouched.
- output_zip may be None, in which case the copy is skipped (but the
- other side effects still happen). substitute is an optional dict
- of {output filename: contents} to be output instead of certain input
- files.
+ list of symlinks. output_zip may be None, in which case the copy is
+ skipped (but the other side effects still happen). substitute is an
+ optional dict of {output filename: contents} to be output instead of
+ certain input files.
"""
symlinks = []
- retouch_files = []
for info in input_zip.infolist():
if info.filename.startswith("SYSTEM/"):
@@ -292,9 +285,6 @@ def CopySystemFiles(input_zip, output_zip=None,
data = substitute[fn]
else:
data = input_zip.read(info.filename)
- if info.filename.startswith("SYSTEM/lib/") and IsRegular(info):
- retouch_files.append(("/system/" + basefilename,
- common.sha1(data).hexdigest()))
output_zip.writestr(info2, data)
if fn.endswith("/"):
Item.Get(fn[:-1], dir=True)
@@ -302,7 +292,7 @@ def CopySystemFiles(input_zip, output_zip=None,
Item.Get(fn, dir=False)
symlinks.sort()
- return (symlinks, retouch_files)
+ return symlinks
def SignOutput(temp_zip_name, output_zip_name):
@@ -388,26 +378,23 @@ def WriteFullOTAPackage(input_zip, output_zip):
AppendAssertions(script, input_zip)
device_specific.FullOTA_Assertions()
+ device_specific.FullOTA_InstallBegin()
script.ShowProgress(0.5, 0)
if OPTIONS.wipe_user_data:
script.FormatPartition("/data")
- if OPTIONS.selinux_fc is not None:
- WritePolicyConfig(OPTIONS.selinux_fc, output_zip)
+ if "selinux_fc" in OPTIONS.info_dict:
+ WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)
script.FormatPartition("/system")
script.Mount("/system")
script.UnpackPackageDir("recovery", "/system")
script.UnpackPackageDir("system", "/system")
- (symlinks, retouch_files) = CopySystemFiles(input_zip, output_zip)
+ symlinks = CopySystemFiles(input_zip, output_zip)
script.MakeSymlinks(symlinks)
- if OPTIONS.aslr_mode:
- script.RetouchBinaries(retouch_files)
- else:
- script.UndoRetouchBinaries(retouch_files)
boot_img = common.GetBootableImage("boot.img", "boot.img",
OPTIONS.input_tmp, "BOOT")
@@ -450,17 +437,13 @@ def LoadSystemFiles(z):
"""Load all the files from SYSTEM/... in a given target-files
ZipFile, and return a dict of {filename: File object}."""
out = {}
- retouch_files = []
for info in z.infolist():
if info.filename.startswith("SYSTEM/") and not IsSymlink(info):
basefilename = info.filename[7:]
fn = "system/" + basefilename
data = z.read(info.filename)
out[fn] = common.File(fn, data)
- if info.filename.startswith("SYSTEM/lib/") and IsRegular(info):
- retouch_files.append(("/system/" + basefilename,
- out[fn].sha1))
- return (out, retouch_files)
+ return out
def GetBuildProp(property, z):
@@ -499,9 +482,9 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
info_dict=OPTIONS.info_dict)
print "Loading target..."
- (target_data, target_retouch_files) = LoadSystemFiles(target_zip)
+ target_data = LoadSystemFiles(target_zip)
print "Loading source..."
- (source_data, source_retouch_files) = LoadSystemFiles(source_zip)
+ source_data = LoadSystemFiles(source_zip)
verbatim_targets = []
patch_list = []
@@ -570,6 +553,8 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
script.Print("Verifying current system...")
+ device_specific.IncrementalOTA_VerifyBegin()
+
script.ShowProgress(0.1, 0)
total_verify_size = float(sum([i[2].size for i in patch_list]) + 1)
if updating_boot:
@@ -605,6 +590,8 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
script.Comment("---- start making changes here ----")
+ device_specific.IncrementalOTA_InstallBegin()
+
if OPTIONS.wipe_user_data:
script.Print("Erasing user data...")
script.FormatPartition("/data")
@@ -671,7 +658,7 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
script.ShowProgress(0.1, 10)
- (target_symlinks, target_retouch_dummies) = CopySystemFiles(target_zip, None)
+ target_symlinks = CopySystemFiles(target_zip, None)
target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
temp_script = script.MakeTemporary()
@@ -680,7 +667,7 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
# Note that this call will mess up the tree of Items, so make sure
# we're done with it.
- (source_symlinks, source_retouch_dummies) = CopySystemFiles(source_zip, None)
+ source_symlinks = CopySystemFiles(source_zip, None)
source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
# Delete all the symlinks in source that aren't in target. This
@@ -714,10 +701,6 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
to_create.append((dest, link))
script.DeleteFiles([i[1] for i in to_create])
script.MakeSymlinks(to_create)
- if OPTIONS.aslr_mode:
- script.RetouchBinaries(target_retouch_files)
- else:
- script.UndoRetouchBinaries(target_retouch_files)
# Now that the symlinks are created, we can set all the
# permissions.
@@ -764,14 +747,12 @@ def main(argv):
OPTIONS.aslr_mode = False
elif o in ("--worker_threads"):
OPTIONS.worker_threads = int(a)
- elif o in ("-S", "--file_context"):
- OPTIONS.selinux_fc = a
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
- extra_opts="b:k:i:d:wne:a:S:",
+ extra_opts="b:k:i:d:wne:a:",
extra_long_opts=["board_config=",
"package_key=",
"incremental_from=",
@@ -780,7 +761,6 @@ def main(argv):
"extra_script=",
"worker_threads=",
"aslr_mode=",
- "file_context=",
],
extra_option_handler=option_handler)
diff --git a/tools/zipalign/ZipEntry.cpp b/tools/zipalign/ZipEntry.cpp
index bed0333..d4d366d 100644
--- a/tools/zipalign/ZipEntry.cpp
+++ b/tools/zipalign/ZipEntry.cpp
@@ -42,12 +42,12 @@ status_t ZipEntry::initFromCDE(FILE* fp)
long posn;
bool hasDD;
- //LOGV("initFromCDE ---\n");
+ //ALOGV("initFromCDE ---\n");
/* read the CDE */
result = mCDE.read(fp);
if (result != NO_ERROR) {
- LOGD("mCDE.read failed\n");
+ ALOGD("mCDE.read failed\n");
return result;
}
@@ -56,14 +56,14 @@ status_t ZipEntry::initFromCDE(FILE* fp)
/* using the info in the CDE, go load up the LFH */
posn = ftell(fp);
if (fseek(fp, mCDE.mLocalHeaderRelOffset, SEEK_SET) != 0) {
- LOGD("local header seek failed (%ld)\n",
+ ALOGD("local header seek failed (%ld)\n",
mCDE.mLocalHeaderRelOffset);
return UNKNOWN_ERROR;
}
result = mLFH.read(fp);
if (result != NO_ERROR) {
- LOGD("mLFH.read failed\n");
+ ALOGD("mLFH.read failed\n");
return result;
}
@@ -81,7 +81,7 @@ status_t ZipEntry::initFromCDE(FILE* fp)
hasDD = (mLFH.mGPBitFlag & kUsesDataDescr) != 0;
if (hasDD) {
// do something clever
- //LOGD("+++ has data descriptor\n");
+ //ALOGD("+++ has data descriptor\n");
}
/*
@@ -90,7 +90,7 @@ status_t ZipEntry::initFromCDE(FILE* fp)
* prefer the CDE values.)
*/
if (!hasDD && !compareHeaders()) {
- LOGW("WARNING: header mismatch\n");
+ ALOGW("WARNING: header mismatch\n");
// keep going?
}
@@ -200,7 +200,7 @@ status_t ZipEntry::addPadding(int padding)
if (padding <= 0)
return INVALID_OPERATION;
- //LOGI("HEY: adding %d pad bytes to existing %d in %s\n",
+ //ALOGI("HEY: adding %d pad bytes to existing %d in %s\n",
// padding, mLFH.mExtraFieldLength, mCDE.mFileName);
if (mLFH.mExtraFieldLength > 0) {
@@ -280,50 +280,50 @@ void ZipEntry::setDataInfo(long uncompLen, long compLen, unsigned long crc32,
bool ZipEntry::compareHeaders(void) const
{
if (mCDE.mVersionToExtract != mLFH.mVersionToExtract) {
- LOGV("cmp: VersionToExtract\n");
+ ALOGV("cmp: VersionToExtract\n");
return false;
}
if (mCDE.mGPBitFlag != mLFH.mGPBitFlag) {
- LOGV("cmp: GPBitFlag\n");
+ ALOGV("cmp: GPBitFlag\n");
return false;
}
if (mCDE.mCompressionMethod != mLFH.mCompressionMethod) {
- LOGV("cmp: CompressionMethod\n");
+ ALOGV("cmp: CompressionMethod\n");
return false;
}
if (mCDE.mLastModFileTime != mLFH.mLastModFileTime) {
- LOGV("cmp: LastModFileTime\n");
+ ALOGV("cmp: LastModFileTime\n");
return false;
}
if (mCDE.mLastModFileDate != mLFH.mLastModFileDate) {
- LOGV("cmp: LastModFileDate\n");
+ ALOGV("cmp: LastModFileDate\n");
return false;
}
if (mCDE.mCRC32 != mLFH.mCRC32) {
- LOGV("cmp: CRC32\n");
+ ALOGV("cmp: CRC32\n");
return false;
}
if (mCDE.mCompressedSize != mLFH.mCompressedSize) {
- LOGV("cmp: CompressedSize\n");
+ ALOGV("cmp: CompressedSize\n");
return false;
}
if (mCDE.mUncompressedSize != mLFH.mUncompressedSize) {
- LOGV("cmp: UncompressedSize\n");
+ ALOGV("cmp: UncompressedSize\n");
return false;
}
if (mCDE.mFileNameLength != mLFH.mFileNameLength) {
- LOGV("cmp: FileNameLength\n");
+ ALOGV("cmp: FileNameLength\n");
return false;
}
#if 0 // this seems to be used for padding, not real data
if (mCDE.mExtraFieldLength != mLFH.mExtraFieldLength) {
- LOGV("cmp: ExtraFieldLength\n");
+ ALOGV("cmp: ExtraFieldLength\n");
return false;
}
#endif
if (mCDE.mFileName != NULL) {
if (strcmp((char*) mCDE.mFileName, (char*) mLFH.mFileName) != 0) {
- LOGV("cmp: FileName\n");
+ ALOGV("cmp: FileName\n");
return false;
}
}
@@ -413,7 +413,7 @@ status_t ZipEntry::LocalFileHeader::read(FILE* fp)
}
if (ZipEntry::getLongLE(&buf[0x00]) != kSignature) {
- LOGD("whoops: didn't find expected signature\n");
+ ALOGD("whoops: didn't find expected signature\n");
result = UNKNOWN_ERROR;
goto bail;
}
@@ -506,17 +506,17 @@ status_t ZipEntry::LocalFileHeader::write(FILE* fp)
*/
void ZipEntry::LocalFileHeader::dump(void) const
{
- LOGD(" LocalFileHeader contents:\n");
- LOGD(" versToExt=%u gpBits=0x%04x compression=%u\n",
+ ALOGD(" LocalFileHeader contents:\n");
+ ALOGD(" versToExt=%u gpBits=0x%04x compression=%u\n",
mVersionToExtract, mGPBitFlag, mCompressionMethod);
- LOGD(" modTime=0x%04x modDate=0x%04x crc32=0x%08lx\n",
+ ALOGD(" modTime=0x%04x modDate=0x%04x crc32=0x%08lx\n",
mLastModFileTime, mLastModFileDate, mCRC32);
- LOGD(" compressedSize=%lu uncompressedSize=%lu\n",
+ ALOGD(" compressedSize=%lu uncompressedSize=%lu\n",
mCompressedSize, mUncompressedSize);
- LOGD(" filenameLen=%u extraLen=%u\n",
+ ALOGD(" filenameLen=%u extraLen=%u\n",
mFileNameLength, mExtraFieldLength);
if (mFileName != NULL)
- LOGD(" filename: '%s'\n", mFileName);
+ ALOGD(" filename: '%s'\n", mFileName);
}
@@ -549,7 +549,7 @@ status_t ZipEntry::CentralDirEntry::read(FILE* fp)
}
if (ZipEntry::getLongLE(&buf[0x00]) != kSignature) {
- LOGD("Whoops: didn't find expected signature\n");
+ ALOGD("Whoops: didn't find expected signature\n");
result = UNKNOWN_ERROR;
goto bail;
}
@@ -675,22 +675,22 @@ status_t ZipEntry::CentralDirEntry::write(FILE* fp)
*/
void ZipEntry::CentralDirEntry::dump(void) const
{
- LOGD(" CentralDirEntry contents:\n");
- LOGD(" versMadeBy=%u versToExt=%u gpBits=0x%04x compression=%u\n",
+ ALOGD(" CentralDirEntry contents:\n");
+ ALOGD(" versMadeBy=%u versToExt=%u gpBits=0x%04x compression=%u\n",
mVersionMadeBy, mVersionToExtract, mGPBitFlag, mCompressionMethod);
- LOGD(" modTime=0x%04x modDate=0x%04x crc32=0x%08lx\n",
+ ALOGD(" modTime=0x%04x modDate=0x%04x crc32=0x%08lx\n",
mLastModFileTime, mLastModFileDate, mCRC32);
- LOGD(" compressedSize=%lu uncompressedSize=%lu\n",
+ ALOGD(" compressedSize=%lu uncompressedSize=%lu\n",
mCompressedSize, mUncompressedSize);
- LOGD(" filenameLen=%u extraLen=%u commentLen=%u\n",
+ ALOGD(" filenameLen=%u extraLen=%u commentLen=%u\n",
mFileNameLength, mExtraFieldLength, mFileCommentLength);
- LOGD(" diskNumStart=%u intAttr=0x%04x extAttr=0x%08lx relOffset=%lu\n",
+ ALOGD(" diskNumStart=%u intAttr=0x%04x extAttr=0x%08lx relOffset=%lu\n",
mDiskNumberStart, mInternalAttrs, mExternalAttrs,
mLocalHeaderRelOffset);
if (mFileName != NULL)
- LOGD(" filename: '%s'\n", mFileName);
+ ALOGD(" filename: '%s'\n", mFileName);
if (mFileComment != NULL)
- LOGD(" comment: '%s'\n", mFileComment);
+ ALOGD(" comment: '%s'\n", mFileComment);
}
diff --git a/tools/zipalign/ZipFile.cpp b/tools/zipalign/ZipFile.cpp
index 62c9383..3994c31 100644
--- a/tools/zipalign/ZipFile.cpp
+++ b/tools/zipalign/ZipFile.cpp
@@ -20,8 +20,8 @@
#define LOG_TAG "zip"
-#include <utils/ZipUtils.h>
#include <utils/Log.h>
+#include <utils/ZipUtils.h>
#include "ZipFile.h"
@@ -78,7 +78,7 @@ status_t ZipFile::open(const char* zipFileName, int flags)
newArchive = (access(zipFileName, F_OK) != 0);
if (!(flags & kOpenCreate) && newArchive) {
/* not creating, must already exist */
- LOGD("File %s does not exist", zipFileName);
+ ALOGD("File %s does not exist", zipFileName);
return NAME_NOT_FOUND;
}
}
@@ -96,7 +96,7 @@ status_t ZipFile::open(const char* zipFileName, int flags)
mZipFp = fopen(zipFileName, openflags);
if (mZipFp == NULL) {
int err = errno;
- LOGD("fopen failed: %d\n", err);
+ ALOGD("fopen failed: %d\n", err);
return errnoToStatus(err);
}
@@ -215,14 +215,14 @@ status_t ZipFile::readCentralDir(void)
/* too small to be a ZIP archive? */
if (fileLength < EndOfCentralDir::kEOCDLen) {
- LOGD("Length is %ld -- too small\n", (long)fileLength);
+ ALOGD("Length is %ld -- too small\n", (long)fileLength);
result = INVALID_OPERATION;
goto bail;
}
buf = new unsigned char[EndOfCentralDir::kMaxEOCDSearch];
if (buf == NULL) {
- LOGD("Failure allocating %d bytes for EOCD search",
+ ALOGD("Failure allocating %d bytes for EOCD search",
EndOfCentralDir::kMaxEOCDSearch);
result = NO_MEMORY;
goto bail;
@@ -236,14 +236,14 @@ status_t ZipFile::readCentralDir(void)
readAmount = (long) fileLength;
}
if (fseek(mZipFp, seekStart, SEEK_SET) != 0) {
- LOGD("Failure seeking to end of zip at %ld", (long) seekStart);
+ ALOGD("Failure seeking to end of zip at %ld", (long) seekStart);
result = UNKNOWN_ERROR;
goto bail;
}
/* read the last part of the file into the buffer */
if (fread(buf, 1, readAmount, mZipFp) != (size_t) readAmount) {
- LOGD("short file? wanted %ld\n", readAmount);
+ ALOGD("short file? wanted %ld\n", readAmount);
result = UNKNOWN_ERROR;
goto bail;
}
@@ -253,12 +253,12 @@ status_t ZipFile::readCentralDir(void)
if (buf[i] == 0x50 &&
ZipEntry::getLongLE(&buf[i]) == EndOfCentralDir::kSignature)
{
- LOGV("+++ Found EOCD at buf+%d\n", i);
+ ALOGV("+++ Found EOCD at buf+%d\n", i);
break;
}
}
if (i < 0) {
- LOGD("EOCD not found, not Zip\n");
+ ALOGD("EOCD not found, not Zip\n");
result = INVALID_OPERATION;
goto bail;
}
@@ -266,7 +266,7 @@ status_t ZipFile::readCentralDir(void)
/* extract eocd values */
result = mEOCD.readBuf(buf + i, readAmount - i);
if (result != NO_ERROR) {
- LOGD("Failure reading %ld bytes of EOCD values", readAmount - i);
+ ALOGD("Failure reading %ld bytes of EOCD values", readAmount - i);
goto bail;
}
//mEOCD.dump();
@@ -274,7 +274,7 @@ status_t ZipFile::readCentralDir(void)
if (mEOCD.mDiskNumber != 0 || mEOCD.mDiskWithCentralDir != 0 ||
mEOCD.mNumEntries != mEOCD.mTotalNumEntries)
{
- LOGD("Archive spanning not supported\n");
+ ALOGD("Archive spanning not supported\n");
result = INVALID_OPERATION;
goto bail;
}
@@ -294,7 +294,7 @@ status_t ZipFile::readCentralDir(void)
* we're hoping to preserve.
*/
if (fseek(mZipFp, mEOCD.mCentralDirOffset, SEEK_SET) != 0) {
- LOGD("Failure seeking to central dir offset %ld\n",
+ ALOGD("Failure seeking to central dir offset %ld\n",
mEOCD.mCentralDirOffset);
result = UNKNOWN_ERROR;
goto bail;
@@ -303,14 +303,14 @@ status_t ZipFile::readCentralDir(void)
/*
* Loop through and read the central dir entries.
*/
- LOGV("Scanning %d entries...\n", mEOCD.mTotalNumEntries);
+ ALOGV("Scanning %d entries...\n", mEOCD.mTotalNumEntries);
int entry;
for (entry = 0; entry < mEOCD.mTotalNumEntries; entry++) {
ZipEntry* pEntry = new ZipEntry;
result = pEntry->initFromCDE(mZipFp);
if (result != NO_ERROR) {
- LOGD("initFromCDE failed\n");
+ ALOGD("initFromCDE failed\n");
delete pEntry;
goto bail;
}
@@ -325,16 +325,16 @@ status_t ZipFile::readCentralDir(void)
{
unsigned char checkBuf[4];
if (fread(checkBuf, 1, 4, mZipFp) != 4) {
- LOGD("EOCD check read failed\n");
+ ALOGD("EOCD check read failed\n");
result = INVALID_OPERATION;
goto bail;
}
if (ZipEntry::getLongLE(checkBuf) != EndOfCentralDir::kSignature) {
- LOGD("EOCD read check failed\n");
+ ALOGD("EOCD read check failed\n");
result = UNKNOWN_ERROR;
goto bail;
}
- LOGV("+++ EOCD read check passed\n");
+ ALOGV("+++ EOCD read check passed\n");
}
bail:
@@ -416,7 +416,7 @@ status_t ZipFile::addCommon(const char* fileName, const void* data, size_t size,
bool failed = false;
result = compressFpToFp(mZipFp, inputFp, data, size, &crc);
if (result != NO_ERROR) {
- LOGD("compression failed, storing\n");
+ ALOGD("compression failed, storing\n");
failed = true;
} else {
/*
@@ -427,7 +427,7 @@ status_t ZipFile::addCommon(const char* fileName, const void* data, size_t size,
long src = inputFp ? ftell(inputFp) : size;
long dst = ftell(mZipFp) - startPosn;
if (dst + (dst / 10) > src) {
- LOGD("insufficient compression (src=%ld dst=%ld), storing\n",
+ ALOGD("insufficient compression (src=%ld dst=%ld), storing\n",
src, dst);
failed = true;
}
@@ -449,7 +449,7 @@ status_t ZipFile::addCommon(const char* fileName, const void* data, size_t size,
}
if (result != NO_ERROR) {
// don't need to truncate; happens in CDE rewrite
- LOGD("failed copying data in\n");
+ ALOGD("failed copying data in\n");
goto bail;
}
}
@@ -468,14 +468,14 @@ status_t ZipFile::addCommon(const char* fileName, const void* data, size_t size,
scanResult = ZipUtils::examineGzip(inputFp, &method, &uncompressedLen,
&compressedLen, &crc);
if (!scanResult || method != ZipEntry::kCompressDeflated) {
- LOGD("this isn't a deflated gzip file?");
+ ALOGD("this isn't a deflated gzip file?");
result = UNKNOWN_ERROR;
goto bail;
}
result = copyPartialFpToFp(mZipFp, inputFp, compressedLen, NULL);
if (result != NO_ERROR) {
- LOGD("failed copying gzip data in\n");
+ ALOGD("failed copying gzip data in\n");
goto bail;
}
} else {
@@ -603,7 +603,7 @@ status_t ZipFile::add(const ZipFile* pSourceZip, const ZipEntry* pSourceEntry,
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
!= NO_ERROR)
{
- LOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
+ ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
goto bail;
}
@@ -660,7 +660,7 @@ status_t ZipFile::copyFpToFp(FILE* dstFp, FILE* srcFp, unsigned long* pCRC32)
*pCRC32 = crc32(*pCRC32, tmpBuf, count);
if (fwrite(tmpBuf, 1, count, dstFp) != count) {
- LOGD("fwrite %d bytes failed\n", (int) count);
+ ALOGD("fwrite %d bytes failed\n", (int) count);
return UNKNOWN_ERROR;
}
}
@@ -682,7 +682,7 @@ status_t ZipFile::copyDataToFp(FILE* dstFp,
if (size > 0) {
*pCRC32 = crc32(*pCRC32, (const unsigned char*)data, size);
if (fwrite(data, 1, size, dstFp) != size) {
- LOGD("fwrite %d bytes failed\n", (int) size);
+ ALOGD("fwrite %d bytes failed\n", (int) size);
return UNKNOWN_ERROR;
}
}
@@ -716,7 +716,7 @@ status_t ZipFile::copyPartialFpToFp(FILE* dstFp, FILE* srcFp, long length,
count = fread(tmpBuf, 1, readSize, srcFp);
if ((long) count != readSize) { // error or unexpected EOF
- LOGD("fread %d bytes failed\n", (int) readSize);
+ ALOGD("fread %d bytes failed\n", (int) readSize);
return UNKNOWN_ERROR;
}
@@ -724,7 +724,7 @@ status_t ZipFile::copyPartialFpToFp(FILE* dstFp, FILE* srcFp, long length,
*pCRC32 = crc32(*pCRC32, tmpBuf, count);
if (fwrite(tmpBuf, 1, count, dstFp) != count) {
- LOGD("fwrite %d bytes failed\n", (int) count);
+ ALOGD("fwrite %d bytes failed\n", (int) count);
return UNKNOWN_ERROR;
}
@@ -780,10 +780,10 @@ status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
if (zerr != Z_OK) {
result = UNKNOWN_ERROR;
if (zerr == Z_VERSION_ERROR) {
- LOGE("Installed zlib is not compatible with linked version (%s)\n",
+ ALOGE("Installed zlib is not compatible with linked version (%s)\n",
ZLIB_VERSION);
} else {
- LOGD("Call to deflateInit2 failed (zerr=%d)\n", zerr);
+ ALOGD("Call to deflateInit2 failed (zerr=%d)\n", zerr);
}
goto bail;
}
@@ -799,7 +799,7 @@ status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
/* only read if the input buffer is empty */
if (zstream.avail_in == 0 && !atEof) {
- LOGV("+++ reading %d bytes\n", (int)kBufSize);
+ ALOGV("+++ reading %d bytes\n", (int)kBufSize);
if (data) {
getSize = size > kBufSize ? kBufSize : size;
memcpy(inBuf, data, getSize);
@@ -808,12 +808,12 @@ status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
} else {
getSize = fread(inBuf, 1, kBufSize, srcFp);
if (ferror(srcFp)) {
- LOGD("deflate read failed (errno=%d)\n", errno);
+ ALOGD("deflate read failed (errno=%d)\n", errno);
goto z_bail;
}
}
if (getSize < kBufSize) {
- LOGV("+++ got %d bytes, EOF reached\n",
+ ALOGV("+++ got %d bytes, EOF reached\n",
(int)getSize);
atEof = true;
}
@@ -831,7 +831,7 @@ status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
zerr = deflate(&zstream, flush);
if (zerr != Z_OK && zerr != Z_STREAM_END) {
- LOGD("zlib deflate call failed (zerr=%d)\n", zerr);
+ ALOGD("zlib deflate call failed (zerr=%d)\n", zerr);
result = UNKNOWN_ERROR;
goto z_bail;
}
@@ -840,11 +840,11 @@ status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
if (zstream.avail_out == 0 ||
(zerr == Z_STREAM_END && zstream.avail_out != (uInt) kBufSize))
{
- LOGV("+++ writing %d bytes\n", (int) (zstream.next_out - outBuf));
+ ALOGV("+++ writing %d bytes\n", (int) (zstream.next_out - outBuf));
if (fwrite(outBuf, 1, zstream.next_out - outBuf, dstFp) !=
(size_t)(zstream.next_out - outBuf))
{
- LOGD("write %d failed in deflate\n",
+ ALOGD("write %d failed in deflate\n",
(int) (zstream.next_out - outBuf));
goto z_bail;
}
@@ -931,7 +931,7 @@ status_t ZipFile::flush(void)
* of wasted space at the end of the file. Remove it now.
*/
if (ftruncate(fileno(mZipFp), ftell(mZipFp)) != 0) {
- LOGW("ftruncate failed %ld: %s\n", ftell(mZipFp), strerror(errno));
+ ALOGW("ftruncate failed %ld: %s\n", ftell(mZipFp), strerror(errno));
// not fatal
}
@@ -1019,7 +1019,7 @@ status_t ZipFile::crunchArchive(void)
pEntry->getLFHOffset(), span);
if (result != NO_ERROR) {
/* this is why you use a temp file */
- LOGE("error during crunch - archive is toast\n");
+ ALOGE("error during crunch - archive is toast\n");
return result;
}
@@ -1061,23 +1061,23 @@ status_t ZipFile::filemove(FILE* fp, off_t dst, off_t src, size_t n)
getSize = n;
if (fseek(fp, (long) src, SEEK_SET) != 0) {
- LOGD("filemove src seek %ld failed\n", (long) src);
+ ALOGD("filemove src seek %ld failed\n", (long) src);
return UNKNOWN_ERROR;
}
if (fread(readBuf, 1, getSize, fp) != getSize) {
- LOGD("filemove read %ld off=%ld failed\n",
+ ALOGD("filemove read %ld off=%ld failed\n",
(long) getSize, (long) src);
return UNKNOWN_ERROR;
}
if (fseek(fp, (long) dst, SEEK_SET) != 0) {
- LOGD("filemove dst seek %ld failed\n", (long) dst);
+ ALOGD("filemove dst seek %ld failed\n", (long) dst);
return UNKNOWN_ERROR;
}
if (fwrite(readBuf, 1, getSize, fp) != getSize) {
- LOGD("filemove write %ld off=%ld failed\n",
+ ALOGD("filemove write %ld off=%ld failed\n",
(long) getSize, (long) dst);
return UNKNOWN_ERROR;
}
@@ -1104,7 +1104,7 @@ time_t ZipFile::getModTime(int fd)
struct stat sb;
if (fstat(fd, &sb) < 0) {
- LOGD("HEY: fstat on fd %d failed\n", fd);
+ ALOGD("HEY: fstat on fd %d failed\n", fd);
return (time_t) -1;
}
@@ -1129,7 +1129,7 @@ int ZipFile::getZipFd(void) const
int fd;
fd = dup(fileno(mZipFp));
if (fd < 0) {
- LOGD("didn't work, errno=%d\n", errno);
+ ALOGD("didn't work, errno=%d\n", errno);
}
return fd;
@@ -1224,7 +1224,7 @@ status_t ZipFile::EndOfCentralDir::readBuf(const unsigned char* buf, int len)
if (len < kEOCDLen) {
/* looks like ZIP file got truncated */
- LOGD(" Zip EOCD: expected >= %d bytes, found %d\n",
+ ALOGD(" Zip EOCD: expected >= %d bytes, found %d\n",
kEOCDLen, len);
return INVALID_OPERATION;
}
@@ -1245,7 +1245,7 @@ status_t ZipFile::EndOfCentralDir::readBuf(const unsigned char* buf, int len)
if (mCommentLen > 0) {
if (kEOCDLen + mCommentLen > len) {
- LOGD("EOCD(%d) + comment(%d) exceeds len (%d)\n",
+ ALOGD("EOCD(%d) + comment(%d) exceeds len (%d)\n",
kEOCDLen, mCommentLen, len);
return UNKNOWN_ERROR;
}
@@ -1288,10 +1288,10 @@ status_t ZipFile::EndOfCentralDir::write(FILE* fp)
*/
void ZipFile::EndOfCentralDir::dump(void) const
{
- LOGD(" EndOfCentralDir contents:\n");
- LOGD(" diskNum=%u diskWCD=%u numEnt=%u totalNumEnt=%u\n",
+ ALOGD(" EndOfCentralDir contents:\n");
+ ALOGD(" diskNum=%u diskWCD=%u numEnt=%u totalNumEnt=%u\n",
mDiskNumber, mDiskWithCentralDir, mNumEntries, mTotalNumEntries);
- LOGD(" centDirSize=%lu centDirOff=%lu commentLen=%u\n",
+ ALOGD(" centDirSize=%lu centDirOff=%lu commentLen=%u\n",
mCentralDirSize, mCentralDirOffset, mCommentLen);
}
diff --git a/tools/zipalign/ZipFile.h b/tools/zipalign/ZipFile.h
index dbbd072..7877550 100644
--- a/tools/zipalign/ZipFile.h
+++ b/tools/zipalign/ZipFile.h
@@ -57,7 +57,7 @@ public:
/*
* Open a new or existing archive.
*/
- typedef enum {
+ enum {
kOpenReadOnly = 0x01,
kOpenReadWrite = 0x02,
kOpenCreate = 0x04, // create if it doesn't exist