author | Ricardo Cerqueira <ricardo@cyngn.com> | 2015-11-05 01:07:58 +0000
committer | Ricardo Cerqueira <ricardo@cyngn.com> | 2015-11-05 01:07:58 +0000
commit | 766a518ae6de4b81c4fc139f90b51521a2f681b7 (patch)
tree | e694616b3a0c42642ed38f396cba9ba5e5ff8671 /tools/releasetools
parent | 5cd236c279a16120cabc9e334285277f9e79f213 (diff)
parent | 358c8909f5dd043f9d8bfee61f567701946dc19a (diff)
Merge tag 'android-6.0.0_r26' into HEAD
Android 6.0.0 release 26
Conflicts:
core/Makefile
core/build_id.mk
tools/releasetools/ota_from_target_files.py
Change-Id: Ia1404ee9cd931a6540ea0dd3e2aa7b32d649e80c
Diffstat (limited to 'tools/releasetools')
-rwxr-xr-x | tools/releasetools/add_img_to_target_files.py | 16
-rw-r--r-- | tools/releasetools/blockimgdiff.py | 206
-rw-r--r-- | tools/releasetools/common.py | 4
-rwxr-xr-x | tools/releasetools/ota_from_target_files.py | 28
-rw-r--r-- | tools/releasetools/rangelib.py | 33
-rwxr-xr-x | tools/releasetools/sign_target_files_apks.py | 8
6 files changed, 267 insertions, 28 deletions
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index ce474f7..e8d61ad 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -40,6 +40,9 @@
 OPTIONS = common.OPTIONS
 OPTIONS.add_missing = False
 OPTIONS.rebuild_recovery = False
+OPTIONS.replace_verity_public_key = False
+OPTIONS.replace_verity_private_key = False
+OPTIONS.verity_signer_path = None

 def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None):
   """Turn the contents of SYSTEM into a system image and store it in
@@ -334,18 +337,27 @@ def AddImagesToTargetFiles(filename):
   common.ZipClose(output_zip)

 def main(argv):
-  def option_handler(o, _):
+  def option_handler(o, a):
     if o in ("-a", "--add_missing"):
       OPTIONS.add_missing = True
     elif o in ("-r", "--rebuild_recovery",):
       OPTIONS.rebuild_recovery = True
+    elif o == "--replace_verity_private_key":
+      OPTIONS.replace_verity_private_key = (True, a)
+    elif o == "--replace_verity_public_key":
+      OPTIONS.replace_verity_public_key = (True, a)
+    elif o == "--verity_signer_path":
+      OPTIONS.verity_signer_path = a
     else:
       return False
     return True

   args = common.ParseOptions(
       argv, __doc__, extra_opts="ar",
-      extra_long_opts=["add_missing", "rebuild_recovery"],
+      extra_long_opts=["add_missing", "rebuild_recovery",
+                       "replace_verity_public_key=",
+                       "replace_verity_private_key=",
+                       "verity_signer_path="],
       extra_option_handler=option_handler)
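The `option_handler` signature change above (from `(o, _)` to `(o, a)`) is what lets the three new verity flags carry values. A rough standalone sketch of the same pattern, using plain `getopt` instead of `common.ParseOptions`, with a hypothetical `parse_verity_opts` helper and a dict standing in for the global `OPTIONS` object:

```python
import getopt

def parse_verity_opts(argv):
    """Sketch: parse the new verity flags the way option_handler does."""
    opts, leftover = getopt.getopt(argv, "ar", [
        "add_missing", "rebuild_recovery",
        "replace_verity_public_key=",
        "replace_verity_private_key=",
        "verity_signer_path="])
    parsed = {"replace_verity_public_key": False,   # same defaults as OPTIONS
              "replace_verity_private_key": False,
              "verity_signer_path": None}
    for o, a in opts:
        if o == "--replace_verity_private_key":
            parsed["replace_verity_private_key"] = (True, a)  # (flag, key path)
        elif o == "--replace_verity_public_key":
            parsed["replace_verity_public_key"] = (True, a)
        elif o == "--verity_signer_path":
            parsed["verity_signer_path"] = a
    return parsed, leftover

# e.g. parse_verity_opts(["--verity_signer_path", "/path/to/signer"])
```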
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 6ed9ca2..cb6fc85 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -16,6 +16,7 @@ from __future__ import print_function

 from collections import deque, OrderedDict
 from hashlib import sha1
+import common
 import heapq
 import itertools
 import multiprocessing
@@ -105,11 +106,13 @@ class DataImage(Image):
     assert not (trim and pad)

     partial = len(self.data) % self.blocksize
+    padded = False
     if partial > 0:
       if trim:
         self.data = self.data[:-partial]
       elif pad:
         self.data += '\0' * (self.blocksize - partial)
+        padded = True
       else:
         raise ValueError(("data for DataImage must be multiple of %d bytes "
                           "unless trim or pad is specified") %
@@ -119,14 +122,23 @@ class DataImage(Image):

     self.total_blocks = len(self.data) / self.blocksize
     self.care_map = RangeSet(data=(0, self.total_blocks))
-    self.clobbered_blocks = RangeSet()
+    # When the last block is padded, we always write the whole block even for
+    # incremental OTAs. Because otherwise the last block may get skipped if
+    # unchanged for an incremental, but would fail the post-install
+    # verification if it has non-zero contents in the padding bytes.
+    # Bug: 23828506
+    if padded:
+      self.clobbered_blocks = RangeSet(
+          data=(self.total_blocks-1, self.total_blocks))
+    else:
+      self.clobbered_blocks = RangeSet()
     self.extended = RangeSet()

     zero_blocks = []
     nonzero_blocks = []
     reference = '\0' * self.blocksize
-    for i in range(self.total_blocks):
+    for i in range(self.total_blocks-1 if padded else self.total_blocks):
       d = self.data[i*self.blocksize : (i+1)*self.blocksize]
       if d == reference:
         zero_blocks.append(i)
@@ -138,14 +150,18 @@ class DataImage(Image):
     self.file_map = {"__ZERO": RangeSet(zero_blocks),
                      "__NONZERO": RangeSet(nonzero_blocks)}

+    if self.clobbered_blocks:
+      self.file_map["__COPY"] = self.clobbered_blocks
+
   def ReadRangeSet(self, ranges):
     return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]

   def TotalSha1(self, include_clobbered_blocks=False):
-    # DataImage always carries empty clobbered_blocks, so
-    # include_clobbered_blocks can be ignored.
-    assert self.clobbered_blocks.size() == 0
-    return sha1(self.data).hexdigest()
+    if not include_clobbered_blocks:
+      ranges = self.care_map.subtract(self.clobbered_blocks)
+      return sha1(self.ReadRangeSet(ranges)).hexdigest()
+    else:
+      return sha1(self.data).hexdigest()

 class Transfer(object):
@@ -173,6 +189,12 @@ class Transfer(object):
     return (sum(sr.size() for (_, sr) in self.stash_before) -
             sum(sr.size() for (_, sr) in self.use_stash))

+  def ConvertToNew(self):
+    assert self.style != "new"
+    self.use_stash = []
+    self.style = "new"
+    self.src_ranges = RangeSet()
+
   def __str__(self):
     return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
             " to " + str(self.tgt_ranges) + ">")
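The clobbered-last-block logic is easier to see with concrete numbers. A minimal standalone sketch (hypothetical 4096-byte block size and toy data, not the real `DataImage` class) of what the two `TotalSha1` modes now cover:

```python
from hashlib import sha1

BLOCKSIZE = 4096
data = b"x" * 5000                                  # 1 full block + 904 bytes
padded = data + b"\0" * (2 * BLOCKSIZE - len(data))  # pad() -> 2 blocks

# include_clobbered_blocks=True: hash everything, padding included.
full_sha = sha1(padded).hexdigest()

# include_clobbered_blocks=False: the padded last block is clobbered
# (always rewritten via "__COPY"), so the verification hash only covers
# the first block and never depends on the padding bytes on device.
verify_sha = sha1(padded[:BLOCKSIZE]).hexdigest()

assert full_sha != verify_sha
```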
@@ -267,6 +289,10 @@ class BlockImageDiff(object):
     self.ReverseBackwardEdges()
     self.ImproveVertexSequence()

+    # Ensure the runtime stash size is under the limit.
+    if self.version >= 2 and common.OPTIONS.cache_size is not None:
+      self.ReviseStashSize()
+
     # Double-check our work.
     self.AssertSequenceGood()

@@ -286,7 +312,6 @@ class BlockImageDiff(object):

     out = []
     total = 0
-    performs_read = False

     stashes = {}
     stashed_blocks = 0
@@ -398,7 +423,6 @@ class BlockImageDiff(object):
         out.append("%s %s\n" % (xf.style, xf.tgt_ranges.to_string_raw()))
         total += tgt_size
       elif xf.style == "move":
-        performs_read = True
         assert xf.tgt_ranges
         assert xf.src_ranges.size() == tgt_size
         if xf.src_ranges != xf.tgt_ranges:
@@ -423,7 +447,6 @@ class BlockImageDiff(object):
               xf.tgt_ranges.to_string_raw(), src_str))
         total += tgt_size
       elif xf.style in ("bsdiff", "imgdiff"):
-        performs_read = True
         assert xf.tgt_ranges
         assert xf.src_ranges
         if self.version == 1:
@@ -460,9 +483,20 @@ class BlockImageDiff(object):
     if free_string:
       out.append("".join(free_string))

-    # sanity check: abort if we're going to need more than 512 MB if
-    # stash space
-    assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)
+    if self.version >= 2:
+      # Sanity check: abort if we're going to need more stash space than
+      # the allowed size (cache_size * threshold). There are two purposes
+      # of having a threshold here. a) Part of the cache may have been
+      # occupied by some recovery logs. b) It will buy us some time to deal
+      # with the oversize issue.
+      cache_size = common.OPTIONS.cache_size
+      stash_threshold = common.OPTIONS.stash_threshold
+      max_allowed = cache_size * stash_threshold
+      assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \
+          'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
+              max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
+              self.tgt.blocksize, max_allowed, cache_size,
+              stash_threshold)

     # Zero out extended blocks as a workaround for bug 20881595.
     if self.tgt.extended:
@@ -489,8 +523,81 @@ class BlockImageDiff(object):
         f.write(i)

     if self.version >= 2:
-      print("max stashed blocks: %d (%d bytes)\n" % (
-          max_stashed_blocks, max_stashed_blocks * self.tgt.blocksize))
+      max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+      max_allowed = common.OPTIONS.cache_size * common.OPTIONS.stash_threshold
+      print("max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n" % (
+          max_stashed_blocks, max_stashed_size, max_allowed,
+          max_stashed_size * 100.0 / max_allowed))
+
+  def ReviseStashSize(self):
+    print("Revising stash size...")
+    stashes = {}
+
+    # Create the map between a stash and its def/use points. For example, for a
+    # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
+    for xf in self.transfers:
+      # Command xf defines (stores) all the stashes in stash_before.
+      for idx, sr in xf.stash_before:
+        stashes[idx] = (sr, xf)
+
+      # Record all the stashes command xf uses.
+      for idx, _ in xf.use_stash:
+        stashes[idx] += (xf,)
+
+    # Compute the maximum blocks available for stash based on /cache size and
+    # the threshold.
+    cache_size = common.OPTIONS.cache_size
+    stash_threshold = common.OPTIONS.stash_threshold
+    max_allowed = cache_size * stash_threshold / self.tgt.blocksize
+
+    stashed_blocks = 0
+    new_blocks = 0
+
+    # Now go through all the commands. Compute the required stash size on the
+    # fly. If a command requires excess stash than available, it deletes the
+    # stash by replacing the command that uses the stash with a "new" command
+    # instead.
+    for xf in self.transfers:
+      replaced_cmds = []
+
+      # xf.stash_before generates explicit stash commands.
+      for idx, sr in xf.stash_before:
+        if stashed_blocks + sr.size() > max_allowed:
+          # We cannot stash this one for a later command. Find out the command
+          # that will use this stash and replace the command with "new".
+          use_cmd = stashes[idx][2]
+          replaced_cmds.append(use_cmd)
+          print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
+        else:
+          stashed_blocks += sr.size()
+
+      # xf.use_stash generates free commands.
+      for _, sr in xf.use_stash:
+        stashed_blocks -= sr.size()
+
+      # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
+      # ComputePatches(), they both have the style of "diff".
+      if xf.style == "diff" and self.version >= 3:
+        assert xf.tgt_ranges and xf.src_ranges
+        if xf.src_ranges.overlaps(xf.tgt_ranges):
+          if stashed_blocks + xf.src_ranges.size() > max_allowed:
+            replaced_cmds.append(xf)
+            print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
+
+      # Replace the commands in replaced_cmds with "new"s.
+      for cmd in replaced_cmds:
+        # It no longer uses any commands in "use_stash". Remove the def points
+        # for all those stashes.
+        for idx, sr in cmd.use_stash:
+          def_cmd = stashes[idx][1]
+          assert (idx, sr) in def_cmd.stash_before
+          def_cmd.stash_before.remove((idx, sr))
+          new_blocks += sr.size()
+
+        cmd.ConvertToNew()
+
+    print("  Total %d blocks are packed as new blocks due to insufficient "
+          "cache size." % (new_blocks,))

   def ComputePatches(self, prefix):
     print("Reticulating splines...")
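A toy walk-through of the budget check that `ReviseStashSize()` applies for each stash definition; the arithmetic matches the patch, but the concrete numbers are assumed for illustration:

```python
blocksize = 4096
cache_size = 128 << 20        # assume a 128 MiB /cache partition
stash_threshold = 0.8         # default from ota_from_target_files.py
max_allowed = cache_size * stash_threshold / blocksize   # = 26214.4 blocks

stashed_blocks = 26000        # blocks currently held in the stash
incoming = 500                # blocks the next stash command wants to add

if stashed_blocks + incoming > max_allowed:
    # Over budget: the command that would later consume this stash is
    # rewritten as a "new" command (ConvertToNew), so nothing is stashed;
    # the package grows instead of the on-device cache usage.
    print("replacing with new: %d blocks" % incoming)
else:
    stashed_blocks += incoming
```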
@@ -847,6 +954,57 @@ class BlockImageDiff(object):
             a.goes_after[b] = size

   def FindTransfers(self):
+    """Parse the file_map to generate all the transfers."""
+
+    def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
+                    split=False):
+      """Wrapper function for adding a Transfer().
+
+      For BBOTA v3, we need to stash source blocks for resumable feature.
+      However, with the growth of file size and the shrink of the cache
+      partition source blocks are too large to be stashed. If a file occupies
+      too many blocks (greater than MAX_BLOCKS_PER_DIFF_TRANSFER), we split it
+      into smaller pieces by getting multiple Transfer()s.
+
+      The downside is that after splitting, we can no longer use imgdiff but
+      only bsdiff."""
+
+      MAX_BLOCKS_PER_DIFF_TRANSFER = 1024
+
+      # We care about diff transfers only.
+      if style != "diff" or not split:
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+        return
+
+      # Change nothing for small files.
+      if (tgt_ranges.size() <= MAX_BLOCKS_PER_DIFF_TRANSFER and
+          src_ranges.size() <= MAX_BLOCKS_PER_DIFF_TRANSFER):
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+        return
+
+      pieces = 0
+      while (tgt_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER and
+             src_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER):
+        tgt_split_name = "%s-%d" % (tgt_name, pieces)
+        src_split_name = "%s-%d" % (src_name, pieces)
+        tgt_first = tgt_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
+        src_first = src_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
+        Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
+                 by_id)
+
+        tgt_ranges = tgt_ranges.subtract(tgt_first)
+        src_ranges = src_ranges.subtract(src_first)
+        pieces += 1
+
+      # Handle remaining blocks.
+      if tgt_ranges.size() or src_ranges.size():
+        # Must be both non-empty.
+        assert tgt_ranges.size() and src_ranges.size()
+        tgt_split_name = "%s-%d" % (tgt_name, pieces)
+        src_split_name = "%s-%d" % (src_name, pieces)
+        Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
+                 by_id)
+
     empty = RangeSet()
     for tgt_fn, tgt_ranges in self.tgt.file_map.items():
       if tgt_fn == "__ZERO":
@@ -854,28 +1012,28 @@ class BlockImageDiff(object):
         # in any file and that are filled with zeros. We have a
         # special transfer style for zero blocks.
         src_ranges = self.src.file_map.get("__ZERO", empty)
-        Transfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
-                 "zero", self.transfers)
+        AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
+                    "zero", self.transfers)
         continue

       elif tgt_fn == "__COPY":
         # "__COPY" domain includes all the blocks not contained in any
         # file and that need to be copied unconditionally to the target.
-        Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+        AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
         continue

       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
-        Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
-                 "diff", self.transfers)
+        AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
+                    "diff", self.transfers, self.version >= 3)
         continue

       b = os.path.basename(tgt_fn)
       if b in self.src_basenames:
         # Look for an exact basename match in the source.
         src_fn = self.src_basenames[b]
-        Transfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
-                 "diff", self.transfers)
+        AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+                    "diff", self.transfers, self.version >= 3)
         continue

       b = re.sub("[0-9]+", "#", b)
@@ -885,11 +1043,11 @@ class BlockImageDiff(object):
         # for .so files that contain version numbers in the filename
         # that get bumped.)
         src_fn = self.src_numpatterns[b]
-        Transfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
-                 "diff", self.transfers)
+        AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+                    "diff", self.transfers, self.version >= 3)
         continue

-      Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+      AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)

   def AbbreviateSourceNames(self):
     for k in self.src.file_map.keys():
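The splitting policy is simple to state in isolation: cut any oversized diff transfer into pieces of at most 1024 blocks. A simplified sketch, reduced to a single length (the real `AddTransfer` walks target and source ranges in parallel with `RangeSet.first`); `split_sizes` is a hypothetical helper:

```python
MAX_BLOCKS_PER_DIFF_TRANSFER = 1024  # same constant as in the patch

def split_sizes(total_blocks):
    """Return the per-piece block counts for a transfer of total_blocks."""
    pieces = []
    remaining = total_blocks
    while remaining > MAX_BLOCKS_PER_DIFF_TRANSFER:
        pieces.append(MAX_BLOCKS_PER_DIFF_TRANSFER)
        remaining -= MAX_BLOCKS_PER_DIFF_TRANSFER
    if remaining:
        pieces.append(remaining)  # the leftover tail piece
    return pieces

assert split_sizes(800) == [800]                # small files are untouched
assert split_sizes(2500) == [1024, 1024, 452]   # large files become N pieces
```

The trade-off noted in the docstring applies to every piece: once split, a file is patched with bsdiff rather than imgdiff.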
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 36f3305..d13f82a 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -422,7 +422,9 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
     img_keyblock = tempfile.NamedTemporaryFile()
     cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
            img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
-           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
+           info_dict["vboot_key"] + ".vbprivk",
+           info_dict["vboot_subkey"] + ".vbprivk",
+           img_keyblock.name,
            img.name]
     p = Run(cmd, stdout=subprocess.PIPE)
     p.communicate()
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 549bdcc..77af8ab 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -42,6 +42,11 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
       radio image. This option is only meaningful when -i is specified,
       because a full radio is always included in a full OTA if applicable.

+  --full_bootloader
+      When generating an incremental OTA, always include a full copy of
+      bootloader image. This option is only meaningful when -i is specified,
+      because a full bootloader is always included in a full OTA if
+      applicable.
+
   -v  (--verify)
       Remount and verify the checksums of the files written to the
       system and vendor (if used) partitions. Incremental builds only.
@@ -84,6 +89,10 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
       Specifies the number of worker-threads that will be used when
       generating patches for incremental updates (defaults to 3).

+  --stash_threshold <float>
+      Specifies the threshold that will be used to compute the maximum
+      allowed stash size (defaults to 0.8).
+
   --backup <boolean>
       Enable or disable the execution of backuptool.sh.
       Disabled by default.
@@ -133,6 +142,10 @@ OPTIONS.updater_binary = None
 OPTIONS.oem_source = None
 OPTIONS.fallback_to_full = True
 OPTIONS.full_radio = False
+OPTIONS.full_bootloader = False
+# Stash size cannot exceed cache_size * threshold.
+OPTIONS.cache_size = None
+OPTIONS.stash_threshold = 0.8
 OPTIONS.backuptool = False
 OPTIONS.override_device = 'auto'
 OPTIONS.override_prop = False
@@ -1571,6 +1584,8 @@ def main(argv):
       OPTIONS.incremental_source = a
     elif o == "--full_radio":
       OPTIONS.full_radio = True
+    elif o == "--full_bootloader":
+      OPTIONS.full_bootloader = True
     elif o in ("-w", "--wipe_user_data"):
       OPTIONS.wipe_user_data = True
     elif o in ("-n", "--no_prereq"):
@@ -1602,6 +1617,12 @@ def main(argv):
       OPTIONS.updater_binary = a
     elif o in ("--no_fallback_to_full",):
       OPTIONS.fallback_to_full = False
+    elif o == "--stash_threshold":
+      try:
+        OPTIONS.stash_threshold = float(a)
+      except ValueError:
+        raise ValueError("Cannot parse value %r for option %r - expecting "
+                         "a float" % (a, o))
     elif o in ("--backup"):
       OPTIONS.backuptool = bool(a.lower() == 'true')
     elif o in ("--override_device"):
@@ -1619,6 +1640,7 @@ def main(argv):
                                  "package_key=",
                                  "incremental_from=",
                                  "full_radio",
+                                 "full_bootloader",
                                  "wipe_user_data",
                                  "no_prereq",
                                  "extra_script=",
@@ -1631,6 +1653,7 @@ def main(argv):
                                  "oem_settings=",
                                  "verify",
                                  "no_fallback_to_full",
+                                 "stash_threshold=",
                                  "backup=",
                                  "override_device=",
                                  "override_prop="
@@ -1691,6 +1714,11 @@ def main(argv):
     output_zip = zipfile.ZipFile(temp_zip_file, "w",
                                  compression=zipfile.ZIP_DEFLATED)

+    cache_size = OPTIONS.info_dict.get("cache_size", None)
+    if cache_size is None:
+      raise RuntimeError("can't determine the cache partition size")
+    OPTIONS.cache_size = cache_size
+
     if OPTIONS.incremental_source is None:
       WriteFullOTAPackage(input_zip, output_zip)
       if OPTIONS.package_key is None:
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index 1506658..373bbed 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -24,6 +24,7 @@ class RangeSet(object):
   lots of runs."""

   def __init__(self, data=None):
+    # TODO(tbao): monotonic is broken when passing in a tuple.
     self.monotonic = False
     if isinstance(data, str):
       self._parse_internal(data)
@@ -260,6 +261,38 @@ class RangeSet(object):
       out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
     return out

+  def first(self, n):
+    """Return the RangeSet that contains at most the first 'n' integers.
+
+    >>> RangeSet("0-9").first(1)
+    <RangeSet("0")>
+    >>> RangeSet("10-19").first(5)
+    <RangeSet("10-14")>
+    >>> RangeSet("10-19").first(15)
+    <RangeSet("10-19")>
+    >>> RangeSet("10-19 30-39").first(3)
+    <RangeSet("10-12")>
+    >>> RangeSet("10-19 30-39").first(15)
+    <RangeSet("10-19 30-34")>
+    >>> RangeSet("10-19 30-39").first(30)
+    <RangeSet("10-19 30-39")>
+    >>> RangeSet("0-9").first(0)
+    <RangeSet("")>
+    """
+
+    if self.size() <= n:
+      return self
+
+    out = []
+    for s, e in self:
+      if e - s >= n:
+        out += (s, s+n)
+        break
+      else:
+        out += (s, e)
+        n -= e - s
+    return RangeSet(data=out)
+
 if __name__ == "__main__":
   import doctest
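The new `RangeSet.first()` behaves like a take-n over the set's integer runs. A self-contained sketch of the same loop over plain half-open `(start, end)` pairs (`first_n` is a hypothetical stand-in for the real method, minus the return-self fast path's identity semantics):

```python
def first_n(ranges, n):
    """Return at most the first n integers of 'ranges', a sorted list of
    half-open (start, end) pairs -- a simplified stand-in for RangeSet."""
    out = []
    for s, e in ranges:
        if e - s >= n:
            if n:                       # n == 0 yields the empty set
                out.append((s, s + n))  # this run satisfies the rest of n
            break
        out.append((s, e))              # take the whole run, keep going
        n -= e - s
    return out

assert first_n([(10, 20)], 5) == [(10, 15)]
assert first_n([(10, 20), (30, 40)], 15) == [(10, 20), (30, 35)]
assert first_n([(0, 10)], 0) == []
```

This is exactly the primitive `AddTransfer` leans on: take the first 1024 blocks, `subtract()` them, repeat.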
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index ec49112..60d62c2 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -203,11 +203,13 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
       common.ZipWriteStr(output_tf_zip, out_info, data)
     elif info.filename in ("SYSTEM/build.prop",
                            "VENDOR/build.prop",
+                           "BOOT/RAMDISK/default.prop",
                            "RECOVERY/RAMDISK/default.prop"):
       print "rewriting %s:" % (info.filename,)
       new_data = RewriteProps(data, misc_info)
       common.ZipWriteStr(output_tf_zip, out_info, new_data)
-      if info.filename == "RECOVERY/RAMDISK/default.prop":
+      if info.filename in ("BOOT/RAMDISK/default.prop",
+                           "RECOVERY/RAMDISK/default.prop"):
         write_to_temp(info.filename, info.external_attr, new_data)
     elif info.filename.endswith("mac_permissions.xml"):
       print "rewriting %s with new keys." % (info.filename,)
@@ -310,6 +312,10 @@ def RewriteProps(data, misc_info):
       pieces = value.split("/")
       pieces[-1] = EditTags(pieces[-1])
       value = "/".join(pieces)
+    elif key == "ro.bootimage.build.fingerprint":
+      pieces = value.split("/")
+      pieces[-1] = EditTags(pieces[-1])
+      value = "/".join(pieces)
     elif key == "ro.build.description":
       pieces = value.split(" ")
       assert len(pieces) == 5
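The `ro.bootimage.build.fingerprint` branch mirrors what the script already did for the system fingerprint: only the trailing tags field of the `/`-separated fingerprint is rewritten. A small sketch with a stand-in for `EditTags` (the real function applies the script's configured tag changes, e.g. swapping test keys for release keys); the fingerprint value here is made up:

```python
def edit_tags(tags):
    # Hypothetical stand-in for sign_target_files_apks.EditTags.
    return "release-keys"

value = "vendor/product/device:6.0/MRA58U/abc123:userdebug/test-keys"
pieces = value.split("/")
pieces[-1] = edit_tags(pieces[-1])   # only the tags field changes
value = "/".join(pieces)

assert value.endswith("/release-keys")
```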