-rw-r--r--  core/Makefile                                  |   7
-rw-r--r--  core/build_id.mk                               |   2
-rw-r--r--  core/product.mk                                |   1
-rw-r--r--  envsetup.sh                                    |  12
-rw-r--r--  target/product/base.mk                         |   1
-rw-r--r--  target/product/embedded.mk                     |   1
-rw-r--r--  target/product/emulator.mk                     |   1
-rw-r--r--  target/product/full_base.mk                    |   3
-rw-r--r--  target/product/vboot.mk                        |   1
-rw-r--r--  tools/droiddoc/templates-sdk/sdkpage.cs        |  16
-rwxr-xr-x  tools/releasetools/add_img_to_target_files.py  |  16
-rw-r--r--  tools/releasetools/blockimgdiff.py             | 206
-rw-r--r--  tools/releasetools/common.py                   |   4
-rwxr-xr-x  tools/releasetools/ota_from_target_files.py    |  28
-rw-r--r--  tools/releasetools/rangelib.py                 |  33
-rwxr-xr-x  tools/releasetools/sign_target_files_apks.py   |   8
16 files changed, 296 insertions, 44 deletions
diff --git a/core/Makefile b/core/Makefile
index c108988..596abb1 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -565,14 +565,14 @@ else ifndef BOARD_CUSTOM_BOOTIMG_MK
$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(BOOTIMAGE_EXTRA_DEPS)
$(call pretty,"Target boot image: $@")
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@.unsigned
- $(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $@.keyblock $@
+ $(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $@.keyblock $@
$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER)
@echo "make $@: ignoring dependencies"
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET).unsigned
- $(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
+ $(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
else # PRODUCT_SUPPORTS_VBOOT != true
@@ -819,6 +819,7 @@ $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_VERITY_PARTITION),$(hide) ech
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_VERITY_PARTITION),$(hide) echo "vendor_verity_block_device=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_key=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_subkey=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "futility=$(FUTILITY)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_signer_cmd=$(VBOOT_SIGNER)" >> $(1))
$(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),\
@@ -993,7 +994,7 @@ define build-recoveryimage-target
$(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\
$(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1))
$(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \
- $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(1).keyblock $(1))
+ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1))
$(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))
@echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST})
endef
diff --git a/core/build_id.mk b/core/build_id.mk
index aad477a..03692e0 100644
--- a/core/build_id.mk
+++ b/core/build_id.mk
@@ -18,4 +18,4 @@
# (like "CRB01"). It must be a single word, and is
# capitalized by convention.
-export BUILD_ID=MRA58V
+export BUILD_ID=MDB08M
diff --git a/core/product.mk b/core/product.mk
index 03490ca..427fd83 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -107,6 +107,7 @@ _product_var_list := \
PRODUCT_SYSTEM_PROPERTY_BLACKLIST \
PRODUCT_SYSTEM_SERVER_JARS \
PRODUCT_VBOOT_SIGNING_KEY \
+ PRODUCT_VBOOT_SIGNING_SUBKEY \
PRODUCT_VERITY_SIGNING_KEY \
PRODUCT_SYSTEM_VERITY_PARTITION \
PRODUCT_VENDOR_VERITY_PARTITION \
diff --git a/envsetup.sh b/envsetup.sh
index 4ce7d3b..8eb22cf 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1625,14 +1625,20 @@ function godir () {
return
fi
T=$(gettop)
- if [[ ! -f $T/filelist ]]; then
+ if [ ! "$OUT_DIR" = "" ]; then
+ mkdir -p $OUT_DIR
+ FILELIST=$OUT_DIR/filelist
+ else
+ FILELIST=$T/filelist
+ fi
+ if [[ ! -f $FILELIST ]]; then
echo -n "Creating index..."
- (\cd $T; find . -wholename ./out -prune -o -wholename ./.repo -prune -o -type f > filelist)
+ (\cd $T; find . -wholename ./out -prune -o -wholename ./.repo -prune -o -type f > $FILELIST)
echo " Done"
echo ""
fi
local lines
- lines=($(\grep "$1" $T/filelist | sed -e 's/\/[^/]*$//' | sort | uniq))
+ lines=($(\grep "$1" $FILELIST | sed -e 's/\/[^/]*$//' | sort | uniq))
if [[ ${#lines[@]} = 0 ]]; then
echo "Not found"
return
diff --git a/target/product/base.mk b/target/product/base.mk
index 4ad0d25..a9610d1 100644
--- a/target/product/base.mk
+++ b/target/product/base.mk
@@ -111,6 +111,7 @@ PRODUCT_PACKAGES += \
run-as \
schedtest \
sdcard \
+ secdiscard \
services \
settings \
sgdisk \
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index c40de4f..25a8975 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -89,6 +89,7 @@ PRODUCT_DEFAULT_PROPERTY_OVERRIDES += \
PRODUCT_COPY_FILES += \
system/core/rootdir/init.usb.rc:root/init.usb.rc \
+ system/core/rootdir/init.usb.configfs.rc:root/init.usb.configfs.rc \
system/core/rootdir/init.trace.rc:root/init.trace.rc \
system/core/rootdir/ueventd.rc:root/ueventd.rc \
system/core/rootdir/etc/hosts:system/etc/hosts
diff --git a/target/product/emulator.mk b/target/product/emulator.mk
index 982f7da..7394d4f 100644
--- a/target/product/emulator.mk
+++ b/target/product/emulator.mk
@@ -50,6 +50,7 @@ PRODUCT_PACKAGES += \
sensors.ranchu
PRODUCT_COPY_FILES += \
+ frameworks/native/data/etc/android.hardware.ethernet.xml:system/etc/permissions/android.hardware.ethernet.xml \
device/generic/goldfish/fstab.goldfish:root/fstab.goldfish \
device/generic/goldfish/init.goldfish.rc:root/init.goldfish.rc \
device/generic/goldfish/init.goldfish.sh:system/etc/init.goldfish.sh \
diff --git a/target/product/full_base.mk b/target/product/full_base.mk
index fc6fd5c..bac3e03 100644
--- a/target/product/full_base.mk
+++ b/target/product/full_base.mk
@@ -26,9 +26,6 @@ PRODUCT_PACKAGES := \
# Put en_US first in the list, so make it default.
PRODUCT_LOCALES := en_US
-# Include drawables for all densities
-PRODUCT_AAPT_CONFIG := normal
-
# Get some sounds
$(call inherit-product-if-exists, frameworks/base/data/sounds/AllAudio.mk)
diff --git a/target/product/vboot.mk b/target/product/vboot.mk
index e4b1144..48a4883 100644
--- a/target/product/vboot.mk
+++ b/target/product/vboot.mk
@@ -22,3 +22,4 @@ PRODUCT_SUPPORTS_VBOOT := true
# We expect this file to exist with the suffixes ".vbprivk" and ".vbpubk".
# TODO: find a proper location for this
PRODUCT_VBOOT_SIGNING_KEY := external/vboot_reference/tests/devkeys/kernel_data_key
+PRODUCT_VBOOT_SIGNING_SUBKEY := external/vboot_reference/tests/devkeys/kernel_subkey
diff --git a/tools/droiddoc/templates-sdk/sdkpage.cs b/tools/droiddoc/templates-sdk/sdkpage.cs
index 013de59..3a3a9a3 100644
--- a/tools/droiddoc/templates-sdk/sdkpage.cs
+++ b/tools/droiddoc/templates-sdk/sdkpage.cs
@@ -342,7 +342,7 @@ var:sdk.linux_download
<tr>
<td rowspan="3">Windows</td>
<td>
- <a onclick="return onDownload(this)" id="win-bundle"
+ <a onclick="return onDownload(this,false,true)" id="win-bundle"
href="https://dl.google.com/dl/android/studio/install/<?cs var:studio.version ?>/<?cs var:studio.win_bundle_exe_download ?>"
><?cs var:studio.win_bundle_exe_download ?></a><br>(Recommended)
</td>
@@ -353,7 +353,7 @@ var:sdk.linux_download
<tr>
<!-- blank TD from Windows rowspan -->
<td>
- <a onclick="return onDownload(this)"
+ <a onclick="return onDownload(this,false,true)"
href="https://dl.google.com/dl/android/studio/install/<?cs var:studio.version ?>/<?cs var:studio.win_notools_exe_download ?>"
><?cs var:studio.win_notools_exe_download ?></a><br>(No SDK tools included)
</td>
@@ -364,7 +364,7 @@ var:sdk.linux_download
<tr>
<!-- blank TD from Windows rowspan -->
<td>
- <a onclick="return onDownload(this)"
+ <a onclick="return onDownload(this,false,true)"
href="https://dl.google.com/dl/android/studio/ide-zips/<?cs var:studio.version ?>/<?cs var:studio.win_bundle_download ?>"
><?cs var:studio.win_bundle_download ?></a>
</td>
@@ -375,7 +375,7 @@ var:sdk.linux_download
<tr>
<td><nobr>Mac OS X</nobr></td>
<td>
- <a onclick="return onDownload(this)" id="mac-bundle"
+ <a onclick="return onDownload(this,false,true)" id="mac-bundle"
href="https://dl.google.com/dl/android/studio/install/<?cs var:studio.version ?>/<?cs var:studio.mac_bundle_download ?>"
><?cs var:studio.mac_bundle_download ?></a>
</td>
@@ -386,7 +386,7 @@ var:sdk.linux_download
<tr>
<td>Linux</td>
<td>
- <a onclick="return onDownload(this)" id="linux-bundle"
+ <a onclick="return onDownload(this,false,true)" id="linux-bundle"
href="https://dl.google.com/dl/android/studio/ide-zips/<?cs var:studio.version ?>/<?cs var:studio.linux_bundle_download ?>"
><?cs var:studio.linux_bundle_download ?></a>
</td>
@@ -450,7 +450,11 @@ var:sdk.linux_download
}
$("#downloadForRealz").attr('bundle', bundle);
- $("a#downloadForRealz").attr("name", $(link).attr('href'));
+ if (bundle && !button) {
+ $("a#downloadForRealz").attr("name", "#" + $(link).attr('id'));
+ } else {
+ $("a#downloadForRealz").attr("name", $(link).attr('href'));
+ }
$("#tos").show();
$("#landing").hide();
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index ce474f7..e8d61ad 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -40,6 +40,9 @@ OPTIONS = common.OPTIONS
OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False
+OPTIONS.replace_verity_public_key = False
+OPTIONS.replace_verity_private_key = False
+OPTIONS.verity_signer_path = None
def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None):
"""Turn the contents of SYSTEM into a system image and store it in
@@ -334,18 +337,27 @@ def AddImagesToTargetFiles(filename):
common.ZipClose(output_zip)
def main(argv):
- def option_handler(o, _):
+ def option_handler(o, a):
if o in ("-a", "--add_missing"):
OPTIONS.add_missing = True
elif o in ("-r", "--rebuild_recovery",):
OPTIONS.rebuild_recovery = True
+ elif o == "--replace_verity_private_key":
+ OPTIONS.replace_verity_private_key = (True, a)
+ elif o == "--replace_verity_public_key":
+ OPTIONS.replace_verity_public_key = (True, a)
+ elif o == "--verity_signer_path":
+ OPTIONS.verity_signer_path = a
else:
return False
return True
args = common.ParseOptions(
argv, __doc__, extra_opts="ar",
- extra_long_opts=["add_missing", "rebuild_recovery"],
+ extra_long_opts=["add_missing", "rebuild_recovery",
+ "replace_verity_public_key=",
+ "replace_verity_private_key=",
+ "verity_signer_path="],
extra_option_handler=option_handler)
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 6ed9ca2..cb6fc85 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -16,6 +16,7 @@ from __future__ import print_function
from collections import deque, OrderedDict
from hashlib import sha1
+import common
import heapq
import itertools
import multiprocessing
@@ -105,11 +106,13 @@ class DataImage(Image):
assert not (trim and pad)
partial = len(self.data) % self.blocksize
+ padded = False
if partial > 0:
if trim:
self.data = self.data[:-partial]
elif pad:
self.data += '\0' * (self.blocksize - partial)
+ padded = True
else:
raise ValueError(("data for DataImage must be multiple of %d bytes "
"unless trim or pad is specified") %
@@ -119,14 +122,23 @@ class DataImage(Image):
self.total_blocks = len(self.data) / self.blocksize
self.care_map = RangeSet(data=(0, self.total_blocks))
- self.clobbered_blocks = RangeSet()
+ # When the last block is padded, we always write the whole block even for
+ # incremental OTAs. Because otherwise the last block may get skipped if
+ # unchanged for an incremental, but would fail the post-install
+ # verification if it has non-zero contents in the padding bytes.
+ # Bug: 23828506
+ if padded:
+ self.clobbered_blocks = RangeSet(
+ data=(self.total_blocks-1, self.total_blocks))
+ else:
+ self.clobbered_blocks = RangeSet()
self.extended = RangeSet()
zero_blocks = []
nonzero_blocks = []
reference = '\0' * self.blocksize
- for i in range(self.total_blocks):
+ for i in range(self.total_blocks-1 if padded else self.total_blocks):
d = self.data[i*self.blocksize : (i+1)*self.blocksize]
if d == reference:
zero_blocks.append(i)
@@ -138,14 +150,18 @@ class DataImage(Image):
self.file_map = {"__ZERO": RangeSet(zero_blocks),
"__NONZERO": RangeSet(nonzero_blocks)}
+ if self.clobbered_blocks:
+ self.file_map["__COPY"] = self.clobbered_blocks
+
def ReadRangeSet(self, ranges):
return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
def TotalSha1(self, include_clobbered_blocks=False):
- # DataImage always carries empty clobbered_blocks, so
- # include_clobbered_blocks can be ignored.
- assert self.clobbered_blocks.size() == 0
- return sha1(self.data).hexdigest()
+ if not include_clobbered_blocks:
+ ranges = self.care_map.subtract(self.clobbered_blocks)
+ return sha1(self.ReadRangeSet(ranges)).hexdigest()
+ else:
+ return sha1(self.data).hexdigest()
class Transfer(object):
@@ -173,6 +189,12 @@ class Transfer(object):
return (sum(sr.size() for (_, sr) in self.stash_before) -
sum(sr.size() for (_, sr) in self.use_stash))
+ def ConvertToNew(self):
+ assert self.style != "new"
+ self.use_stash = []
+ self.style = "new"
+ self.src_ranges = RangeSet()
+
def __str__(self):
return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
" to " + str(self.tgt_ranges) + ">")
@@ -267,6 +289,10 @@ class BlockImageDiff(object):
self.ReverseBackwardEdges()
self.ImproveVertexSequence()
+ # Ensure the runtime stash size is under the limit.
+ if self.version >= 2 and common.OPTIONS.cache_size is not None:
+ self.ReviseStashSize()
+
# Double-check our work.
self.AssertSequenceGood()
@@ -286,7 +312,6 @@ class BlockImageDiff(object):
out = []
total = 0
- performs_read = False
stashes = {}
stashed_blocks = 0
@@ -398,7 +423,6 @@ class BlockImageDiff(object):
out.append("%s %s\n" % (xf.style, xf.tgt_ranges.to_string_raw()))
total += tgt_size
elif xf.style == "move":
- performs_read = True
assert xf.tgt_ranges
assert xf.src_ranges.size() == tgt_size
if xf.src_ranges != xf.tgt_ranges:
@@ -423,7 +447,6 @@ class BlockImageDiff(object):
xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style in ("bsdiff", "imgdiff"):
- performs_read = True
assert xf.tgt_ranges
assert xf.src_ranges
if self.version == 1:
@@ -460,9 +483,20 @@ class BlockImageDiff(object):
if free_string:
out.append("".join(free_string))
- # sanity check: abort if we're going to need more than 512 MB if
- # stash space
- assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)
+ if self.version >= 2:
+ # Sanity check: abort if we're going to need more stash space than
+ # the allowed size (cache_size * threshold). There are two purposes
+ # of having a threshold here. a) Part of the cache may have been
+ # occupied by some recovery logs. b) It will buy us some time to deal
+ # with the oversize issue.
+ cache_size = common.OPTIONS.cache_size
+ stash_threshold = common.OPTIONS.stash_threshold
+ max_allowed = cache_size * stash_threshold
+ assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \
+ 'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
+ max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
+ self.tgt.blocksize, max_allowed, cache_size,
+ stash_threshold)
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
@@ -489,8 +523,81 @@ class BlockImageDiff(object):
f.write(i)
if self.version >= 2:
- print("max stashed blocks: %d (%d bytes)\n" % (
- max_stashed_blocks, max_stashed_blocks * self.tgt.blocksize))
+ max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+ max_allowed = common.OPTIONS.cache_size * common.OPTIONS.stash_threshold
+ print("max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n" % (
+ max_stashed_blocks, max_stashed_size, max_allowed,
+ max_stashed_size * 100.0 / max_allowed))
+
+ def ReviseStashSize(self):
+ print("Revising stash size...")
+ stashes = {}
+
+ # Create the map between a stash and its def/use points. For example, for a
+ # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
+ for xf in self.transfers:
+ # Command xf defines (stores) all the stashes in stash_before.
+ for idx, sr in xf.stash_before:
+ stashes[idx] = (sr, xf)
+
+ # Record all the stashes command xf uses.
+ for idx, _ in xf.use_stash:
+ stashes[idx] += (xf,)
+
+ # Compute the maximum blocks available for stash based on /cache size and
+ # the threshold.
+ cache_size = common.OPTIONS.cache_size
+ stash_threshold = common.OPTIONS.stash_threshold
+ max_allowed = cache_size * stash_threshold / self.tgt.blocksize
+
+ stashed_blocks = 0
+ new_blocks = 0
+
+ # Now go through all the commands. Compute the required stash size on the
+ # fly. If a command requires more stash than is available, it deletes the
+ # stash by replacing the command that uses the stash with a "new" command
+ # instead.
+ for xf in self.transfers:
+ replaced_cmds = []
+
+ # xf.stash_before generates explicit stash commands.
+ for idx, sr in xf.stash_before:
+ if stashed_blocks + sr.size() > max_allowed:
+ # We cannot stash this one for a later command. Find out the command
+ # that will use this stash and replace the command with "new".
+ use_cmd = stashes[idx][2]
+ replaced_cmds.append(use_cmd)
+ print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
+ else:
+ stashed_blocks += sr.size()
+
+ # xf.use_stash generates free commands.
+ for _, sr in xf.use_stash:
+ stashed_blocks -= sr.size()
+
+ # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
+ # ComputePatches(), they both have the style of "diff".
+ if xf.style == "diff" and self.version >= 3:
+ assert xf.tgt_ranges and xf.src_ranges
+ if xf.src_ranges.overlaps(xf.tgt_ranges):
+ if stashed_blocks + xf.src_ranges.size() > max_allowed:
+ replaced_cmds.append(xf)
+ print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
+
+ # Replace the commands in replaced_cmds with "new"s.
+ for cmd in replaced_cmds:
+ # It no longer uses any commands in "use_stash". Remove the def points
+ # for all those stashes.
+ for idx, sr in cmd.use_stash:
+ def_cmd = stashes[idx][1]
+ assert (idx, sr) in def_cmd.stash_before
+ def_cmd.stash_before.remove((idx, sr))
+ new_blocks += sr.size()
+
+ cmd.ConvertToNew()
+
+ print(" Total %d blocks are packed as new blocks due to insufficient "
+ "cache size." % (new_blocks,))
def ComputePatches(self, prefix):
print("Reticulating splines...")
@@ -847,6 +954,57 @@ class BlockImageDiff(object):
a.goes_after[b] = size
def FindTransfers(self):
+ """Parse the file_map to generate all the transfers."""
+
+ def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
+ split=False):
+ """Wrapper function for adding a Transfer().
+
+ For BBOTA v3, we need to stash source blocks to support the resumable
+ update feature. However, as files grow and the cache partition shrinks,
+ source blocks may become too large to be stashed. If a file occupies
+ too many blocks (greater than MAX_BLOCKS_PER_DIFF_TRANSFER), we split it
+ into smaller pieces by getting multiple Transfer()s.
+
+ The downside is that after splitting, we can no longer use imgdiff but
+ only bsdiff."""
+
+ MAX_BLOCKS_PER_DIFF_TRANSFER = 1024
+
+ # We care about diff transfers only.
+ if style != "diff" or not split:
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ return
+
+ # Change nothing for small files.
+ if (tgt_ranges.size() <= MAX_BLOCKS_PER_DIFF_TRANSFER and
+ src_ranges.size() <= MAX_BLOCKS_PER_DIFF_TRANSFER):
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ return
+
+ pieces = 0
+ while (tgt_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER and
+ src_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER):
+ tgt_split_name = "%s-%d" % (tgt_name, pieces)
+ src_split_name = "%s-%d" % (src_name, pieces)
+ tgt_first = tgt_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
+ src_first = src_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
+ Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
+ by_id)
+
+ tgt_ranges = tgt_ranges.subtract(tgt_first)
+ src_ranges = src_ranges.subtract(src_first)
+ pieces += 1
+
+ # Handle remaining blocks.
+ if tgt_ranges.size() or src_ranges.size():
+ # Must be both non-empty.
+ assert tgt_ranges.size() and src_ranges.size()
+ tgt_split_name = "%s-%d" % (tgt_name, pieces)
+ src_split_name = "%s-%d" % (src_name, pieces)
+ Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
+ by_id)
+
empty = RangeSet()
for tgt_fn, tgt_ranges in self.tgt.file_map.items():
if tgt_fn == "__ZERO":
@@ -854,28 +1012,28 @@ class BlockImageDiff(object):
# in any file and that are filled with zeros. We have a
# special transfer style for zero blocks.
src_ranges = self.src.file_map.get("__ZERO", empty)
- Transfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
- "zero", self.transfers)
+ AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
+ "zero", self.transfers)
continue
elif tgt_fn == "__COPY":
# "__COPY" domain includes all the blocks not contained in any
# file and that need to be copied unconditionally to the target.
- Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+ AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
continue
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
- Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
- "diff", self.transfers)
+ AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
+ "diff", self.transfers, self.version >= 3)
continue
b = os.path.basename(tgt_fn)
if b in self.src_basenames:
# Look for an exact basename match in the source.
src_fn = self.src_basenames[b]
- Transfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers)
+ AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+ "diff", self.transfers, self.version >= 3)
continue
b = re.sub("[0-9]+", "#", b)
@@ -885,11 +1043,11 @@ class BlockImageDiff(object):
# for .so files that contain version numbers in the filename
# that get bumped.)
src_fn = self.src_numpatterns[b]
- Transfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers)
+ AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+ "diff", self.transfers, self.version >= 3)
continue
- Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+ AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
def AbbreviateSourceNames(self):
for k in self.src.file_map.keys():
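
The splitting scheme in AddTransfer() above can be exercised on its own. A minimal sketch, assuming tools/releasetools is on the Python path so rangelib is importable; split_ranges is an illustrative helper, not part of the patch:

    from __future__ import print_function

    from rangelib import RangeSet

    MAX_BLOCKS_PER_DIFF_TRANSFER = 1024

    def split_ranges(tgt_ranges, src_ranges):
        # Yield (tgt, src) pieces of at most MAX_BLOCKS_PER_DIFF_TRANSFER
        # blocks each, mirroring the while-loop inside AddTransfer().
        while (tgt_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER and
               src_ranges.size() > MAX_BLOCKS_PER_DIFF_TRANSFER):
            tgt_first = tgt_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
            src_first = src_ranges.first(MAX_BLOCKS_PER_DIFF_TRANSFER)
            yield tgt_first, src_first
            tgt_ranges = tgt_ranges.subtract(tgt_first)
            src_ranges = src_ranges.subtract(src_first)
        if tgt_ranges.size() or src_ranges.size():
            # Any leftover is emitted as the final piece.
            yield tgt_ranges, src_ranges

    # A 2500-block target diffed against a 2500-block source comes out as
    # three pieces of 1024, 1024 and 452 blocks.
    for tgt, src in split_ranges(RangeSet("0-2499"), RangeSet("5000-7499")):
        print(tgt.size(), src.size())

After splitting, each piece is diffed independently, which is why the docstring notes that imgdiff can no longer be used on the result.
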
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 36f3305..d13f82a 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -422,7 +422,9 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
img_keyblock = tempfile.NamedTemporaryFile()
cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
- info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
+ info_dict["vboot_key"] + ".vbprivk",
+ info_dict["vboot_subkey"] + ".vbprivk",
+ img_keyblock.name,
img.name]
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
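
For reference, a minimal sketch of the signer invocation that BuildBootableImage now assembles, with the kernel subkey inserted between the signing key pair and the keyblock path. The wrapper name sign_vboot_image is illustrative only; the info_dict keys are the ones emitted by core/Makefile above:

    import subprocess

    def sign_vboot_image(info_dict, unsigned_img, keyblock_out, signed_img):
        # Mirrors the cmd list in BuildBootableImage() after this change.
        cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
               unsigned_img,
               info_dict["vboot_key"] + ".vbpubk",
               info_dict["vboot_key"] + ".vbprivk",
               info_dict["vboot_subkey"] + ".vbprivk",
               keyblock_out,
               signed_img]
        subprocess.check_call(cmd)
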
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 549bdcc..77af8ab 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -42,6 +42,11 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
radio image. This option is only meaningful when -i is specified,
because a full radio is always included in a full OTA if applicable.
+ --full_bootloader
+ When generating an incremental OTA, always include a full copy of
+ bootloader image. This option is only meaningful when -i is specified,
+ because a full bootloader is always included in a full OTA if applicable.
+
-v (--verify)
Remount and verify the checksums of the files written to the
system and vendor (if used) partitions. Incremental builds only.
@@ -84,6 +89,10 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
Specifies the number of worker-threads that will be used when
generating patches for incremental updates (defaults to 3).
+ --stash_threshold <float>
+ Specifies the threshold that will be used to compute the maximum
+ allowed stash size (defaults to 0.8).
+
--backup <boolean>
Enable or disable the execution of backuptool.sh.
Disabled by default.
@@ -133,6 +142,10 @@ OPTIONS.updater_binary = None
OPTIONS.oem_source = None
OPTIONS.fallback_to_full = True
OPTIONS.full_radio = False
+OPTIONS.full_bootloader = False
+# Stash size cannot exceed cache_size * threshold.
+OPTIONS.cache_size = None
+OPTIONS.stash_threshold = 0.8
OPTIONS.backuptool = False
OPTIONS.override_device = 'auto'
OPTIONS.override_prop = False
@@ -1571,6 +1584,8 @@ def main(argv):
OPTIONS.incremental_source = a
elif o == "--full_radio":
OPTIONS.full_radio = True
+ elif o == "--full_bootloader":
+ OPTIONS.full_bootloader = True
elif o in ("-w", "--wipe_user_data"):
OPTIONS.wipe_user_data = True
elif o in ("-n", "--no_prereq"):
@@ -1602,6 +1617,12 @@ def main(argv):
OPTIONS.updater_binary = a
elif o in ("--no_fallback_to_full",):
OPTIONS.fallback_to_full = False
+ elif o == "--stash_threshold":
+ try:
+ OPTIONS.stash_threshold = float(a)
+ except ValueError:
+ raise ValueError("Cannot parse value %r for option %r - expecting "
+ "a float" % (a, o))
elif o in ("--backup"):
OPTIONS.backuptool = bool(a.lower() == 'true')
elif o in ("--override_device"):
@@ -1619,6 +1640,7 @@ def main(argv):
"package_key=",
"incremental_from=",
"full_radio",
+ "full_bootloader",
"wipe_user_data",
"no_prereq",
"extra_script=",
@@ -1631,6 +1653,7 @@ def main(argv):
"oem_settings=",
"verify",
"no_fallback_to_full",
+ "stash_threshold=",
"backup=",
"override_device=",
"override_prop="
@@ -1691,6 +1714,11 @@ def main(argv):
output_zip = zipfile.ZipFile(temp_zip_file, "w",
compression=zipfile.ZIP_DEFLATED)
+ cache_size = OPTIONS.info_dict.get("cache_size", None)
+ if cache_size is None:
+ raise RuntimeError("can't determine the cache partition size")
+ OPTIONS.cache_size = cache_size
+
if OPTIONS.incremental_source is None:
WriteFullOTAPackage(input_zip, output_zip)
if OPTIONS.package_key is None:
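
The stash budget enforced by blockimgdiff.py is derived directly from these two values. A back-of-the-envelope sketch with made-up numbers (a 256 MiB cache partition, the default threshold and a 4 KiB block size):

    blocksize = 4096
    cache_size = 256 * 1024 * 1024   # "cache_size" from the target's info dict
    stash_threshold = 0.8            # --stash_threshold default

    max_allowed = cache_size * stash_threshold     # bytes usable for stashing
    max_allowed_blocks = max_allowed / blocksize   # block limit in ReviseStashSize()

    max_stashed_blocks = 40000                     # hypothetical peak stash usage
    assert max_stashed_blocks * blocksize < max_allowed
    print("max stashed: %d bytes, limit: %d bytes (%.2f%%)" % (
        max_stashed_blocks * blocksize, max_allowed,
        max_stashed_blocks * blocksize * 100.0 / max_allowed))

If the peak would exceed the limit, ReviseStashSize() converts the offending transfers to "new" commands so that WriteTransfers() stays under the bound asserted in the blockimgdiff.py hunk above.
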
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index 1506658..373bbed 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -24,6 +24,7 @@ class RangeSet(object):
lots of runs."""
def __init__(self, data=None):
+ # TODO(tbao): monotonic is broken when passing in a tuple.
self.monotonic = False
if isinstance(data, str):
self._parse_internal(data)
@@ -260,6 +261,38 @@ class RangeSet(object):
out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
return out
+ def first(self, n):
+ """Return the RangeSet that contains at most the first 'n' integers.
+
+ >>> RangeSet("0-9").first(1)
+ <RangeSet("0")>
+ >>> RangeSet("10-19").first(5)
+ <RangeSet("10-14")>
+ >>> RangeSet("10-19").first(15)
+ <RangeSet("10-19")>
+ >>> RangeSet("10-19 30-39").first(3)
+ <RangeSet("10-12")>
+ >>> RangeSet("10-19 30-39").first(15)
+ <RangeSet("10-19 30-34")>
+ >>> RangeSet("10-19 30-39").first(30)
+ <RangeSet("10-19 30-39")>
+ >>> RangeSet("0-9").first(0)
+ <RangeSet("")>
+ """
+
+ if self.size() <= n:
+ return self
+
+ out = []
+ for s, e in self:
+ if e - s >= n:
+ out += (s, s+n)
+ break
+ else:
+ out += (s, e)
+ n -= e - s
+ return RangeSet(data=out)
+
if __name__ == "__main__":
import doctest
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index ec49112..60d62c2 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -203,11 +203,13 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
common.ZipWriteStr(output_tf_zip, out_info, data)
elif info.filename in ("SYSTEM/build.prop",
"VENDOR/build.prop",
+ "BOOT/RAMDISK/default.prop",
"RECOVERY/RAMDISK/default.prop"):
print "rewriting %s:" % (info.filename,)
new_data = RewriteProps(data, misc_info)
common.ZipWriteStr(output_tf_zip, out_info, new_data)
- if info.filename == "RECOVERY/RAMDISK/default.prop":
+ if info.filename in ("BOOT/RAMDISK/default.prop",
+ "RECOVERY/RAMDISK/default.prop"):
write_to_temp(info.filename, info.external_attr, new_data)
elif info.filename.endswith("mac_permissions.xml"):
print "rewriting %s with new keys." % (info.filename,)
@@ -310,6 +312,10 @@ def RewriteProps(data, misc_info):
pieces = value.split("/")
pieces[-1] = EditTags(pieces[-1])
value = "/".join(pieces)
+ elif key == "ro.bootimage.build.fingerprint":
+ pieces = value.split("/")
+ pieces[-1] = EditTags(pieces[-1])
+ value = "/".join(pieces)
elif key == "ro.build.description":
pieces = value.split(" ")
assert len(pieces) == 5