Diffstat (limited to 'tools')
-rwxr-xr-x  tools/buildinfo.sh                                            |   1
-rw-r--r--  tools/droiddoc/templates-pdk/assets/images/android_logo.png   | bin 0 -> 4934 bytes
-rw-r--r--  tools/droiddoc/templates-pdk/components/masthead.cs           |   5
-rw-r--r--  tools/droiddoc/templates-sac/assets/css/default.css           |   4
-rwxr-xr-x  tools/java-event-log-tags.py                                  |   2
-rwxr-xr-x  tools/releasetools/add_img_to_target_files.py                 |  12
-rw-r--r--  tools/releasetools/blockimgdiff.py                            |  44
-rwxr-xr-x  tools/releasetools/build_image.py                             |  90
-rwxr-xr-x  tools/releasetools/check_target_files_signatures.py          |   2
-rw-r--r--  tools/releasetools/common.py                                  | 137
-rw-r--r--  tools/releasetools/edify_generator.py                         |  21
-rwxr-xr-x  tools/releasetools/img_from_target_files.py                   |  13
-rwxr-xr-x  tools/releasetools/ota_from_target_files.py                   |  40
-rwxr-xr-x  tools/releasetools/sign_target_files_apks.py                  |  16
-rw-r--r--  tools/releasetools/sparse_img.py                              |  50
-rw-r--r--  tools/releasetools/test_common.py                             | 249
16 files changed, 536 insertions, 150 deletions
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index dcd9ab5..6cd9499 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -7,6 +7,7 @@ echo "ro.build.id=$BUILD_ID"
echo "ro.build.display.id=$BUILD_DISPLAY_ID"
echo "ro.build.version.incremental=$BUILD_NUMBER"
echo "ro.build.version.sdk=$PLATFORM_SDK_VERSION"
+echo "ro.build.version.preview_sdk=$PLATFORM_PREVIEW_SDK_VERSION"
echo "ro.build.version.codename=$PLATFORM_VERSION_CODENAME"
echo "ro.build.version.all_codenames=$PLATFORM_VERSION_ALL_CODENAMES"
echo "ro.build.version.release=$PLATFORM_VERSION"
diff --git a/tools/droiddoc/templates-pdk/assets/images/android_logo.png b/tools/droiddoc/templates-pdk/assets/images/android_logo.png
new file mode 100644
index 0000000..4040f3f
--- /dev/null
+++ b/tools/droiddoc/templates-pdk/assets/images/android_logo.png
Binary files differ
diff --git a/tools/droiddoc/templates-pdk/components/masthead.cs b/tools/droiddoc/templates-pdk/components/masthead.cs
index 05437f3..874c11c 100644
--- a/tools/droiddoc/templates-pdk/components/masthead.cs
+++ b/tools/droiddoc/templates-pdk/components/masthead.cs
@@ -1,10 +1,9 @@
-<?cs
+<?cs
def:custom_masthead() ?>
<div id="header">
<div id="headerLeft">
<a href="<?cs var:toroot ?>guide/index.html"><img
- src="<?cs var:toroot ?>assets/images/android-partner-logo.png" alt="Android Platform Development Kit" /></a>
-
+ src="<?cs var:toroot ?>assets/images/android_logo.png" alt="Android Platform Development Kit" /></a>
</div>
<div id="headerRight">
<div id="headerLinks">
diff --git a/tools/droiddoc/templates-sac/assets/css/default.css b/tools/droiddoc/templates-sac/assets/css/default.css
index c1a0c19..ca998f1 100644
--- a/tools/droiddoc/templates-sac/assets/css/default.css
+++ b/tools/droiddoc/templates-sac/assets/css/default.css
@@ -1093,8 +1093,12 @@ ul ul, ul ol, ol ul, ol ol {
li {
margin:0 0 5px;
}
+dt {
+ font-weight:bold;
+}
dd {
margin:0 0 10px 30px;
+ padding: 5px;
}
dd p,
dd pre,
diff --git a/tools/java-event-log-tags.py b/tools/java-event-log-tags.py
index 846d9cf..f364751 100755
--- a/tools/java-event-log-tags.py
+++ b/tools/java-event-log-tags.py
@@ -129,7 +129,7 @@ def javaName(name):
out += "_"
return out
-javaTypes = ["ERROR", "int", "long", "String", "Object[]"]
+javaTypes = ["ERROR", "int", "long", "String", "Object[]", "float"]
for t in tagfile.tags:
methodName = javaName("write_" + t.tagname)
if t.description:
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 7984ad6..eab8113 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -33,10 +33,6 @@ import os
import tempfile
import zipfile
-# missing in Python 2.4 and before
-if not hasattr(os, "SEEK_SET"):
- os.SEEK_SET = 0
-
import build_image
import common
@@ -189,7 +185,7 @@ def AddUserdata(output_zip, prefix="IMAGES/"):
assert succ, "build userdata.img image failed"
common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
- output_zip.write(img.name, prefix + "userdata.img")
+ common.ZipWrite(output_zip, img.name, prefix + "userdata.img")
img.close()
os.rmdir(user_dir)
os.rmdir(temp_dir)
@@ -226,7 +222,7 @@ def AddCache(output_zip, prefix="IMAGES/"):
assert succ, "build cache.img image failed"
common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
- output_zip.write(img.name, prefix + "cache.img")
+ common.ZipWrite(output_zip, img.name, prefix + "cache.img")
img.close()
os.rmdir(user_dir)
os.rmdir(temp_dir)
@@ -252,7 +248,7 @@ def AddImagesToTargetFiles(filename):
OPTIONS.info_dict["selinux_fc"] = os.path.join(
OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts")
- input_zip.close()
+ common.ZipClose(input_zip)
output_zip = zipfile.ZipFile(filename, "a",
compression=zipfile.ZIP_DEFLATED)
@@ -297,7 +293,7 @@ def AddImagesToTargetFiles(filename):
banner("cache")
AddCache(output_zip)
- output_zip.close()
+ common.ZipClose(output_zip)
def main(argv):
def option_handler(o, _):
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index d549f70..3402572 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -74,7 +74,7 @@ class Image(object):
def ReadRangeSet(self, ranges):
raise NotImplementedError
- def TotalSha1(self):
+ def TotalSha1(self, include_clobbered_blocks=False):
raise NotImplementedError
@@ -82,11 +82,15 @@ class EmptyImage(Image):
"""A zero-length image."""
blocksize = 4096
care_map = RangeSet()
+ clobbered_blocks = RangeSet()
total_blocks = 0
file_map = {}
def ReadRangeSet(self, ranges):
return ()
- def TotalSha1(self):
+ def TotalSha1(self, include_clobbered_blocks=False):
+ # EmptyImage always carries empty clobbered_blocks, so
+ # include_clobbered_blocks can be ignored.
+ assert self.clobbered_blocks.size() == 0
return sha1().hexdigest()
@@ -114,6 +118,7 @@ class DataImage(Image):
self.total_blocks = len(self.data) / self.blocksize
self.care_map = RangeSet(data=(0, self.total_blocks))
+ self.clobbered_blocks = RangeSet()
zero_blocks = []
nonzero_blocks = []
@@ -134,7 +139,10 @@ class DataImage(Image):
def ReadRangeSet(self, ranges):
return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
- def TotalSha1(self):
+ def TotalSha1(self, include_clobbered_blocks=False):
+ # DataImage always carries empty clobbered_blocks, so
+ # include_clobbered_blocks can be ignored.
+ assert self.clobbered_blocks.size() == 0
return sha1(self.data).hexdigest()
@@ -184,6 +192,10 @@ class Transfer(object):
# (Typically a domain is a file, and the key in file_map is the
# pathname.)
#
+# clobbered_blocks: a RangeSet containing which blocks contain data
+# but may be altered by the FS. They need to be excluded when
+# verifying the partition integrity.
+#
# ReadRangeSet(): a function that takes a RangeSet and returns the
# data contained in the image blocks of that RangeSet. The data
# is returned as a list or tuple of strings; concatenating the
@@ -193,14 +205,15 @@ class Transfer(object):
#
# TotalSha1(): a function that returns (as a hex string) the SHA-1
# hash of all the data in the image (ie, all the blocks in the
-# care_map)
+# care_map minus clobbered_blocks, or including the clobbered
+# blocks if include_clobbered_blocks is True).
#
# When creating a BlockImageDiff, the src image may be None, in which
# case the list of transfers produced will never read from the
# original image.
class BlockImageDiff(object):
- def __init__(self, tgt, src=None, threads=None, version=2):
+ def __init__(self, tgt, src=None, threads=None, version=3):
if threads is None:
threads = multiprocessing.cpu_count() // 2
if threads == 0:
@@ -211,7 +224,7 @@ class BlockImageDiff(object):
self.src_basenames = {}
self.src_numpatterns = {}
- assert version in (1, 2)
+ assert version in (1, 2, 3)
self.tgt = tgt
if src is None:
@@ -295,7 +308,15 @@ class BlockImageDiff(object):
next_stash_id += 1
stashes[s] = sid
stashed_blocks += sr.size()
- out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+ if self.version == 2:
+ out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+ else:
+ sh = self.HashBlocks(self.src, sr)
+ if sh in stashes:
+ stashes[sh] += 1
+ else:
+ stashes[sh] = 1
+ out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
if stashed_blocks > max_stashed_blocks:
max_stashed_blocks = stashed_blocks
@@ -321,6 +342,7 @@ class BlockImageDiff(object):
sid = stashes.pop(s)
stashed_blocks -= sr.size()
unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+ sh = self.HashBlocks(self.src, sr)
sr = xf.src_ranges.map_within(sr)
mapped_stashes.append(sr)
if self.version == 2:
@@ -347,7 +369,7 @@ class BlockImageDiff(object):
src_str = " ".join(src_str)
- # both versions:
+ # all versions:
# zero <rangeset>
# new <rangeset>
# erase <rangeset>
@@ -835,6 +857,12 @@ class BlockImageDiff(object):
"zero", self.transfers)
continue
+ elif tgt_fn == "__COPY":
+ # "__COPY" domain includes all the blocks not contained in any
+ # file and that need to be copied unconditionally to the target.
+ Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+ continue
+
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
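
[Editor's note] The version 3 format above keys stashes by content hash rather than by a numeric slot id, so two loads of byte-identical source ranges share one stash entry and the dict value acts as a reference count. A minimal, self-contained sketch of that bookkeeping; stash_v3, the sample block data, and the literal RangeSet raw strings are illustrative, not part of blockimgdiff.py:

    from hashlib import sha1

    stashes = {}

    def stash_v3(block_data, range_str, out):
        # Version 3 names a stash by the SHA-1 of its contents; stashing
        # identical data again only bumps the reference count.
        sh = sha1(block_data).hexdigest()
        stashes[sh] = stashes.get(sh, 0) + 1
        out.append("stash %s %s\n" % (sh, range_str))

    out = []
    stash_v3(b"\x00" * 4096, "2,0,1", out)   # blocks [0, 1)
    stash_v3(b"\x00" * 4096, "2,5,6", out)   # same contents: refcount -> 2
    assert stashes[sha1(b"\x00" * 4096).hexdigest()] == 2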
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 033ade9..5e8f0e6 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -22,26 +22,31 @@ Usage: build_image input_directory properties_file output_image_file
"""
import os
import os.path
+import re
import subprocess
import sys
import commands
+import common
import shutil
import tempfile
+OPTIONS = common.OPTIONS
+
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
def RunCommand(cmd):
- """ Echo and run the given command
+ """Echo and run the given command.
Args:
cmd: the command represented as a list of strings.
Returns:
- The exit code.
+ A tuple of the output and the exit code.
"""
print "Running: ", " ".join(cmd)
- p = subprocess.Popen(cmd)
- p.communicate()
- return p.returncode
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output, _ = p.communicate()
+ print "%s" % (output.rstrip(),)
+ return (output, p.returncode)
def GetVerityTreeSize(partition_size):
cmd = "build_verity_tree -s %d"
@@ -55,6 +60,7 @@ def GetVerityTreeSize(partition_size):
def GetVerityMetadataSize(partition_size):
cmd = "system/extras/verity/build_verity_metadata.py -s %d"
cmd %= partition_size
+
status, output = commands.getstatusoutput(cmd)
if status:
print output
@@ -142,7 +148,7 @@ def UnsparseImage(sparse_image_path, replace=True):
else:
return True, unsparse_image_path
inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
- exit_code = RunCommand(inflate_command)
+ (_, exit_code) = RunCommand(inflate_command)
if exit_code != 0:
os.remove(unsparse_image_path)
return False, None
@@ -162,7 +168,11 @@ def MakeVerityEnabledImage(out_file, prop_dict):
image_size = prop_dict["partition_size"]
block_dev = prop_dict["verity_block_device"]
signer_key = prop_dict["verity_key"] + ".pk8"
- signer_path = prop_dict["verity_signer_cmd"]
+ if OPTIONS.verity_signer_path is not None:
+ signer_path = OPTIONS.verity_signer_path + ' '
+ signer_path += ' '.join(OPTIONS.verity_signer_args)
+ else:
+ signer_path = prop_dict["verity_signer_cmd"]
# make a tempdir
tempdir_name = tempfile.mkdtemp(suffix="_verity_images")
@@ -205,8 +215,8 @@ def BuildImage(in_dir, prop_dict, out_file):
Returns:
True iff the image is built successfully.
"""
- # system_root_image=true: build a system.img that combines the contents of /system
- # and the ramdisk, and can be mounted at the root of the file system.
+ # system_root_image=true: build a system.img that combines the contents of
+ # /system and the ramdisk, and can be mounted at the root of the file system.
origin_in = in_dir
fs_config = prop_dict.get("fs_config")
if (prop_dict.get("system_root_image") == "true"
@@ -233,13 +243,15 @@ def BuildImage(in_dir, prop_dict, out_file):
fs_spans_partition = True
if fs_type.startswith("squash"):
- fs_spans_partition = False
+ fs_spans_partition = False
is_verity_partition = "verity_block_device" in prop_dict
verity_supported = prop_dict.get("verity") == "true"
- # adjust the partition size to make room for the hashes if this is to be verified
+ # Adjust the partition size to make room for the hashes if this is to be
+ # verified.
if verity_supported and is_verity_partition and fs_spans_partition:
partition_size = int(prop_dict.get("partition_size"))
+
adjusted_size = AdjustPartitionSizeForVerity(partition_size)
if not adjusted_size:
return False
@@ -298,8 +310,15 @@ def BuildImage(in_dir, prop_dict, out_file):
staging_system = os.path.join(in_dir, "system")
shutil.rmtree(staging_system, ignore_errors=True)
shutil.copytree(origin_in, staging_system, symlinks=True)
+
+ reserved_blocks = prop_dict.get("has_ext4_reserved_blocks") == "true"
+ ext4fs_output = None
+
try:
- exit_code = RunCommand(build_command)
+ if reserved_blocks and fs_type.startswith("ext4"):
+ (ext4fs_output, exit_code) = RunCommand(build_command)
+ else:
+ (_, exit_code) = RunCommand(build_command)
finally:
if in_dir != origin_in:
# Clean up temporary directories and files.
@@ -309,17 +328,42 @@ def BuildImage(in_dir, prop_dict, out_file):
if exit_code != 0:
return False
+ # Bug: 21522719, 22023465
+ # There are some reserved blocks on ext4 FS (lesser of 4096 blocks and 2%).
+ # We need to deduct those blocks from the available space, since they are
+ # not writable even with root privilege. It only affects devices using
+ # file-based OTA and a kernel version of 3.10 or greater (currently just
+ # sprout).
+ if reserved_blocks and fs_type.startswith("ext4"):
+ assert ext4fs_output is not None
+ ext4fs_stats = re.compile(
+ r'Created filesystem with .* (?P<used_blocks>[0-9]+)/'
+ r'(?P<total_blocks>[0-9]+) blocks')
+ m = ext4fs_stats.match(ext4fs_output.strip().split('\n')[-1])
+ used_blocks = int(m.groupdict().get('used_blocks'))
+ total_blocks = int(m.groupdict().get('total_blocks'))
+ reserved_blocks = min(4096, int(total_blocks * 0.02))
+ adjusted_blocks = total_blocks - reserved_blocks
+ if used_blocks > adjusted_blocks:
+ mount_point = prop_dict.get("mount_point")
+ print("Error: Not enough room on %s (total: %d blocks, used: %d blocks, "
+ "reserved: %d blocks, available: %d blocks)" % (
+ mount_point, total_blocks, used_blocks, reserved_blocks,
+ adjusted_blocks))
+ return False
+
if not fs_spans_partition:
mount_point = prop_dict.get("mount_point")
partition_size = int(prop_dict.get("partition_size"))
image_size = os.stat(out_file).st_size
if image_size > partition_size:
- print "Error: %s image size of %d is larger than partition size of %d" % (mount_point, image_size, partition_size)
- return False
+ print("Error: %s image size of %d is larger than partition size of "
+ "%d" % (mount_point, image_size, partition_size))
+ return False
if verity_supported and is_verity_partition:
- if 2 * image_size - AdjustPartitionSizeForVerity(image_size) > partition_size:
- print "Error: No more room on %s to fit verity data" % mount_point
- return False
+ if 2 * image_size - AdjustPartitionSizeForVerity(image_size) > partition_size:
+ print "Error: No more room on %s to fit verity data" % mount_point
+ return False
prop_dict["original_partition_size"] = prop_dict["partition_size"]
prop_dict["partition_size"] = str(image_size)
@@ -335,7 +379,7 @@ def BuildImage(in_dir, prop_dict, out_file):
# Run e2fsck on the inflated image file
e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
- exit_code = RunCommand(e2fsck_command)
+ (_, exit_code) = RunCommand(e2fsck_command)
os.remove(unsparse_image)
@@ -380,8 +424,9 @@ def ImagePropFromGlobalDict(glob_dict, mount_point):
copy_prop("system_size", "partition_size")
copy_prop("system_journal_size", "journal_size")
copy_prop("system_verity_block_device", "verity_block_device")
- copy_prop("system_root_image","system_root_image")
- copy_prop("ramdisk_dir","ramdisk_dir")
+ copy_prop("system_root_image", "system_root_image")
+ copy_prop("ramdisk_dir", "ramdisk_dir")
+ copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
copy_prop("system_squashfs_compressor", "squashfs_compressor")
copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
elif mount_point == "data":
@@ -397,10 +442,12 @@ def ImagePropFromGlobalDict(glob_dict, mount_point):
copy_prop("vendor_size", "partition_size")
copy_prop("vendor_journal_size", "journal_size")
copy_prop("vendor_verity_block_device", "verity_block_device")
+ copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
elif mount_point == "oem":
copy_prop("fs_type", "fs_type")
copy_prop("oem_size", "partition_size")
copy_prop("oem_journal_size", "journal_size")
+ copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
return d
@@ -430,7 +477,8 @@ def main(argv):
glob_dict = LoadGlobalDict(glob_dict_file)
if "mount_point" in glob_dict:
- # The caller knows the mount point and provides a dictionay needed by BuildImage().
+ # The caller knows the mount point and provides a dictionay needed by
+ # BuildImage().
image_properties = glob_dict
else:
image_filename = os.path.basename(out_file)
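
[Editor's note] The new reserved-blocks check works by scraping the "Created filesystem with m/n blocks" summary that the ext4 build command prints, then deducting the reserved blocks (the lesser of 4096 and 2% of the filesystem) before comparing against the used count. A standalone sketch of that arithmetic, using a fabricated sample of the mkfs summary line:

    import re

    # Fabricated sample; in build_image.py this is the last line of the
    # output captured by RunCommand(build_command).
    ext4fs_output = "Created filesystem with 11/1280 inodes and 244190/262144 blocks"

    m = re.match(r'Created filesystem with .* (?P<used_blocks>[0-9]+)/'
                 r'(?P<total_blocks>[0-9]+) blocks', ext4fs_output)
    used_blocks = int(m.group('used_blocks'))
    total_blocks = int(m.group('total_blocks'))

    # Lesser of 4096 blocks and 2%, per the comment in the patch above.
    reserved_blocks = min(4096, int(total_blocks * 0.02))
    assert used_blocks <= total_blocks - reserved_blocks  # else BuildImage fails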
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index dd57033..5c541ab 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -331,7 +331,7 @@ class TargetFiles(object):
by_certpair = {}
- for i in all:
+ for i in all_apks:
if i in self.apks:
if i in other.apks:
# in both; should have same set of certs
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 592ed19..6f921e0 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -32,10 +32,7 @@ import zipfile
import blockimgdiff
import rangelib
-try:
- from hashlib import sha1 as sha1
-except ImportError:
- from sha import sha as sha1
+from hashlib import sha1 as sha1
class Options(object):
@@ -52,6 +49,11 @@ class Options(object):
self.java_args = "-Xmx2048m" # JVM Args
self.public_key_suffix = ".x509.pem"
self.private_key_suffix = ".pk8"
+ # use otatools built boot_signer by default
+ self.boot_signer_path = "boot_signer"
+ self.boot_signer_args = []
+ self.verity_signer_path = None
+ self.verity_signer_args = []
self.verbose = False
self.tempfiles = []
self.device_specific = None
@@ -200,12 +202,13 @@ def LoadDictionaryFromLines(lines):
def LoadRecoveryFSTab(read_helper, fstab_version):
class Partition(object):
- def __init__(self, mount_point, fs_type, device, length, device2):
+ def __init__(self, mount_point, fs_type, device, length, device2, context):
self.mount_point = mount_point
self.fs_type = fs_type
self.device = device
self.length = length
self.device2 = device2
+ self.context = context
try:
data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
@@ -254,6 +257,7 @@ def LoadRecoveryFSTab(read_helper, fstab_version):
line = line.strip()
if not line or line.startswith("#"):
continue
+ # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
pieces = line.split()
if len(pieces) != 5:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
@@ -273,9 +277,17 @@ def LoadRecoveryFSTab(read_helper, fstab_version):
# Ignore all unknown options in the unified fstab
continue
+ mount_flags = pieces[3]
+ # Honor the SELinux context if present.
+ context = None
+ for i in mount_flags.split(","):
+ if i.startswith("context="):
+ context = i
+
mount_point = pieces[1]
d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
- device=pieces[0], length=length, device2=None)
+ device=pieces[0], length=length,
+ device2=None, context=context)
else:
raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
@@ -360,10 +372,14 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
assert p.returncode == 0, "mkbootimg of %s image failed" % (
os.path.basename(sourcedir),)
- if info_dict.get("verity_key", None):
+ if (info_dict.get("boot_signer", None) == "true" and
+ info_dict.get("verity_key", None)):
path = "/" + os.path.basename(sourcedir).lower()
- cmd = ["boot_signer", path, img.name, info_dict["verity_key"] + ".pk8",
- info_dict["verity_key"] + ".x509.pem", img.name]
+ cmd = [OPTIONS.boot_signer_path]
+ cmd.extend(OPTIONS.boot_signer_args)
+ cmd.extend([path, img.name,
+ info_dict["verity_key"] + ".pk8",
+ info_dict["verity_key"] + ".x509.pem", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "boot_signer of %s image failed" % path
@@ -380,6 +396,10 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
p.communicate()
assert p.returncode == 0, "vboot_signer of %s image failed" % path
+ # Clean up the temp files.
+ img_unsigned.close()
+ img_keyblock.close()
+
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -540,7 +560,7 @@ def SignFile(input_name, output_name, key, password, align=None,
raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
if align:
- p = Run(["zipalign", "-f", str(align), sign_name, output_name])
+ p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
p.communicate()
if p.returncode != 0:
raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
@@ -652,7 +672,9 @@ def ParseOptions(argv,
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
"java_path=", "java_args=", "public_key_suffix=",
- "private_key_suffix=", "device_specific=", "extra="] +
+ "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
+ "verity_signer_path=", "verity_signer_args=", "device_specific=",
+ "extra="] +
list(extra_long_opts))
except getopt.GetoptError as err:
Usage(docstring)
@@ -679,6 +701,14 @@ def ParseOptions(argv,
OPTIONS.public_key_suffix = a
elif o in ("--private_key_suffix",):
OPTIONS.private_key_suffix = a
+ elif o in ("--boot_signer_path",):
+ OPTIONS.boot_signer_path = a
+ elif o in ("--boot_signer_args",):
+ OPTIONS.boot_signer_args = shlex.split(a)
+ elif o in ("--verity_signer_path",):
+ OPTIONS.verity_signer_path = a
+ elif o in ("--verity_signer_args",):
+ OPTIONS.verity_signer_args = shlex.split(a)
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
@@ -854,16 +884,55 @@ def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
zipfile.ZIP64_LIMIT = saved_zip64_limit
-def ZipWriteStr(zip_file, filename, data, perms=0o644, compression=None):
- # use a fixed timestamp so the output is repeatable.
- zinfo = zipfile.ZipInfo(filename=filename,
- date_time=(2009, 1, 1, 0, 0, 0))
- if compression is None:
+def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
+ compress_type=None):
+ """Wrap zipfile.writestr() function to work around the zip64 limit.
+
+ Even with the ZIP64_LIMIT workaround, it won't allow writing a string
+ longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
+ when calling crc32(bytes).
+
+ But it still works fine to write a shorter string into a large zip file.
+ We should use ZipWrite() whenever possible, and only use ZipWriteStr()
+ when we know the string won't be too long.
+ """
+
+ saved_zip64_limit = zipfile.ZIP64_LIMIT
+ zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+ if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
+ zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
zinfo.compress_type = zip_file.compression
+ if perms is None:
+ perms = 0o644
else:
- zinfo.compress_type = compression
- zinfo.external_attr = perms << 16
+ zinfo = zinfo_or_arcname
+
+ # If compress_type is given, it overrides the value in zinfo.
+ if compress_type is not None:
+ zinfo.compress_type = compress_type
+
+ # If perms is given, it has a priority.
+ if perms is not None:
+ zinfo.external_attr = perms << 16
+
+ # Use a fixed timestamp so the output is repeatable.
+ zinfo.date_time = (2009, 1, 1, 0, 0, 0)
+
zip_file.writestr(zinfo, data)
+ zipfile.ZIP64_LIMIT = saved_zip64_limit
+
+
+def ZipClose(zip_file):
+ # http://b/18015246
+ # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
+ # central directory.
+ saved_zip64_limit = zipfile.ZIP64_LIMIT
+ zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+ zip_file.close()
+
+ zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
@@ -969,7 +1038,7 @@ class File(object):
return t
def AddToZip(self, z, compression=None):
- ZipWriteStr(z, self.name, self.data, compression=compression)
+ ZipWriteStr(z, self.name, self.data, compress_type=compression)
DIFF_PROGRAM_BY_EXT = {
".gz" : "imgdiff",
@@ -1106,6 +1175,9 @@ class BlockDifference(object):
self.partition = partition
self.check_first_block = check_first_block
+ # Due to http://b/20939131, check_first_block is disabled temporarily.
+ assert not self.check_first_block
+
if version is None:
version = 1
if OPTIONS.info_dict:
@@ -1133,24 +1205,25 @@ class BlockDifference(object):
if progress:
script.ShowProgress(progress, 0)
self._WriteUpdate(script, output_zip)
+ self._WritePostInstallVerifyScript(script)
def WriteVerifyScript(self, script):
partition = self.partition
if not self.src:
script.Print("Image %s will be patched unconditionally." % (partition,))
else:
+ ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+ ranges_str = ranges.to_string_raw()
if self.version >= 3:
script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
'block_image_verify("%s", '
'package_extract_file("%s.transfer.list"), '
'"%s.new.dat", "%s.patch.dat")) then') % (
- self.device, self.src.care_map.to_string_raw(),
- self.src.TotalSha1(),
+ self.device, ranges_str, self.src.TotalSha1(),
self.device, partition, partition, partition))
else:
script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
- self.device, self.src.care_map.to_string_raw(),
- self.src.TotalSha1()))
+ self.device, ranges_str, self.src.TotalSha1()))
script.Print('Verified %s image...' % (partition,))
script.AppendExtra('else')
@@ -1171,6 +1244,21 @@ class BlockDifference(object):
script.AppendExtra(('abort("%s partition has unexpected contents");\n'
'endif;') % (partition,))
+ def _WritePostInstallVerifyScript(self, script):
+ partition = self.partition
+ script.Print('Verifying the updated %s image...' % (partition,))
+ # Unlike pre-install verification, clobbered_blocks should not be ignored.
+ ranges = self.tgt.care_map
+ ranges_str = ranges.to_string_raw()
+ script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
+ self.device, ranges_str,
+ self.tgt.TotalSha1(include_clobbered_blocks=True)))
+ script.Print('Verified the updated %s image.' % (partition,))
+ script.AppendExtra(
+ 'else\n'
+ ' abort("%s partition has unexpected contents after OTA update");\n'
+ 'endif;' % (partition,))
+
def _WriteUpdate(self, script, output_zip):
ZipWrite(output_zip,
'{}.transfer.list'.format(self.path),
@@ -1198,6 +1286,9 @@ class BlockDifference(object):
return ctx.hexdigest()
+ # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
+ # remounting R/W. Will change the checking to a finer-grained way to
+ # mask off those bits.
def _CheckFirstBlock(self, script):
r = rangelib.RangeSet((0, 1))
srchash = self._HashBlocks(self.src, r)
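
[Editor's note] ZipWrite(), ZipWriteStr() and ZipClose() all share one idiom: temporarily raise zipfile.ZIP64_LIMIT so Python's zipfile keeps writing 32-bit records for archives in the 2 to 4 GiB range, then put the default back. A distilled sketch of that idiom; the wrapper name is hypothetical, and per the docstring above a single string over 2 GiB still fails inside zlib's crc32, which is why ZipWrite() stays the preferred path for large payloads:

    import zipfile

    def _with_raised_zip64_limit(fn, *args, **kwargs):
        # Save, bump to just under 4 GiB, run, restore.
        saved = zipfile.ZIP64_LIMIT
        zipfile.ZIP64_LIMIT = (1 << 32) - 1
        try:
            return fn(*args, **kwargs)
        finally:
            zipfile.ZIP64_LIMIT = saved

    # Roughly what common.ZipClose(zf) does, covering the close-time
    # central-directory write mentioned in http://b/18015246:
    zf = zipfile.ZipFile("example.zip", "w")   # hypothetical archive
    zf.writestr("hello.txt", "hello")
    _with_raised_zip64_limit(zf.close)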
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 3d0da88..566e687 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -20,11 +20,15 @@ class EdifyGenerator(object):
"""Class to generate scripts in the 'edify' recovery script language
used from donut onwards."""
- def __init__(self, version, info):
+ def __init__(self, version, info, fstab=None):
self.script = []
self.mounts = set()
self.version = version
self.info = info
+ if fstab is None:
+ self.fstab = self.info.get("fstab", None)
+ else:
+ self.fstab = fstab
def MakeTemporary(self):
"""Make a temporary script object whose commands can latter be
@@ -168,7 +172,7 @@ class EdifyGenerator(object):
where option is optname[=optvalue]
E.g. ext4=barrier=1,nodelalloc,errors=panic|f2fs=errors=recover
"""
- fstab = self.info.get("fstab", None)
+ fstab = self.fstab
if fstab:
p = fstab[mount_point]
mount_dict = {}
@@ -177,9 +181,12 @@ class EdifyGenerator(object):
if "=" in option:
key, value = option.split("=", 1)
mount_dict[key] = value
+ mount_flags = mount_dict.get(p.fs_type, "")
+ if p.context is not None:
+ mount_flags = p.context + ("," + mount_flags if mount_flags else "")
self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % (
p.fs_type, common.PARTITION_TYPES[p.fs_type], p.device,
- p.mount_point, mount_dict.get(p.fs_type, "")))
+ p.mount_point, mount_flags))
self.mounts.add(p.mount_point)
def UnpackPackageDir(self, src, dst):
@@ -199,7 +206,7 @@ class EdifyGenerator(object):
self.script.append('ui_print("%s");' % (message,))
def TunePartition(self, partition, *options):
- fstab = self.info.get("fstab", None)
+ fstab = self.fstab
if fstab:
p = fstab[partition]
if p.fs_type not in ("ext2", "ext3", "ext4"):
@@ -213,7 +220,7 @@ class EdifyGenerator(object):
"""Format the given partition, specified by its mount point (eg,
"/system")."""
- fstab = self.info.get("fstab", None)
+ fstab = self.fstab
if fstab:
p = fstab[partition]
self.script.append('format("%s", "%s", "%s", "%s", "%s");' %
@@ -223,7 +230,7 @@ class EdifyGenerator(object):
def WipeBlockDevice(self, partition):
if partition not in ("/system", "/vendor"):
raise ValueError(("WipeBlockDevice doesn't work on %s\n") % (partition,))
- fstab = self.info.get("fstab", None)
+ fstab = self.fstab
size = self.info.get(partition.lstrip("/") + "_size", None)
device = fstab[partition].device
@@ -268,7 +275,7 @@ class EdifyGenerator(object):
"""Write the given package file into the partition for the given
mount point."""
- fstab = self.info["fstab"]
+ fstab = self.fstab
if fstab:
p = fstab[mount_point]
partition_type = common.PARTITION_TYPES[p.fs_type]
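
[Editor's note] Mount() now assembles its flag string from two sources: the per-filesystem options configured for the build, prefixed by the SELinux context that LoadRecoveryFSTab() (in the common.py hunks above) extracted from the fstab's mount flags. A minimal sketch of the assembly with invented values; build_mount_flags is a hypothetical helper mirroring the patched logic:

    def build_mount_flags(fs_type, mount_dict, context):
        # Per-fs options first; prefix the context= flag when present.
        flags = mount_dict.get(fs_type, "")
        if context is not None:
            flags = context + ("," + flags if flags else "")
        return flags

    assert build_mount_flags(
        "ext4", {"ext4": "barrier=1"},
        "context=u:object_r:system_file:s0") == \
            "context=u:object_r:system_file:s0,barrier=1"
    assert build_mount_flags("ext4", {}, None) == ""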
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 8c5acd8..c486992 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -43,8 +43,9 @@ OPTIONS = common.OPTIONS
def CopyInfo(output_zip):
"""Copy the android-info.txt file from the input to the output."""
- output_zip.write(os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"),
- "android-info.txt")
+ common.ZipWrite(
+ output_zip, os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"),
+ "android-info.txt")
def main(argv):
@@ -133,13 +134,7 @@ def main(argv):
finally:
print "cleaning up..."
- # http://b/18015246
- # See common.py for context. zipfile also refers to ZIP64_LIMIT during
- # close() when it writes out the central directory.
- saved_zip64_limit = zipfile.ZIP64_LIMIT
- zipfile.ZIP64_LIMIT = (1 << 32) - 1
- output_zip.close()
- zipfile.ZIP64_LIMIT = saved_zip64_limit
+ common.ZipClose(output_zip)
shutil.rmtree(OPTIONS.input_tmp)
print "done."
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index eab3daa..82d6313 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -92,7 +92,6 @@ if sys.hexversion < 0x02070000:
print >> sys.stderr, "Python 2.7 or newer is required."
sys.exit(1)
-import copy
import multiprocessing
import os
import tempfile
@@ -371,6 +370,7 @@ def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
symlinks.append((input_zip.read(info.filename),
"/" + partition + "/" + basefilename))
else:
+ import copy
info2 = copy.copy(info)
fn = info2.filename = partition + "/" + basefilename
if substitute and fn in substitute and substitute[fn] is None:
@@ -380,7 +380,7 @@ def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
data = substitute[fn]
else:
data = input_zip.read(info.filename)
- output_zip.writestr(info2, data)
+ common.ZipWriteStr(output_zip, info2, data)
if fn.endswith("/"):
itemset.Get(fn[:-1], is_dir=True)
else:
@@ -475,13 +475,20 @@ def GetImage(which, tmpdir, info_dict):
path = add_img_to_target_files.BuildVendor(
tmpdir, info_dict, block_list=mappath)
- return sparse_img.SparseImage(path, mappath)
+ # Bug: http://b/20939131
+ # In ext4 filesystems, block 0 might be changed even being mounted
+ # R/O. We add it to clobbered_blocks so that it will be written to the
+ # target unconditionally. Note that they are still part of care_map.
+ clobbered_blocks = "0"
+
+ return sparse_img.SparseImage(path, mappath, clobbered_blocks)
def WriteFullOTAPackage(input_zip, output_zip):
# TODO: how to determine this? We don't know what version it will
- # be installed on top of. For now, we expect the API just won't
- # change very often.
+ # be installed on top of. For now, we expect the API just won't
+ # change very often. Similarly for fstab, it might have changed
+ # in the target build.
script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict)
oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
@@ -721,8 +728,9 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
if source_version == 0:
print ("WARNING: generating edify script for a source that "
"can't install it.")
- script = edify_generator.EdifyGenerator(source_version,
- OPTIONS.target_info_dict)
+ script = edify_generator.EdifyGenerator(
+ source_version, OPTIONS.target_info_dict,
+ fstab=OPTIONS.source_info_dict["fstab"])
metadata = {
"pre-device": GetBuildProp("ro.product.device",
@@ -773,7 +781,6 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
system_diff = common.BlockDifference("system", system_tgt, system_src,
- check_first_block=True,
version=blockimgdiff_version)
if HasVendorPartition(target_zip):
@@ -784,13 +791,12 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
OPTIONS.target_info_dict)
vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
- check_first_block=True,
version=blockimgdiff_version)
else:
vendor_diff = None
oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
- recovery_mount_options = OPTIONS.target_info_dict.get(
+ recovery_mount_options = OPTIONS.source_info_dict.get(
"recovery_mount_options")
oem_dict = None
if oem_props is not None and len(oem_props) > 0:
@@ -1110,11 +1116,13 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
if source_version == 0:
print ("WARNING: generating edify script for a source that "
"can't install it.")
- script = edify_generator.EdifyGenerator(source_version,
- OPTIONS.target_info_dict)
+ script = edify_generator.EdifyGenerator(
+ source_version, OPTIONS.target_info_dict,
+ fstab=OPTIONS.source_info_dict["fstab"])
oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
- recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
+ recovery_mount_options = OPTIONS.source_info_dict.get(
+ "recovery_mount_options")
oem_dict = None
if oem_props is not None and len(oem_props) > 0:
if OPTIONS.oem_source is None:
@@ -1581,6 +1589,7 @@ def main(argv):
OPTIONS.package_key = OPTIONS.info_dict.get(
"default_system_dev_certificate",
"build/target/product/security/testkey")
+ common.ZipClose(output_zip)
break
else:
@@ -1601,15 +1610,14 @@ def main(argv):
common.DumpInfoDict(OPTIONS.source_info_dict)
try:
WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
+ common.ZipClose(output_zip)
break
except ValueError:
if not OPTIONS.fallback_to_full:
raise
print "--- failed to build incremental; falling back to full ---"
OPTIONS.incremental_source = None
- output_zip.close()
-
- output_zip.close()
+ common.ZipClose(output_zip)
if not OPTIONS.no_signing:
SignOutput(temp_zip_file.name, args[1])
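
[Editor's note] GetImage() now tags block 0 of every system/vendor image as clobbered, since ext4 can rewrite the first block even on an R/O mount (http://b/20939131). The "0" string is rangelib's text form; a small sketch of how it interacts with the verification ranges, assuming releasetools' rangelib is importable and a made-up 1024-block image:

    import rangelib  # from build/tools/releasetools

    clobbered_blocks = rangelib.RangeSet.parse("0")   # just block [0, 1)
    care_map = rangelib.RangeSet.parse("0-1023")      # hypothetical image

    # WriteVerifyScript() hashes care_map minus clobbered_blocks, so block 0
    # can never fail pre-install verification; it is rewritten unconditionally.
    verified = care_map.subtract(clobbered_blocks)
    assert verified.to_string_raw() == "2,1,1024"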
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index d47cc4f..ec49112 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -196,23 +196,23 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
if key not in common.SPECIAL_CERT_STRINGS:
print " signing: %-*s (%s)" % (maxsize, name, key)
signed_data = SignApk(data, key, key_passwords[key])
- output_tf_zip.writestr(out_info, signed_data)
+ common.ZipWriteStr(output_tf_zip, out_info, signed_data)
else:
# an APK we're not supposed to sign.
print "NOT signing: %s" % (name,)
- output_tf_zip.writestr(out_info, data)
+ common.ZipWriteStr(output_tf_zip, out_info, data)
elif info.filename in ("SYSTEM/build.prop",
"VENDOR/build.prop",
"RECOVERY/RAMDISK/default.prop"):
print "rewriting %s:" % (info.filename,)
new_data = RewriteProps(data, misc_info)
- output_tf_zip.writestr(out_info, new_data)
+ common.ZipWriteStr(output_tf_zip, out_info, new_data)
if info.filename == "RECOVERY/RAMDISK/default.prop":
write_to_temp(info.filename, info.external_attr, new_data)
elif info.filename.endswith("mac_permissions.xml"):
print "rewriting %s with new keys." % (info.filename,)
new_data = ReplaceCerts(data)
- output_tf_zip.writestr(out_info, new_data)
+ common.ZipWriteStr(output_tf_zip, out_info, new_data)
elif info.filename in ("SYSTEM/recovery-from-boot.p",
"SYSTEM/bin/install-recovery.sh"):
rebuild_recovery = True
@@ -229,7 +229,7 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
pass
else:
# a non-APK file; copy it verbatim
- output_tf_zip.writestr(out_info, data)
+ common.ZipWriteStr(output_tf_zip, out_info, data)
if OPTIONS.replace_ota_keys:
new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
@@ -243,7 +243,7 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
"boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info)
def output_sink(fn, data):
- output_tf_zip.writestr("SYSTEM/"+fn, data)
+ common.ZipWriteStr(output_tf_zip, "SYSTEM/" + fn, data)
common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img,
info_dict=misc_info)
@@ -488,8 +488,8 @@ def main(argv):
ProcessTargetFiles(input_zip, output_zip, misc_info,
apk_key_map, key_passwords)
- input_zip.close()
- output_zip.close()
+ common.ZipClose(input_zip)
+ common.ZipClose(output_zip)
add_img_to_target_files.AddImagesToTargetFiles(args[1])
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 78f3057..7019f00 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -21,10 +21,17 @@ import rangelib
class SparseImage(object):
- """Wraps a sparse image file (and optional file map) into an image
- object suitable for passing to BlockImageDiff."""
+ """Wraps a sparse image file into an image object.
- def __init__(self, simg_fn, file_map_fn=None):
+ Wraps a sparse image file (and optional file map and clobbered_blocks) into
+ an image object suitable for passing to BlockImageDiff. file_map contains
+ the mapping between files and their blocks. clobbered_blocks contains the set
+ of blocks that should be always written to the target regardless of the old
+ contents (i.e. copying instead of patching). clobbered_blocks should be in
+ the form of a string like "0" or "0 1-5 8".
+ """
+
+ def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
self.simg_f = f = open(simg_fn, "rb")
header_bin = f.read(28)
@@ -57,6 +64,7 @@ class SparseImage(object):
pos = 0 # in blocks
care_data = []
self.offset_map = offset_map = []
+ self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)
for i in range(total_chunks):
header_bin = f.read(12)
@@ -103,17 +111,23 @@ class SparseImage(object):
self.offset_index = [i[0] for i in offset_map]
if file_map_fn:
- self.LoadFileBlockMap(file_map_fn)
+ self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
else:
self.file_map = {"__DATA": self.care_map}
def ReadRangeSet(self, ranges):
return [d for d in self._GetRangeData(ranges)]
- def TotalSha1(self):
- """Return the SHA-1 hash of all data in the 'care' regions of this image."""
+ def TotalSha1(self, include_clobbered_blocks=False):
+ """Return the SHA-1 hash of all data in the 'care' regions.
+
+ If include_clobbered_blocks is True, it returns the hash including the
+ clobbered_blocks."""
+ ranges = self.care_map
+ if not include_clobbered_blocks:
+ ranges = ranges.subtract(self.clobbered_blocks)
h = sha1()
- for d in self._GetRangeData(self.care_map):
+ for d in self._GetRangeData(ranges):
h.update(d)
return h.hexdigest()
@@ -156,7 +170,7 @@ class SparseImage(object):
yield fill_data * (this_read * (self.blocksize >> 2))
to_read -= this_read
- def LoadFileBlockMap(self, fn):
+ def LoadFileBlockMap(self, fn, clobbered_blocks):
remaining = self.care_map
self.file_map = out = {}
@@ -166,14 +180,20 @@ class SparseImage(object):
ranges = rangelib.RangeSet.parse(ranges)
out[fn] = ranges
assert ranges.size() == ranges.intersect(remaining).size()
+
+ # Currently we assume that blocks in clobbered_blocks are not part of
+ # any file.
+ assert not clobbered_blocks.overlaps(ranges)
remaining = remaining.subtract(ranges)
+ remaining = remaining.subtract(clobbered_blocks)
+
# For all the remaining blocks in the care_map (ie, those that
- # aren't part of the data for any file), divide them into blocks
- # that are all zero and blocks that aren't. (Zero blocks are
- # handled specially because (1) there are usually a lot of them
- # and (2) bsdiff handles files with long sequences of repeated
- # bytes especially poorly.)
+ # aren't part of the data for any file nor part of the clobbered_blocks),
+ # divide them into blocks that are all zero and blocks that aren't.
+ # (Zero blocks are handled specially because (1) there are usually
+ # a lot of them and (2) bsdiff handles files with long sequences of
+ # repeated bytes especially poorly.)
zero_blocks = []
nonzero_blocks = []
@@ -201,12 +221,14 @@ class SparseImage(object):
nonzero_blocks.append(b)
nonzero_blocks.append(b+1)
- assert zero_blocks or nonzero_blocks
+ assert zero_blocks or nonzero_blocks or clobbered_blocks
if zero_blocks:
out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
if nonzero_blocks:
out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
+ if clobbered_blocks:
+ out["__COPY"] = clobbered_blocks
def ResetFileMap(self):
"""Throw away the file map and treat the entire image as
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 5fdc132..a861346 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -29,15 +29,54 @@ def random_string_with_holes(size, block_size, step_size):
data[begin:end] = os.urandom(block_size)
return "".join(data)
+def get_2gb_string():
+ kilobytes = 1024
+ megabytes = 1024 * kilobytes
+ gigabytes = 1024 * megabytes
+
+ size = int(2 * gigabytes + 1)
+ block_size = 4 * kilobytes
+ step_size = 4 * megabytes
+ two_gb_string = random_string_with_holes(
+ size, block_size, step_size)
+ return two_gb_string
+
class CommonZipTest(unittest.TestCase):
+ def _verify(self, zip_file, zip_file_name, arcname, contents,
+ test_file_name=None, expected_stat=None, expected_mode=0o644,
+ expected_compress_type=zipfile.ZIP_STORED):
+ # Verify the stat if present.
+ if test_file_name is not None:
+ new_stat = os.stat(test_file_name)
+ self.assertEqual(int(expected_stat.st_mode), int(new_stat.st_mode))
+ self.assertEqual(int(expected_stat.st_mtime), int(new_stat.st_mtime))
+
+ # Reopen the zip file to verify.
+ zip_file = zipfile.ZipFile(zip_file_name, "r")
+
+ # Verify the timestamp.
+ info = zip_file.getinfo(arcname)
+ self.assertEqual(info.date_time, (2009, 1, 1, 0, 0, 0))
+
+ # Verify the file mode.
+ mode = (info.external_attr >> 16) & 0o777
+ self.assertEqual(mode, expected_mode)
+
+ # Verify the compress type.
+ self.assertEqual(info.compress_type, expected_compress_type)
+
+ # Verify the zip contents.
+ self.assertEqual(zip_file.read(arcname), contents)
+ self.assertIsNone(zip_file.testzip())
+
def _test_ZipWrite(self, contents, extra_zipwrite_args=None):
extra_zipwrite_args = dict(extra_zipwrite_args or {})
test_file = tempfile.NamedTemporaryFile(delete=False)
- zip_file = tempfile.NamedTemporaryFile(delete=False)
-
test_file_name = test_file.name
+
+ zip_file = tempfile.NamedTemporaryFile(delete=False)
zip_file_name = zip_file.name
# File names within an archive strip the leading slash.
@@ -52,31 +91,103 @@ class CommonZipTest(unittest.TestCase):
test_file.write(contents)
test_file.close()
- old_stat = os.stat(test_file_name)
+ expected_stat = os.stat(test_file_name)
expected_mode = extra_zipwrite_args.get("perms", 0o644)
-
+ expected_compress_type = extra_zipwrite_args.get("compress_type",
+ zipfile.ZIP_STORED)
time.sleep(5) # Make sure the atime/mtime will change measurably.
common.ZipWrite(zip_file, test_file_name, **extra_zipwrite_args)
+ common.ZipClose(zip_file)
- new_stat = os.stat(test_file_name)
- self.assertEqual(int(old_stat.st_mode), int(new_stat.st_mode))
- self.assertEqual(int(old_stat.st_mtime), int(new_stat.st_mtime))
- self.assertIsNone(zip_file.testzip())
-
- zip_file.close()
- zip_file = zipfile.ZipFile(zip_file_name, "r")
- info = zip_file.getinfo(arcname)
-
- self.assertEqual(info.date_time, (2009, 1, 1, 0, 0, 0))
- mode = (info.external_attr >> 16) & 0o777
- self.assertEqual(mode, expected_mode)
- self.assertEqual(zip_file.read(arcname), contents)
- self.assertIsNone(zip_file.testzip())
+ self._verify(zip_file, zip_file_name, arcname, contents, test_file_name,
+ expected_stat, expected_mode, expected_compress_type)
finally:
os.remove(test_file_name)
os.remove(zip_file_name)
+ def _test_ZipWriteStr(self, zinfo_or_arcname, contents, extra_args=None):
+ extra_args = dict(extra_args or {})
+
+ zip_file = tempfile.NamedTemporaryFile(delete=False)
+ zip_file_name = zip_file.name
+ zip_file.close()
+
+ zip_file = zipfile.ZipFile(zip_file_name, "w")
+
+ try:
+ expected_compress_type = extra_args.get("compress_type",
+ zipfile.ZIP_STORED)
+ time.sleep(5) # Make sure the atime/mtime will change measurably.
+
+ if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
+ arcname = zinfo_or_arcname
+ expected_mode = extra_args.get("perms", 0o644)
+ else:
+ arcname = zinfo_or_arcname.filename
+ expected_mode = extra_args.get("perms",
+ zinfo_or_arcname.external_attr >> 16)
+
+ common.ZipWriteStr(zip_file, zinfo_or_arcname, contents, **extra_args)
+ common.ZipClose(zip_file)
+
+ self._verify(zip_file, zip_file_name, arcname, contents,
+ expected_mode=expected_mode,
+ expected_compress_type=expected_compress_type)
+ finally:
+ os.remove(zip_file_name)
+
+ def _test_ZipWriteStr_large_file(self, large, small, extra_args=None):
+ extra_args = dict(extra_args or {})
+
+ zip_file = tempfile.NamedTemporaryFile(delete=False)
+ zip_file_name = zip_file.name
+
+ test_file = tempfile.NamedTemporaryFile(delete=False)
+ test_file_name = test_file.name
+
+ arcname_large = test_file_name
+ arcname_small = "bar"
+
+ # File names within an archive strip the leading slash.
+ if arcname_large[0] == "/":
+ arcname_large = arcname_large[1:]
+
+ zip_file.close()
+ zip_file = zipfile.ZipFile(zip_file_name, "w")
+
+ try:
+ test_file.write(large)
+ test_file.close()
+
+ expected_stat = os.stat(test_file_name)
+ expected_mode = 0o644
+ expected_compress_type = extra_args.get("compress_type",
+ zipfile.ZIP_STORED)
+ time.sleep(5) # Make sure the atime/mtime will change measurably.
+
+ common.ZipWrite(zip_file, test_file_name, **extra_args)
+ common.ZipWriteStr(zip_file, arcname_small, small, **extra_args)
+ common.ZipClose(zip_file)
+
+ # Verify the contents written by ZipWrite().
+ self._verify(zip_file, zip_file_name, arcname_large, large,
+ test_file_name, expected_stat, expected_mode,
+ expected_compress_type)
+
+ # Verify the contents written by ZipWriteStr().
+ self._verify(zip_file, zip_file_name, arcname_small, small,
+ expected_compress_type=expected_compress_type)
+ finally:
+ os.remove(zip_file_name)
+ os.remove(test_file_name)
+
+ def _test_reset_ZIP64_LIMIT(self, func, *args):
+ default_limit = (1 << 31) - 1
+ self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+ func(*args)
+ self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+
def test_ZipWrite(self):
file_contents = os.urandom(1024)
self._test_ZipWrite(file_contents)
@@ -88,23 +199,99 @@ class CommonZipTest(unittest.TestCase):
"perms": 0o777,
"compress_type": zipfile.ZIP_DEFLATED,
})
+ self._test_ZipWrite(file_contents, {
+ "arcname": "foobar",
+ "perms": 0o700,
+ "compress_type": zipfile.ZIP_STORED,
+ })
def test_ZipWrite_large_file(self):
- kilobytes = 1024
- megabytes = 1024 * kilobytes
- gigabytes = 1024 * megabytes
-
- size = int(2 * gigabytes + 1)
- block_size = 4 * kilobytes
- step_size = 4 * megabytes
- file_contents = random_string_with_holes(
- size, block_size, step_size)
+ file_contents = get_2gb_string()
self._test_ZipWrite(file_contents, {
"compress_type": zipfile.ZIP_DEFLATED,
})
def test_ZipWrite_resets_ZIP64_LIMIT(self):
- default_limit = (1 << 31) - 1
- self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
- self._test_ZipWrite('')
- self.assertEqual(default_limit, zipfile.ZIP64_LIMIT)
+ self._test_reset_ZIP64_LIMIT(self._test_ZipWrite, "")
+
+ def test_ZipWriteStr(self):
+ random_string = os.urandom(1024)
+ # Passing arcname
+ self._test_ZipWriteStr("foo", random_string)
+
+ # Passing zinfo
+ zinfo = zipfile.ZipInfo(filename="foo")
+ self._test_ZipWriteStr(zinfo, random_string)
+
+ # Timestamp in the zinfo should be overwritten.
+ zinfo.date_time = (2015, 3, 1, 15, 30, 0)
+ self._test_ZipWriteStr(zinfo, random_string)
+
+ def test_ZipWriteStr_with_opts(self):
+ random_string = os.urandom(1024)
+ # Passing arcname
+ self._test_ZipWriteStr("foo", random_string, {
+ "perms": 0o700,
+ "compress_type": zipfile.ZIP_DEFLATED,
+ })
+ self._test_ZipWriteStr("bar", random_string, {
+ "compress_type": zipfile.ZIP_STORED,
+ })
+
+ # Passing zinfo
+ zinfo = zipfile.ZipInfo(filename="foo")
+ self._test_ZipWriteStr(zinfo, random_string, {
+ "compress_type": zipfile.ZIP_DEFLATED,
+ })
+ self._test_ZipWriteStr(zinfo, random_string, {
+ "perms": 0o600,
+ "compress_type": zipfile.ZIP_STORED,
+ })
+
+ def test_ZipWriteStr_large_file(self):
+ # zipfile.writestr() doesn't work when the str size is over 2GiB even with
+ # the workaround. We will only test the case of writing a string into a
+ # large archive.
+ long_string = get_2gb_string()
+ short_string = os.urandom(1024)
+ self._test_ZipWriteStr_large_file(long_string, short_string, {
+ "compress_type": zipfile.ZIP_DEFLATED,
+ })
+
+ def test_ZipWriteStr_resets_ZIP64_LIMIT(self):
+ self._test_reset_ZIP64_LIMIT(self._test_ZipWriteStr, "foo", "")
+ zinfo = zipfile.ZipInfo(filename="foo")
+ self._test_reset_ZIP64_LIMIT(self._test_ZipWriteStr, zinfo, "")
+
+ def test_bug21309935(self):
+ zip_file = tempfile.NamedTemporaryFile(delete=False)
+ zip_file_name = zip_file.name
+ zip_file.close()
+
+ try:
+ random_string = os.urandom(1024)
+ zip_file = zipfile.ZipFile(zip_file_name, "w")
+ # Default perms should be 0o644 when passing the filename.
+ common.ZipWriteStr(zip_file, "foo", random_string)
+ # Honor the specified perms.
+ common.ZipWriteStr(zip_file, "bar", random_string, perms=0o755)
+ # The perms in zinfo should be untouched.
+ zinfo = zipfile.ZipInfo(filename="baz")
+ zinfo.external_attr = 0o740 << 16
+ common.ZipWriteStr(zip_file, zinfo, random_string)
+ # Explicitly specified perms has the priority.
+ zinfo = zipfile.ZipInfo(filename="qux")
+ zinfo.external_attr = 0o700 << 16
+ common.ZipWriteStr(zip_file, zinfo, random_string, perms=0o400)
+ common.ZipClose(zip_file)
+
+ self._verify(zip_file, zip_file_name, "foo", random_string,
+ expected_mode=0o644)
+ self._verify(zip_file, zip_file_name, "bar", random_string,
+ expected_mode=0o755)
+ self._verify(zip_file, zip_file_name, "baz", random_string,
+ expected_mode=0o740)
+ self._verify(zip_file, zip_file_name, "qux", random_string,
+ expected_mode=0o400)
+ finally:
+ os.remove(zip_file_name)
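
[Editor's note] A note on running these tests: every case sleeps five seconds to make mtime changes measurable, and the *_large_file cases materialize a string just over 2 GiB, so a full run needs time, memory, and temp space. A hedged invocation sketch; the diff does not prescribe a runner, and the working directory is assumed:

    import unittest

    # Run from build/tools/releasetools so that 'common' and 'test_common'
    # are importable; this picks one of the fast cases.
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "test_common.CommonZipTest.test_ZipWriteStr")
    unittest.TextTestRunner(verbosity=2).run(suite)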