# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import bisect
import os
import struct
from hashlib import sha1

import rangelib


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of the
  old contents (i.e. copying instead of patching). clobbered_blocks should be
  in the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
    self.simg_f = f = open(simg_fn, "rb")

    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic number doesn't match!")
    if major_version != 1 or minor_version != 0:
      raise ValueError("Unsupported sparse image version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("Unexpected file header size %u." % (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Unexpected chunk header size %u." % (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        # Raw chunk: chunk_sz blocks of literal data follow in the file.
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, f.tell(), None))
        pos += chunk_sz
        f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        # Fill chunk: a 4-byte pattern repeated across chunk_sz blocks.
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        # Don't-care chunk: these blocks are skipped in the output image.
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, the hash includes the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    h = sha1()
    for d in self._GetRangeData(ranges):
      h.update(d)
    return h.hexdigest()

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The number of
    individual pieces returned is arbitrary (and in particular is not
    necessarily equal to the number of ranges in 'ranges').

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    for s, e in ranges:
      to_read = e - s
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # For the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # Continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e. those that aren't
    # part of the data for any file, nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually a lot
    # of them and (2) bsdiff handles files with long sequences of repeated
    # bytes especially poorly.)
    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs we don't have a system.map,
    # so the whole system image is treated as a single file. Due to a bug not
    # yet tracked down, the updater then gets killed by the OOM killer while
    # writing the patched image back to flash (observed on lenok-userdebug
    # MEA49). Until there is a real fix, we evenly divide the non-zero blocks
    # into smaller groups (currently 1024 blocks, or 4MB, per group).
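    # With the usual 4096-byte block size, a 1024-block group works out to
    # 4 MiB of data, so each "__NONZERO-%d" entry stays small enough for the
    # updater to patch without exhausting memory.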
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b - chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b + 1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b + 1)
          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}
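

# A minimal, hypothetical usage sketch: it shows how a SparseImage might be
# constructed and inspected. The command-line handling and the "system.img"
# placeholder path are assumptions for illustration only; in the release
# tools an object like this is what gets passed to BlockImageDiff (see the
# class docstring above).
if __name__ == "__main__":
  import sys

  # Hypothetical inputs: a sparse image and an optional block map file.
  simg_path = sys.argv[1] if len(sys.argv) > 1 else "system.img"
  map_path = sys.argv[2] if len(sys.argv) > 2 else None

  img = SparseImage(simg_path, file_map_fn=map_path)

  # file_map maps each file name (plus the synthetic "__ZERO", "__NONZERO-%d"
  # and "__COPY" entries) to the RangeSet of blocks holding its data.
  for name, blocks in sorted(img.file_map.items()):
    print("%-50s %d blocks" % (name, blocks.size()))

  # SHA-1 over every block in the care map (clobbered blocks excluded).
  print("care-map sha1: %s" % img.TotalSha1())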