#!/usr/bin/env python
#
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import bisect
import logging
import os
import struct
import threading
from hashlib import sha1

import rangelib

logger = logging.getLogger(__name__)

class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the set
  of blocks that should be always written to the target regardless of the old
  contents (i.e. copying instead of patching). clobbered_blocks should be in
  the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True, allow_shared_blocks=False):
    self.simg_f = f = open(simg_fn, mode)

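    # The sparse image starts with a 28-byte little-endian header:
    # magic (4 bytes), major_version (2), minor_version (2), file_hdr_sz (2),
    # chunk_hdr_sz (2), blk_sz (4), total_blks (4), total_chunks (4) and
    # image_checksum (4). The checksum field is read but not used below.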
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    logger.info(
        "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
        blk_sz, total_chunks)

    if not build_map:
      return

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

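    # Each chunk starts with a 12-byte header: chunk_type (2 bytes),
    # reserved (2), chunk_sz in blocks (4) and total_sz in bytes including
    # the header (4). Known chunk types are 0xCAC1 (raw), 0xCAC2 (fill),
    # 0xCAC3 (don't care) and 0xCAC4 (CRC32). Entries appended to offset_map
    # are (start block, block count, file offset or None, fill data or None).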
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz))

        pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.generator_lock = threading.Lock()

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                            allow_shared_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'.  The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    Use a lock to protect the generator so that we will not run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    with self.generator_lock:
      for s, e in ranges:
        to_read = e-s
        idx = bisect.bisect_right(self.offset_index, s) - 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

        # for the first chunk we may be starting partway through it.
        remain = chunk_len - (s - chunk_start)
        this_read = min(remain, to_read)
        if filepos is not None:
          p = filepos + ((s - chunk_start) * self.blocksize)
          f.seek(p, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          # fill_data is a 4-byte pattern; repeat it blocksize/4 times to
          # synthesize each block.
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

        while to_read > 0:
          # continue with following chunks if this range spans multiple chunks.
          idx += 1
          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
          this_read = min(chunk_len, to_read)
          if filepos is not None:
            f.seek(filepos, os.SEEK_SET)
            yield f.read(this_read * self.blocksize)
          else:
            yield fill_data * (this_read * (self.blocksize >> 2))
          to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
    """Loads the given block map file.

    Args:
      fn: The filename of the block map file.
      clobbered_blocks: A RangeSet instance for the clobbered blocks.
      allow_shared_blocks: Whether having shared blocks is allowed.
    """
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges_text = line.rstrip().split(None, 1)
        raw_ranges = rangelib.RangeSet.parse(ranges_text)

        # Note: e2fsdroid records holes in the extent tree as "0" blocks.
        # This causes confusion because clobbered_blocks always includes
        # the superblock (physical block #0). Since the 0 blocks here do
        # not represent actual physical blocks, remove them from the set.
        ranges = raw_ranges.subtract(rangelib.RangeSet("0"))
        # b/150334561 we need to preserve the monotonic property of the raw
        # range. Otherwise, the validation script will read the blocks with
        # wrong order when pulling files from the image.
        ranges.monotonic = raw_ranges.monotonic
        ranges.extra['text_str'] = ranges_text

        if allow_shared_blocks:
          # Find the shared blocks that have been claimed by others. If so, tag
          # the entry so that we can skip applying imgdiff on this file.
          shared_blocks = ranges.subtract(remaining)
          if shared_blocks:
            non_shared = ranges.subtract(shared_blocks)
            if not non_shared:
              continue

            # Put the non-shared RangeSet as the value in the block map, which
            # has a copy of the original RangeSet.
            non_shared.extra['uses_shared_blocks'] = ranges
            ranges = non_shared

        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (ie, those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map. So
    # the whole system image will be treated as a single file. But for some
    # unknown bug, the updater will be killed due to OOM when writing back the
    # patched image to flash (observed on lenok-userdebug MEA49). Prior to
    # getting a real fix, we evenly divide the non-zero blocks into smaller
    # groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

        if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
          nonzero_groups.append(nonzero_blocks)
          # Clear the list.
          nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}


def GetImagePartitionSize(img):
  try:
    simg = SparseImage(img, build_map=False)
    return simg.blocksize * simg.total_blocks
  except ValueError:
    return os.path.getsize(img)

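# Illustrative invocation, assuming this module is saved as sparse_img.py and
# given a hypothetical image path:
#
#   python sparse_img.py --get_partition_size system.img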
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('image')
  parser.add_argument('--get_partition_size', action='store_true',
                      help='Return partition size of the image')
  args = parser.parse_args()
  if args.get_partition_size:
    print(GetImagePartitionSize(args.image))