#!/usr/bin/env python
#
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import bisect
import logging
import os
import struct
import threading
from hashlib import sha1

import rangelib

logger = logging.getLogger(__name__)


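# Illustrative usage (the file names below are placeholders, not part of this
# module): wrap a sparse image plus its optional block map, then hash or
# extract block ranges.
#
#   simg = SparseImage("system.img", file_map_fn="system.map")
#   print(simg.RangeSha1(simg.care_map))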
class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of the
  old contents (i.e. copying instead of patching). clobbered_blocks should be
  in the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True, allow_shared_blocks=False,
               hashtree_info_generator=None):
    self.simg_f = f = open(simg_fn, mode)

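    # The 28-byte sparse image header, matching the "<I4H4I" unpack below.
    # Field names are taken from the libsparse format and assumed here for
    # documentation only: magic, major_version, minor_version, file_hdr_sz,
    # chunk_hdr_sz, blk_sz, total_blks, total_chunks, image_checksum (unused).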
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    logger.info(
        "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
        blk_sz, total_chunks)

    if not build_map:
      assert not hashtree_info_generator, \
          "Cannot generate the hashtree info without building the offset map."
      return

    pos = 0   # in blocks
    care_data = []
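    # Each offset_map entry is a 4-tuple:
    #   (output block offset, chunk length in blocks,
    #    file position of the raw data or None, 4-byte fill pattern or None).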
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

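    # Walk every chunk header. Chunk types follow the sparse image format:
    # 0xCAC1 = raw, 0xCAC2 = fill, 0xCAC3 = don't care, 0xCAC4 = CRC32.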
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz))
        # Fills the don't care data ranges with zeros.
        # TODO(xunchang) pass the care_map to hashtree info generator.
        if hashtree_info_generator:
          fill_data = '\x00' * 4
          # In order to compute the verity hashtree on device, we need to
          # write zeros explicitly to the don't care ranges, because these
          # ranges may contain non-zero data from the previous build.
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, None, fill_data))

        pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.generator_lock = threading.Lock()

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    self.hashtree_info = None
    if hashtree_info_generator:
      self.hashtree_info = hashtree_info_generator.Generate(self)

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                            allow_shared_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk
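    # (12-byte chunk header: type 0xCAC2, reserved, chunk_sz in blocks,
    # total_sz = 16, followed by the 4-byte fill value.)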
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
201 """Generator that produces all the image data in 'ranges'. The
202 number of individual pieces returned is arbitrary (and in
203 particular is not necessarily equal to the number of ranges in
204 'ranges'.
205
Tianjie Xudf1166e2018-01-27 17:35:41 -0800206 Use a lock to protect the generator so that we will not run two
Doug Zongkerfc44a512014-08-26 13:10:25 -0700207 instances of this generator on the same object simultaneously."""

    f = self.simg_f
    with self.generator_lock:
      for s, e in ranges:
        to_read = e-s
        idx = bisect.bisect_right(self.offset_index, s) - 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

        # for the first chunk we may be starting partway through it.
        remain = chunk_len - (s - chunk_start)
        this_read = min(remain, to_read)
        if filepos is not None:
          p = filepos + ((s - chunk_start) * self.blocksize)
          f.seek(p, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

        while to_read > 0:
          # continue with following chunks if this range spans multiple chunks.
          idx += 1
          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
          this_read = min(chunk_len, to_read)
          if filepos is not None:
            f.seek(filepos, os.SEEK_SET)
            yield f.read(this_read * self.blocksize)
          else:
            yield fill_data * (this_read * (self.blocksize >> 2))
          to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
    """Loads the given block map file.

    Args:
      fn: The filename of the block map file.
      clobbered_blocks: A RangeSet instance for the clobbered blocks.
      allow_shared_blocks: Whether having shared blocks is allowed.
    """
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges_text = line.rstrip().split(None, 1)
        raw_ranges = rangelib.RangeSet.parse(ranges_text)

        # Note: e2fsdroid records holes in the extent tree as "0" blocks.
        # This causes confusion because clobbered_blocks always includes
        # the superblock (physical block #0). Since the 0 blocks here do
        # not represent actual physical blocks, remove them from the set.
        ranges = raw_ranges.subtract(rangelib.RangeSet("0"))
        # b/150334561: we need to preserve the monotonic property of the raw
        # range. Otherwise, the validation script will read the blocks in the
        # wrong order when pulling files from the image.
        ranges.monotonic = raw_ranges.monotonic
        ranges.extra['text_str'] = ranges_text

        if allow_shared_blocks:
          # Find the shared blocks that have been claimed by others. If any
          # exist, tag the entry so that we can skip applying imgdiff on this
          # file.
          shared_blocks = ranges.subtract(remaining)
          if shared_blocks:
            non_shared = ranges.subtract(shared_blocks)
            if not non_shared:
              continue

            # Put the non-shared RangeSet as the value in the block map, which
            # has a copy of the original RangeSet.
            non_shared.extra['uses_shared_blocks'] = ranges
            ranges = non_shared

        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)
    if self.hashtree_info:
      remaining = remaining.subtract(self.hashtree_info.hashtree_range)

    # For all the remaining blocks in the care_map (i.e. those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map.
    # So the whole system image will be treated as a single file. But due to
    # an unknown bug, the updater will be killed by OOM when writing the
    # patched image back to flash (observed on lenok-userdebug MEA49). Prior
    # to getting a real fix, we evenly divide the non-zero blocks into smaller
    # groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks
    if self.hashtree_info:
      out["__HASHTREE"] = self.hashtree_info.hashtree_range

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}


def GetImagePartitionSize(img):
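  """Returns the partition size of the given image.

  For a sparse image this is blocksize * total_blocks from the sparse header;
  otherwise (e.g. a raw image) it falls back to the size of the file itself.
  """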
  try:
    simg = SparseImage(img, build_map=False)
    return simg.blocksize * simg.total_blocks
  except ValueError:
    return os.path.getsize(img)


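# Command-line usage (the image name below is illustrative):
#   python sparse_img.py --get_partition_size system.img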
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('image')
  parser.add_argument('--get_partition_size', action='store_true',
                      help='Return partition size of the image')
  args = parser.parse_args()
  if args.get_partition_size:
    print(GetImagePartitionSize(args.image))