# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import copy
import imp
import logging
import os
import time
import threading
import tempfile
import zipfile
import subprocess
import shlex

import common
import edify_generator
from edify_generator import ErrorCode
from check_target_files_vintf import CheckVintfIfTrebleEnabled, HasPartition
from common import OPTIONS, Run, MakeTempDir, RunAndCheckOutput, ZipWrite, MakeTempFile
from ota_utils import UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata, PropertyFiles
from blockimgdiff import BlockImageDiff
from hashlib import sha1

logger = logging.getLogger(__name__)


def GetBlockDifferences(target_zip, source_zip, target_info, source_info,
                        device_specific):
  """Returns an ordered dict of block differences, keyed by partition name."""

  def GetIncrementalBlockDifferenceForPartition(name):
    if not HasPartition(source_zip, name):
      raise RuntimeError(
          "can't generate incremental that adds {}".format(name))

    partition_src = common.GetUserImage(name, OPTIONS.source_tmp, source_zip,
                                        info_dict=source_info,
                                        allow_shared_blocks=allow_shared_blocks)

    partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
                                        info_dict=target_info,
                                        allow_shared_blocks=allow_shared_blocks)

    # Check the first block of the source system partition for remount R/W only
    # if the filesystem is ext4.
    partition_source_info = source_info["fstab"]["/" + name]
    check_first_block = partition_source_info.fs_type == "ext4"
    # Disable imgdiff because it relies on zlib to produce stable output
    # across different versions, which is often not the case.
    return BlockDifference(name, partition_tgt, partition_src,
                           check_first_block,
                           version=blockimgdiff_version,
                           disable_imgdiff=True)

  if source_zip:
    # See notes in common.GetUserImage()
    allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
                           target_info.get('ext4_share_dup_blocks') == "true")
    blockimgdiff_version = max(
        int(i) for i in target_info.get(
            "blockimgdiff_versions", "1").split(","))
    assert blockimgdiff_version >= 3

  block_diff_dict = collections.OrderedDict()
  partition_names = ["system", "vendor", "product", "odm", "system_ext",
                     "vendor_dlkm", "odm_dlkm", "system_dlkm"]
  for partition in partition_names:
    if not HasPartition(target_zip, partition):
      continue
    # Full OTA update.
    if not source_zip:
      tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
                                info_dict=target_info,
                                reset_file_map=True)
      block_diff_dict[partition] = BlockDifference(partition, tgt,
                                                   src=None)
    # Incremental OTA update.
    else:
      block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
          partition)
  assert "system" in block_diff_dict

  # Get the block diffs from the device specific script. If there is a
  # duplicate block diff for a partition, ignore the diff in the generic script
  # and use the one in the device specific script instead.
  if source_zip:
    device_specific_diffs = device_specific.IncrementalOTA_GetBlockDifferences()
    function_name = "IncrementalOTA_GetBlockDifferences"
  else:
    device_specific_diffs = device_specific.FullOTA_GetBlockDifferences()
    function_name = "FullOTA_GetBlockDifferences"

  if device_specific_diffs:
    assert all(isinstance(diff, BlockDifference)
               for diff in device_specific_diffs), \
        "{} is not returning a list of BlockDifference objects".format(
            function_name)
    for diff in device_specific_diffs:
      if diff.partition in block_diff_dict:
        logger.warning("Duplicate block difference found. Device specific block"
                       " diff for partition '%s' overrides the one in generic"
                       " script.", diff.partition)
      block_diff_dict[diff.partition] = diff

  return block_diff_dict


def WriteFullOTAPackage(input_zip, output_file):
  target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)

  # We don't know what version it will be installed on top of. We expect the API
  # just won't change very often. Similarly for fstab, it might have changed in
  # the target build.
  target_api_version = target_info["recovery_api_version"]
  script = edify_generator.EdifyGenerator(target_api_version, target_info)

  if target_info.oem_props and not OPTIONS.oem_no_mount:
    target_info.WriteMountOemScript(script)

  metadata = GetPackageMetadata(target_info)

  if not OPTIONS.no_signing:
    staging_file = common.MakeTempFile(suffix='.zip')
  else:
    staging_file = output_file

  output_zip = zipfile.ZipFile(
      staging_file, "w", compression=zipfile.ZIP_DEFLATED)

  device_specific = DeviceSpecificParams(
      input_zip=input_zip,
      input_version=target_api_version,
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  assert HasRecoveryPatch(input_zip, info_dict=OPTIONS.info_dict)

  # Assertions (e.g. downgrade check, device properties check).
  ts = target_info.GetBuildProp("ro.build.date.utc")
  ts_text = target_info.GetBuildProp("ro.build.date")
  script.AssertOlderBuild(ts, ts_text)

  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
  device_specific.FullOTA_Assertions()

  block_diff_dict = GetBlockDifferences(target_zip=input_zip, source_zip=None,
                                        target_info=target_info,
                                        source_info=None,
                                        device_specific=device_specific)

  # Two-step package strategy (in chronological order, which is *not*
  # the order in which the generated script has things):
  #
  # if stage is not "2/3" or "3/3":
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    set stage to ""
  #    do normal full package installation:
  #       wipe and install system, boot image, etc.
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                         OPTIONS.input_tmp, "RECOVERY")
  if OPTIONS.two_step:
    if not target_info.get("multistage_support"):
      assert False, "two-step packages not supported by this build"
    fs = target_info["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
    script.Comment("Stage 2/3")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)

    # Stage 3/3: Make changes.
    script.Comment("Stage 3/3")

  # Dump fingerprints
  script.Print("Target: {}".format(target_info.fingerprint))

  device_specific.FullOTA_InstallBegin()

  # All other partitions as well as the data wipe use 10% of the progress, and
  # the update of the system partition takes the remaining progress.
  system_progress = 0.9 - (len(block_diff_dict) - 1) * 0.1
  if OPTIONS.wipe_user_data:
    system_progress -= 0.1
  progress_dict = {partition: 0.1 for partition in block_diff_dict}
  progress_dict["system"] = system_progress
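  # Illustrative example: with three block diffs (system, vendor, product) and
  # --wipe_user_data set, system_progress is 0.9 - 2 * 0.1 - 0.1 = 0.6, while
  # vendor, product and the data wipe each get 0.1 of the progress bar.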

  if target_info.get('use_dynamic_partitions') == "true":
    # Use empty source_info_dict to indicate that all partitions / groups must
    # be re-added.
    dynamic_partitions_diff = DynamicPartitionsDifference(
        info_dict=OPTIONS.info_dict,
        block_diffs=block_diff_dict.values(),
        progress_dict=progress_dict)
    dynamic_partitions_diff.WriteScript(script, output_zip,
                                        write_verify_script=OPTIONS.verify)
  else:
    for block_diff in block_diff_dict.values():
      block_diff.WriteScript(script, output_zip,
                             progress=progress_dict.get(block_diff.partition),
                             write_verify_script=OPTIONS.verify)

  CheckVintfIfTrebleEnabled(OPTIONS.input_tmp, target_info)

  boot_img = common.GetBootableImage(
      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
  common.CheckSize(boot_img.data, "boot.img", target_info)
  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)

  script.WriteRawImage("/boot", "boot.img")

  script.ShowProgress(0.1, 10)
  device_specific.FullOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.UnmountAll()

  if OPTIONS.wipe_user_data:
    script.ShowProgress(0.1, 10)
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
    script.AppendExtra("else\n")

    # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
    script.Comment("Stage 1/3")
    _WriteRecoveryImageToBoot(script, output_zip)

    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata.required_cache = script.required_cache

  # We haven't written the metadata entry, which will be done in
  # FinalizeMetadata.
  common.ZipClose(output_zip)

  needed_property_files = (
      NonAbOtaPropertyFiles(),
  )
  FinalizeMetadata(metadata, staging_file, output_file,
                   needed_property_files, package_key=OPTIONS.package_key)


def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
  target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
  source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)

  target_api_version = target_info["recovery_api_version"]
  source_api_version = source_info["recovery_api_version"]
  if source_api_version == 0:
    logger.warning(
        "Generating edify script for a source that can't install it.")

  script = edify_generator.EdifyGenerator(
      source_api_version, target_info, fstab=source_info["fstab"])

  if target_info.oem_props or source_info.oem_props:
    if not OPTIONS.oem_no_mount:
      source_info.WriteMountOemScript(script)

  metadata = GetPackageMetadata(target_info, source_info)

  if not OPTIONS.no_signing:
    staging_file = common.MakeTempFile(suffix='.zip')
  else:
    staging_file = output_file

  output_zip = zipfile.ZipFile(
      staging_file, "w", compression=zipfile.ZIP_DEFLATED)

  device_specific = DeviceSpecificParams(
      source_zip=source_zip,
      source_version=source_api_version,
      source_tmp=OPTIONS.source_tmp,
      target_zip=target_zip,
      target_version=target_api_version,
      target_tmp=OPTIONS.target_tmp,
      output_zip=output_zip,
      script=script,
      metadata=metadata,
      info_dict=source_info)

  source_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", source_info)
  target_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT", target_info)
  updating_boot = (not OPTIONS.two_step and
                   (source_boot.data != target_boot.data))

  target_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")

  block_diff_dict = GetBlockDifferences(target_zip=target_zip,
                                        source_zip=source_zip,
                                        target_info=target_info,
                                        source_info=source_info,
                                        device_specific=device_specific)

  CheckVintfIfTrebleEnabled(OPTIONS.target_tmp, target_info)

  # Assertions (e.g. device properties check).
  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
  device_specific.IncrementalOTA_Assertions()

  # Two-step incremental package strategy (in chronological order,
  # which is *not* the order in which the generated script has
  # things):
  #
  # if stage is not "2/3" or "3/3":
  #    do verification on current system
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    perform update:
  #       patch system files, etc.
  #       force full install of new boot image
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  if OPTIONS.two_step:
    if not source_info.get("multistage_support"):
      assert False, "two-step packages not supported by this build"
    fs = source_info["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
    script.Comment("Stage 2/3")
    script.AppendExtra("sleep(20);\n")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)

    # Stage 1/3: (a) Verify the current system.
    script.Comment("Stage 1/3")

  # Dump fingerprints
  script.Print("Source: {}".format(source_info.fingerprint))
  script.Print("Target: {}".format(target_info.fingerprint))

  script.Print("Verifying current system...")

  device_specific.IncrementalOTA_VerifyBegin()

  WriteFingerprintAssertion(script, target_info, source_info)

  # Check the required cache size (i.e. stashed blocks).
  required_cache_sizes = [diff.required_cache for diff in
                          block_diff_dict.values()]
  if updating_boot:
    boot_type, boot_device_expr = GetTypeAndDeviceExpr("/boot",
                                                       source_info)
    d = Difference(target_boot, source_boot, "bsdiff")
    _, _, d = d.ComputePatch()
    if d is None:
      include_full_boot = True
      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    else:
      include_full_boot = False

      logger.info(
          "boot target: %d source: %d diff: %d", target_boot.size,
          source_boot.size, len(d))

      common.ZipWriteStr(output_zip, "boot.img.p", d)

      target_expr = 'concat("{}:",{},":{}:{}")'.format(
          boot_type, boot_device_expr, target_boot.size, target_boot.sha1)
      source_expr = 'concat("{}:",{},":{}:{}")'.format(
          boot_type, boot_device_expr, source_boot.size, source_boot.sha1)
      script.PatchPartitionExprCheck(target_expr, source_expr)

      required_cache_sizes.append(target_boot.size)

  if required_cache_sizes:
    script.CacheFreeSpaceCheck(max(required_cache_sizes))

  # Verify the existing partitions.
  for diff in block_diff_dict.values():
    diff.WriteVerifyScript(script, touched_blocks_only=True)

  device_specific.IncrementalOTA_VerifyEnd()

  if OPTIONS.two_step:
    # Stage 1/3: (b) Write recovery image to /boot.
    _WriteRecoveryImageToBoot(script, output_zip)

    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)

    # Stage 3/3: Make changes.
    script.Comment("Stage 3/3")

  script.Comment("---- start making changes here ----")

  device_specific.IncrementalOTA_InstallBegin()

  progress_dict = {partition: 0.1 for partition in block_diff_dict}
  progress_dict["system"] = 1 - len(block_diff_dict) * 0.1

  if OPTIONS.source_info_dict.get("use_dynamic_partitions") == "true":
    if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
      raise RuntimeError(
          "can't generate incremental that disables dynamic partitions")
    dynamic_partitions_diff = DynamicPartitionsDifference(
        info_dict=OPTIONS.target_info_dict,
        source_info_dict=OPTIONS.source_info_dict,
        block_diffs=block_diff_dict.values(),
        progress_dict=progress_dict)
    dynamic_partitions_diff.WriteScript(
        script, output_zip, write_verify_script=OPTIONS.verify)
  else:
    for block_diff in block_diff_dict.values():
      block_diff.WriteScript(script, output_zip,
                             progress=progress_dict.get(block_diff.partition),
                             write_verify_script=OPTIONS.verify)

  if OPTIONS.two_step:
    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    script.WriteRawImage("/boot", "boot.img")
    logger.info("writing full boot image (forced by two-step mode)")

  if not OPTIONS.two_step:
    if updating_boot:
      if include_full_boot:
        logger.info("boot image changed; including full.")
        script.Print("Installing boot image...")
        script.WriteRawImage("/boot", "boot.img")
      else:
        # Produce the boot image by applying a patch to the current
        # contents of the boot partition, and write it back to the
        # partition.
        logger.info("boot image changed; including patch.")
        script.Print("Patching boot image...")
        script.ShowProgress(0.1, 10)
        target_expr = 'concat("{}:",{},":{}:{}")'.format(
            boot_type, boot_device_expr, target_boot.size, target_boot.sha1)
        source_expr = 'concat("{}:",{},":{}:{}")'.format(
            boot_type, boot_device_expr, source_boot.size, source_boot.sha1)
        script.PatchPartitionExpr(target_expr, source_expr, '"boot.img.p"')
    else:
      logger.info("boot image unchanged; skipping.")

  # Do device-specific installation (eg, write radio image).
  device_specific.IncrementalOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  if OPTIONS.wipe_user_data:
    script.Print("Erasing user data...")
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  # For downgrade OTAs, we prefer to use the update-binary in the source
  # build that is actually newer than the one in the target build.
  if OPTIONS.downgrade:
    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
  else:
    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata.required_cache = script.required_cache

  # We haven't written the metadata entry yet, which will be handled in
  # FinalizeMetadata().
  common.ZipClose(output_zip)

  # Sign the generated zip package unless no_signing is specified.
  needed_property_files = (
      NonAbOtaPropertyFiles(),
  )
  FinalizeMetadata(metadata, staging_file, output_file,
                   needed_property_files, package_key=OPTIONS.package_key)


def GenerateNonAbOtaPackage(target_file, output_file, source_file=None):
  """Generates a non-A/B OTA package."""
  # Check the loaded info dicts first.
  if OPTIONS.info_dict.get("no_recovery") == "true":
    raise common.ExternalError(
        "--- target build has specified no recovery ---")

  # Non-A/B OTAs rely on /cache partition to store temporary files.
  cache_size = OPTIONS.info_dict.get("cache_size")
  if cache_size is None:
    logger.warning("--- can't determine the cache partition size ---")
  OPTIONS.cache_size = cache_size

  if OPTIONS.extra_script is not None:
    with open(OPTIONS.extra_script) as fp:
      OPTIONS.extra_script = fp.read()

  if OPTIONS.extracted_input is not None:
    OPTIONS.input_tmp = OPTIONS.extracted_input
  else:
    if not os.path.isdir(target_file):
      logger.info("unzipping target target-files...")
      OPTIONS.input_tmp = common.UnzipTemp(target_file, UNZIP_PATTERN)
    else:
      OPTIONS.input_tmp = target_file
      tmpfile = common.MakeTempFile(suffix=".zip")
      os.unlink(tmpfile)
      common.RunAndCheckOutput(
          ["zip", tmpfile, "-r", ".", "-0"], cwd=target_file)
      assert zipfile.is_zipfile(tmpfile)
      target_file = tmpfile

  OPTIONS.target_tmp = OPTIONS.input_tmp

  # If the caller explicitly specified the device-specific extensions path via
  # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
  # is present in the target target_files. Otherwise, take the path of the file
  # from 'tool_extensions' in the info dict and look for that in the local
  # filesystem, relative to the current directory.
  if OPTIONS.device_specific is None:
    from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
    if os.path.exists(from_input):
      logger.info("(using device-specific extensions from target_files)")
      OPTIONS.device_specific = from_input
    else:
      OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")

  if OPTIONS.device_specific is not None:
    OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)

  # Generate a full OTA.
  if source_file is None:
    with zipfile.ZipFile(target_file) as input_zip:
      WriteFullOTAPackage(
          input_zip,
          output_file)

  # Generate an incremental OTA.
  else:
    logger.info("unzipping source target-files...")
    OPTIONS.source_tmp = common.UnzipTemp(
        OPTIONS.incremental_source, UNZIP_PATTERN)
    with zipfile.ZipFile(target_file) as input_zip, \
            zipfile.ZipFile(source_file) as source_zip:
      WriteBlockIncrementalOTAPackage(
          input_zip,
          source_zip,
          output_file)


def WriteFingerprintAssertion(script, target_info, source_info):
  source_oem_props = source_info.oem_props
  target_oem_props = target_info.oem_props

  if source_oem_props is None and target_oem_props is None:
    script.AssertSomeFingerprint(
        source_info.fingerprint, target_info.fingerprint)
  elif source_oem_props is not None and target_oem_props is not None:
    script.AssertSomeThumbprint(
        target_info.GetBuildProp("ro.build.thumbprint"),
        source_info.GetBuildProp("ro.build.thumbprint"))
  elif source_oem_props is None and target_oem_props is not None:
    script.AssertFingerprintOrThumbprint(
        source_info.fingerprint,
        target_info.GetBuildProp("ro.build.thumbprint"))
  else:
    script.AssertFingerprintOrThumbprint(
        target_info.fingerprint,
        source_info.GetBuildProp("ro.build.thumbprint"))


class NonAbOtaPropertyFiles(PropertyFiles):
  """The property-files for non-A/B OTA.

  For non-A/B OTA, the property-files string contains the info for the METADATA
  entry, with which a system updater can fetch the package metadata prior to
  downloading the entire package.
  """

  def __init__(self):
    super(NonAbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'


def _WriteRecoveryImageToBoot(script, output_zip):
  """Find and write recovery image to /boot in two-step OTA.

  In two-step OTAs, we write recovery image to /boot as the first step so that
  we can reboot to there and install a new recovery image to /recovery.
  A special "recovery-two-step.img" will be preferred, which encodes the correct
  path of "/boot". Otherwise the device may show "device is corrupt" message
  when booting into /boot.

  Fall back to using the regular recovery.img if the two-step recovery image
  doesn't exist. Note that rebuilding the special image at this point may be
  infeasible, because we don't have the desired boot signer and keys when
  calling ota_from_target_files.py.
  """

  recovery_two_step_img_name = "recovery-two-step.img"
  recovery_two_step_img_path = os.path.join(
      OPTIONS.input_tmp, "OTA", recovery_two_step_img_name)
  if os.path.exists(recovery_two_step_img_path):
    common.ZipWrite(
        output_zip,
        recovery_two_step_img_path,
        arcname=recovery_two_step_img_name)
    logger.info(
        "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
    script.WriteRawImage("/boot", recovery_two_step_img_name)
  else:
    logger.info("two-step package: using recovery.img in stage 1/3")
    # The "recovery.img" entry has been written into package earlier.
    script.WriteRawImage("/boot", "recovery.img")


def HasRecoveryPatch(target_files_zip, info_dict):
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    target_files_dir = "VENDOR"
  else:
    target_files_dir = "SYSTEM/vendor"

  patch = "%s/recovery-from-boot.p" % target_files_dir
  img = "%s/etc/recovery.img" % target_files_dir

  namelist = target_files_zip.namelist()
  return patch in namelist or img in namelist


class DeviceSpecificParams(object):
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.items():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        logger.info("loaded device-specific extensions from %s", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        logger.info("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs. The first argument to the call will be
    the DeviceSpecific object itself. If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default")
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package. Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_GetBlockDifferences(self):
    """Called during full OTA installation and verification.
    Implementations should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("FullOTA_GetBlockDifferences")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package. Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_GetBlockDifferences(self):
    """Called during incremental OTA installation and verification.
    Implementations should return a list of BlockDifference objects describing
    the update on each additional partition.
    """
    return self._DoCall("IncrementalOTA_GetBlockDifferences")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    return self._DoCall("VerifyOTA_Assertions")


DIFF_PROGRAM_BY_EXT = {
    ".gz": "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}
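# Extensions not listed above fall back to plain bsdiff (see
# Difference.ComputePatch below).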


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch

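# Typical usage of Difference, as in the boot-image diff above (a sketch only):
#   d = Difference(target_boot, source_boot, "bsdiff")
#   _, _, patch = d.ComputePatch()   # patch is None if the diff tool failed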


def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

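  # Each worker holds the lock only while pulling the next item from diff_iter
  # and while logging; the lock is released around ComputePatch() so the
  # expensive diffs run in parallel.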
  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()


class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
      assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For a full OTA, compress the new.dat with brotli at quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # reduce the size much further. For a typical 1.8G system.new.dat:
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # sha1().update() requires bytes on Python 3, so build the block as bytes.
    zero_block = b'\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()


def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which is
  identical for the two, so the resulting patch should be efficient. Add it to
  the output zip, along with a shell script that is run from init.rc on first
  boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File objects for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
      target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"
  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"

  if board_uses_vendorimage:
    # In this case, the output sink is rooted at VENDOR
    recovery_img_path = "etc/recovery.img"
    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
    sh_dir = "bin"
  else:
    # In this case the output sink is rooted at SYSTEM
    recovery_img_path = "vendor/etc/recovery.img"
    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
    sh_dir = "vendor/bin"

  if full_recovery_image:
    output_sink(recovery_img_path, recovery_img.data)

  else:
    system_root_image = info_dict.get("system_root_image") == "true"
    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
    path = os.path.join(input_dir, recovery_resource_dat_path)
    # With system-root-image, boot and recovery images will have mismatching
    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
    # to handle such a case.
    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
      diff_program = ["bsdiff"]
      bonus_args = ""
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
                                              check_no_slot=False)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
                                                      check_no_slot=False)
  except KeyError:
    return

  if full_recovery_image:

    # Note that we use /vendor to refer to the recovery resources. This will
    # work for a separate vendor partition mounted at /vendor or a
    # /system/vendor subdirectory on the system partition, for which init will
    # create a symlink from /vendor to /system/vendor.

    sh = """#!/vendor/bin/sh
if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch \\
          --flash /vendor/etc/recovery.img \\
          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/vendor/bin/sh
if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s \\
          --patch /vendor/recovery-from-boot.p \\
          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
      log -t recovery "Installing new recovery image: succeeded" || \\
      log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
       'recovery_type': recovery_type,
       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin in the L
  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
  sh_location = os.path.join(sh_dir, "install-recovery.sh")

  logger.info("putting script in %s", sh_location)

  output_sink(sh_location, sh.encode())


class DynamicPartitionUpdate(object):
  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @property
  def src_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)

  @property
  def tgt_size(self):
    if not self.block_difference:
      return 0
    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)

  @staticmethod
  def _GetSparseImageSize(img):
    if not img:
      return 0
    return img.blocksize * img.total_blocks


class DynamicGroupUpdate(object):
  def __init__(self, src_size=None, tgt_size=None):
    # None: group does not exist. 0: no size limits.
    self.src_size = src_size
    self.tgt_size = tgt_size


class DynamicPartitionsDifference(object):
  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
              "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()

  def WriteScript(self, script, output_zip, write_verify_script=False):
    script.Comment('--- Start patching dynamic partitions ---')
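    # Partitions that shrink are patched first, while their old (larger)
    # extents still exist; the metadata update below then applies the new
    # super-partition layout, and the remaining partitions are patched after.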
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')

  def _Compute(self):
    self._op_list = list()

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

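    # The op_list below is ordered so that space is freed before it is
    # reallocated: partitions and groups are removed or shrunk first, then
    # groups are added or grown, and finally partitions are added, grown, or
    # moved into their target groups.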
    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %s' % (p, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
              u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC",
    "erofs": "EMMC"
}
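# Every supported filesystem maps to the single "EMMC" partition type, i.e.
# the updater addresses the partition as a raw block device.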


def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """
  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has the slotselect
  option (unless check_no_slot is explicitly set to False).
  """
  fstab = info["fstab"]
  if fstab:
    if check_no_slot:
      assert not fstab[mount_point].slotselect, \
          "Use GetTypeAndDeviceExpr instead"
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  raise KeyError


def GetTypeAndDeviceExpr(mount_point, info):
  """
  Returns the partition type (as used by the updater) and an edify expression
  that evaluates to the device path at runtime.
  """
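  # For example (device path illustrative), a slot-aware /boot entry yields
  # ("EMMC", 'add_slot_suffix("/dev/block/by-name/boot")'); without slotselect
  # the device string is returned quoted as-is.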
  fstab = info["fstab"]
  if fstab:
    p = fstab[mount_point]
    device_expr = '"%s"' % fstab[mount_point].device
    if p.slotselect:
      device_expr = 'add_slot_suffix(%s)' % device_expr
    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
  raise KeyError