Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 1 | # Copyright (C) 2020 The Android Open Source Project |
| 2 | # |
| 3 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | # you may not use this file except in compliance with the License. |
| 5 | # You may obtain a copy of the License at |
| 6 | # |
| 7 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | # |
| 9 | # Unless required by applicable law or agreed to in writing, software |
| 10 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | # See the License for the specific language governing permissions and |
| 13 | # limitations under the License. |
| 14 | |
| 15 | import copy |
| 16 | import itertools |
Yifan Hong | 125d0b6 | 2020-09-24 17:07:03 -0700 | [diff] [blame] | 17 | import logging |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 18 | import os |
Kelvin Zhang | b9fdf2d | 2022-08-12 14:07:31 -0700 | [diff] [blame] | 19 | import shutil |
Kelvin Zhang | 25ab998 | 2021-06-22 09:51:34 -0400 | [diff] [blame] | 20 | import struct |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 21 | import zipfile |
| 22 | |
Tianjie | a207613 | 2020-08-19 17:25:32 -0700 | [diff] [blame] | 23 | import ota_metadata_pb2 |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 24 | import common |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 25 | import fnmatch |
| 26 | from common import (ZipDelete, DoesInputFileContain, ReadBytesFromInputFile, OPTIONS, MakeTempFile, |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 27 | ZipWriteStr, BuildInfo, LoadDictionaryFromFile, |
TJ Rhoades | 6f488e9 | 2022-05-01 22:16:22 -0700 | [diff] [blame] | 28 | SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps, |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 29 | GetRamdiskFormat, ParseUpdateEngineConfig) |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 30 | from payload_signer import PayloadSigner |
| 31 | |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 32 | |
# Module-level logger; shares whatever logging configuration the caller set up.
logger = logging.getLogger(__name__)

# Default values for the global OTA options, registered on common.OPTIONS.
# The command-line entry points presumably overwrite these from parsed flags
# before calling into this module's helpers (TODO: confirm against callers).
OPTIONS.no_signing = False
OPTIONS.force_non_ab = False
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.key_passwords = {}
OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.output_metadata_path = None
OPTIONS.boot_variable_file = None
| 45 | |
# ZIP entry name of the legacy key=value metadata file inside an OTA package.
METADATA_NAME = 'META-INF/com/android/metadata'
# ZIP entry name of the protobuf metadata (serialized OtaMetadata message).
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
# Glob patterns selecting the target-files entries needed for OTA generation.
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*',
                 'RADIO/*', '*/build.prop', '*/default.prop', '*/build.default', "*/etc/vintf/*"]
# Name of the build property that carries the security patch level.
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
# Target-files subdirectories that may hold prebuilt partition images.
TARGET_FILES_IMAGES_SUBDIR = ["IMAGES", "PREBUILT_IMAGES", "RADIO"]
Kelvin Zhang | 05ff705 | 2021-02-10 09:13:26 -0500 | [diff] [blame] | 52 | |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 53 | |
# Key is the compression algorithm, value is minimum API level required to
# use this compression algorithm for VABC OTA on device.
VABC_COMPRESSION_PARAM_SUPPORT = {
    "gz": 31,
    "brotli": 31,
    "none": 31,
    # lz4 support is added in Android U
    "lz4": 34,
    # zstd support is added in Android V
    "zstd": 35,
}
| 65 | |
| 66 | |
def FinalizeMetadata(metadata, input_file, output_file, needed_property_files=None, package_key=None, pw=None):
  """Finalizes the metadata and signs an A/B OTA package.

  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
  that contains the offsets and sizes for the ZIP entries. An example
  property-files string is as follows.

    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"

  OTA server can pass down this string, in addition to the package URL, to the
  system update client. System update client can then fetch individual ZIP
  entries (ZIP_STORED) directly at the given offset of the URL.

  Args:
    metadata: The metadata dict for the package.
    input_file: The input ZIP filename that doesn't contain the package METADATA
      entry yet.
    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles' to be generated. Default is [AbOtaPropertyFiles(), StreamingPropertyFiles()]
    package_key: The key used to sign this OTA package
    pw: Password for the package_key
  """
  # An absent signing key means the caller wants an unsigned package.
  no_signing = package_key is None

  if needed_property_files is None:
    # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
    # all the info of the latter. However, system updaters and OTA servers need to
    # take time to switch to the new flag. We keep both of the flags for
    # P-timeframe, and will remove StreamingPropertyFiles in later release.
    needed_property_files = (
        AbOtaPropertyFiles(),
        StreamingPropertyFiles(),
    )

  def ComputeAllPropertyFiles(input_file, needed_property_files):
    # Write the current metadata entry with placeholders.
    with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Compute(
            input_zip)

    # Replace any stale metadata entries before appending the placeholder ones.
    ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME], True)
    with zipfile.ZipFile(input_file, 'a', allowZip64=True) as output_zip:
      WriteMetadata(metadata, output_zip)

    if no_signing:
      return input_file

    prelim_signing = MakeTempFile(suffix='.zip')
    SignOutput(input_file, prelim_signing, package_key, pw)
    return prelim_signing

  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
    # Recompute each property-files string against the signed ZIP, padding it
    # to the length reserved by the earlier Compute() pass.
    with zipfile.ZipFile(prelim_signing, 'r', allowZip64=True) as prelim_signing_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Finalize(
            prelim_signing_zip,
            len(metadata.property_files[property_files.name]))

  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the ZIP
  # entries, as well as padding the entry headers. We do a preliminary signing
  # (with an incomplete metadata entry) to allow that to happen. Then compute
  # the ZIP entry offsets, write back the final metadata and do the final
  # signing.
  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
  try:
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
  except PropertyFiles.InsufficientSpaceException:
    # Even with the preliminary signing, the entry orders may change
    # dramatically, which leads to insufficiently reserved space during the
    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing works, based on the already ordered ZIP entries, to
    # address the issue.
    prelim_signing = ComputeAllPropertyFiles(
        prelim_signing, needed_property_files)
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)

  # Replace the METADATA entry.
  ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
  with zipfile.ZipFile(prelim_signing, 'a', allowZip64=True) as output_zip:
    WriteMetadata(metadata, output_zip)

  # Re-sign the package after updating the metadata entry.
  if no_signing:
    logger.info(f"Signing disabled for output file {output_file}")
    shutil.copy(prelim_signing, output_file)
  else:
    logger.info(
        f"Signing the output file {output_file} with key {package_key}")
    SignOutput(prelim_signing, output_file, package_key, pw)

  # Reopen the final signed zip to double check the streaming metadata.
  with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
    for property_files in needed_property_files:
      property_files.Verify(
          output_zip, metadata.property_files[property_files.name].strip())

  # If requested, dump the metadata to a separate file.
  output_metadata_path = OPTIONS.output_metadata_path
  if output_metadata_path:
    WriteMetadata(metadata, output_metadata_path)
| 168 | |
| 169 | |
def WriteMetadata(metadata_proto, output):
  """Writes the metadata to the zip archive or a file.

  Args:
    metadata_proto: The metadata protobuf for the package.
    output: A ZipFile object or a string of the output file path. If a string
      path is given, the metadata in the protobuf format will be written to
      {output}.pb, e.g. ota_metadata.pb
  """
  serialized_proto = metadata_proto.SerializeToString()
  # Render the legacy key=value text form, one sorted pair per line.
  sorted_pairs = sorted(BuildLegacyOtaMetadata(metadata_proto).items())
  legacy_metadata = "".join(
      "%s=%s\n" % (key, value) for key, value in sorted_pairs)

  if isinstance(output, zipfile.ZipFile):
    # Both entries are stored uncompressed; property-files strings depend on
    # their offsets/sizes being directly addressable.
    for entry_name, payload in ((METADATA_PROTO_NAME, serialized_proto),
                                (METADATA_NAME, legacy_metadata)):
      ZipWriteStr(output, entry_name, payload,
                  compress_type=zipfile.ZIP_STORED)
    return

  # A plain path: the proto goes to {output}.pb, the legacy text to {output}.
  with open('{}.pb'.format(output), 'wb') as f:
    f.write(serialized_proto)
  with open(output, 'w') as f:
    f.write(legacy_metadata)
| 194 | |
| 195 | |
def UpdateDeviceState(device_state, build_info, boot_variable_values,
                      is_post_build):
  """Update the fields of the DeviceState proto with build info.

  Args:
    device_state: The DeviceState message to populate; callers pass either the
      package's precondition or postcondition (see GetPackageMetadata).
    build_info: The BuildInfo instance for this build.
    boot_variable_values: Dict mapping boot variable names to their possible
      runtime values, used to enumerate runtime fingerprints/devices.
    is_post_build: True when |device_state| describes the post-OTA build; only
      then are sdk_level, security_patch_level and timestamp recorded.
  """

  def UpdatePartitionStates(partition_states):
    """Update the per-partition state according to its build.prop"""
    # Per-partition state is only emitted for A/B builds.
    if not build_info.is_ab:
      return
    build_info_set = ComputeRuntimeBuildInfos(build_info,
                                              boot_variable_values)
    assert "ab_partitions" in build_info.info_dict,\
        "ab_partitions property required for ab update."
    ab_partitions = set(build_info.info_dict.get("ab_partitions"))

    # delta_generator will error out on unused timestamps,
    # so only generate timestamps for dynamic partitions
    # used in OTA update.
    for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
      partition_prop = build_info.info_dict.get(
          '{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop
      if not partition_prop or not partition_prop.build_props:
        continue

      partition_state = partition_states.add()
      partition_state.partition_name = partition
      # Update the partition's runtime device names and fingerprints
      partition_devices = set()
      partition_fingerprints = set()
      for runtime_build_info in build_info_set:
        partition_devices.add(
            runtime_build_info.GetPartitionBuildProp('ro.product.device',
                                                     partition))
        partition_fingerprints.add(
            runtime_build_info.GetPartitionFingerprint(partition))

      # Sorted so the emitted proto is deterministic across runs.
      partition_state.device.extend(sorted(partition_devices))
      partition_state.build.extend(sorted(partition_fingerprints))

      # TODO(xunchang) set the boot image's version with kmi. Note the boot
      # image doesn't have a file map.
      partition_state.version = build_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)

  # TODO(xunchang), we can save a call to ComputeRuntimeBuildInfos.
  build_devices, build_fingerprints = \
      CalculateRuntimeDevicesAndFingerprints(build_info, boot_variable_values)
  device_state.device.extend(sorted(build_devices))
  device_state.build.extend(sorted(build_fingerprints))
  device_state.build_incremental = build_info.GetBuildProp(
      'ro.build.version.incremental')

  UpdatePartitionStates(device_state.partition_state)

  if is_post_build:
    device_state.sdk_level = build_info.GetBuildProp(
        'ro.build.version.sdk')
    device_state.security_patch_level = build_info.GetBuildProp(
        'ro.build.version.security_patch')
    # Use the actual post-timestamp, even for a downgrade case.
    device_state.timestamp = int(build_info.GetBuildProp('ro.build.date.utc'))
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 257 | |
| 258 | |
def GetPackageMetadata(target_info, source_info=None):
  """Generates and returns the metadata proto.

  It generates a ota_metadata protobuf that contains the info to be written
  into an OTA package (META-INF/com/android/metadata.pb). It also handles the
  detection of downgrade / data wipe based on the global options.

  Args:
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, or
      None if generating full OTA.

  Returns:
    A protobuf to be written into package metadata entry.
  """
  assert isinstance(target_info, BuildInfo)
  assert source_info is None or isinstance(source_info, BuildInfo)

  # Parse the optional boot-variable file: each line maps a variable name to a
  # comma-separated list of possible runtime values.
  boot_variable_values = {}
  if OPTIONS.boot_variable_file:
    d = LoadDictionaryFromFile(OPTIONS.boot_variable_file)
    for key, values in d.items():
      boot_variable_values[key] = [val.strip() for val in values.split(',')]

  metadata_proto = ota_metadata_pb2.OtaMetadata()
  # TODO(xunchang) some fields, e.g. post-device isn't necessary. We can
  # consider skipping them if they aren't used by clients.
  UpdateDeviceState(metadata_proto.postcondition, target_info,
                    boot_variable_values, True)

  if target_info.is_ab and not OPTIONS.force_non_ab:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.AB
    metadata_proto.required_cache = 0
  else:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.BLOCK
    # cache requirement will be updated by the non-A/B codes.

  if OPTIONS.wipe_user_data:
    metadata_proto.wipe = True

  if OPTIONS.retrofit_dynamic_partitions:
    metadata_proto.retrofit_dynamic_partitions = True

  is_incremental = source_info is not None
  if is_incremental:
    UpdateDeviceState(metadata_proto.precondition, source_info,
                      boot_variable_values, False)
  else:
    # Full OTA: the precondition only constrains the device name(s), copied
    # from the postcondition.
    metadata_proto.precondition.device.extend(
        metadata_proto.postcondition.device)

  # Detect downgrades and set up downgrade flags accordingly.
  if is_incremental:
    HandleDowngradeMetadata(metadata_proto, target_info, source_info)

  return metadata_proto
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 315 | |
| 316 | |
def BuildLegacyOtaMetadata(metadata_proto):
  """Converts the metadata proto to a legacy metadata dict.

  This metadata dict is used to build the legacy metadata text file for
  backward compatibility. We won't add new keys to the legacy metadata format.
  If new information is needed, we should add it as a new field in OtaMetadata
  proto definition.
  """

  separator = '|'
  metadata_dict = {}

  # Map the proto OTA type enum to its legacy string; unknown types are
  # simply omitted, matching the historical behavior.
  type_names = {
      ota_metadata_pb2.OtaMetadata.AB: 'AB',
      ota_metadata_pb2.OtaMetadata.BLOCK: 'BLOCK',
  }
  if metadata_proto.type in type_names:
    metadata_dict['ota-type'] = type_names[metadata_proto.type]

  # Boolean proto fields become legacy 'yes' flags; unset flags are absent.
  for is_set, legacy_key in (
      (metadata_proto.wipe, 'ota-wipe'),
      (metadata_proto.retrofit_dynamic_partitions,
       'ota-retrofit-dynamic-partitions'),
      (metadata_proto.downgrade, 'ota-downgrade'),
  ):
    if is_set:
      metadata_dict[legacy_key] = 'yes'

  metadata_dict['ota-required-cache'] = str(metadata_proto.required_cache)

  post_build = metadata_proto.postcondition
  metadata_dict['post-build'] = separator.join(post_build.build)
  metadata_dict['post-build-incremental'] = post_build.build_incremental
  metadata_dict['post-sdk-level'] = post_build.sdk_level
  metadata_dict['post-security-patch-level'] = post_build.security_patch_level
  metadata_dict['post-timestamp'] = str(post_build.timestamp)

  pre_build = metadata_proto.precondition
  metadata_dict['pre-device'] = separator.join(pre_build.device)
  # Source build info is only present for incremental updates.
  if pre_build.build:
    metadata_dict['pre-build'] = separator.join(pre_build.build)
    metadata_dict['pre-build-incremental'] = pre_build.build_incremental

  if metadata_proto.spl_downgrade:
    metadata_dict['spl-downgrade'] = 'yes'
  metadata_dict.update(metadata_proto.property_files)

  return metadata_dict
| 361 | |
| 362 | |
def HandleDowngradeMetadata(metadata_proto, target_info, source_info):
  """Validates timestamps against the downgrade flags and records them.

  Raises RuntimeError when the declared flags disagree with the actual
  timestamp ordering of the source and target builds.
  """
  # Only incremental OTAs are allowed to reach here.
  assert OPTIONS.incremental_source is not None

  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
  is_downgrade = int(post_timestamp) < int(pre_timestamp)

  if OPTIONS.spl_downgrade:
    metadata_proto.spl_downgrade = True

  # The --downgrade flag must agree with the timestamps in both directions.
  if OPTIONS.downgrade and not is_downgrade:
    raise RuntimeError(
        "--downgrade or --override_timestamp specified but no downgrade "
        "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
  if not OPTIONS.downgrade and is_downgrade:
    raise RuntimeError(
        "Downgrade detected based on timestamp check: pre: %s, post: %s. "
        "Need to specify --override_timestamp OR --downgrade to allow "
        "building the incremental." % (pre_timestamp, post_timestamp))
  if OPTIONS.downgrade:
    metadata_proto.downgrade = True
| 386 | |
| 387 | |
def ComputeRuntimeBuildInfos(default_build_info, boot_variable_values):
  """Returns a set of build info objects that may exist during runtime.

  Args:
    default_build_info: The BuildInfo built from the static info_dict.
    boot_variable_values: Dict mapping boot variable names to lists of
      possible runtime values; empty/None means no runtime variation.

  Returns:
    A set of BuildInfo objects, always including |default_build_info|, plus
    one per combination of boot variable values.
  """

  build_info_set = {default_build_info}
  if not boot_variable_values:
    return build_info_set

  # Calculate all possible combinations of the values for the boot variables.
  keys = boot_variable_values.keys()
  value_list = boot_variable_values.values()
  combinations = [dict(zip(keys, values))
                  for values in itertools.product(*value_list)]
  for placeholder_values in combinations:
    # Reload the info_dict as some build properties may change their values
    # based on the value of ro.boot* properties.
    info_dict = copy.deepcopy(default_build_info.info_dict)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      partition_prop_key = "{}.build.prop".format(partition)
      input_file = info_dict[partition_prop_key].input_file
      ramdisk = GetRamdiskFormat(info_dict)
      # Re-parse the partition's build.prop with this combination of
      # placeholder values substituted in.
      if isinstance(input_file, zipfile.ZipFile):
        with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
          info_dict[partition_prop_key] = \
              PartitionBuildProps.FromInputFile(input_zip, partition,
                                                placeholder_values,
                                                ramdisk)
      else:
        info_dict[partition_prop_key] = \
            PartitionBuildProps.FromInputFile(input_file, partition,
                                              placeholder_values,
                                              ramdisk)
    # Keep the top-level alias in sync with the reloaded system build.prop.
    info_dict["build.prop"] = info_dict["system.build.prop"]
    build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))

  return build_info_set
| 423 | |
| 424 | |
def CalculateRuntimeDevicesAndFingerprints(default_build_info,
                                           boot_variable_values):
  """Returns a tuple of sets for runtime devices and fingerprints"""

  runtime_infos = ComputeRuntimeBuildInfos(default_build_info,
                                           boot_variable_values)
  # Collect the distinct device names and fingerprints across all runtime
  # build-info variants.
  device_names = {info.device for info in runtime_infos}
  fingerprints = {info.fingerprint for info in runtime_infos}
  return device_names, fingerprints
| 437 | |
| 438 | |
def GetZipEntryOffset(zfp, entry_info):
  """Get offset to the beginning of a particular zip entry's file data.

  Args:
    zfp: A zipfile.ZipFile instance opened over a seekable file.
    entry_info: The zipfile.ZipInfo of the entry to locate.

  Returns:
    (offset, size) tuple, where |offset| is the absolute position of the
    entry's file data within the underlying file and |size| is the stored
    file size from the central directory.
  """
  # Don't use len(entry_info.extra). Because that returns size of extra
  # fields in central directory. We need to look at the local file header,
  # as these two might have different sizes.

  # We cannot work with the zipfile.ZipFile instance itself; we need the
  # underlying |fp| of the archive to read the local file header.
  fp = zfp.fp
  fp.seek(entry_info.header_offset)
  data = fp.read(zipfile.sizeFileHeader)
  fheader = struct.unpack(zipfile.structFileHeader, data)
  # Last two fields of the local file header are the filename length and
  # the extra field length; the file data starts right after them.
  filename_len = fheader[-2]
  extra_len = fheader[-1]
  offset = (entry_info.header_offset + zipfile.sizeFileHeader
            + filename_len + extra_len)
  return (offset, entry_info.file_size)
| 466 | |
| 467 | |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 468 | class PropertyFiles(object): |
| 469 | """A class that computes the property-files string for an OTA package. |
| 470 | |
| 471 | A property-files string is a comma-separated string that contains the |
| 472 | offset/size info for an OTA package. The entries, which must be ZIP_STORED, |
| 473 | can be fetched directly with the package URL along with the offset/size info. |
| 474 | These strings can be used for streaming A/B OTAs, or allowing an updater to |
| 475 | download package metadata entry directly, without paying the cost of |
| 476 | downloading entire package. |
| 477 | |
| 478 | Computing the final property-files string requires two passes. Because doing |
| 479 | the whole package signing (with signapk.jar) will possibly reorder the ZIP |
| 480 | entries, which may in turn invalidate earlier computed ZIP entry offset/size |
| 481 | values. |
| 482 | |
| 483 | This class provides functions to be called for each pass. The general flow is |
| 484 | as follows. |
| 485 | |
| 486 | property_files = PropertyFiles() |
| 487 | # The first pass, which writes placeholders before doing initial signing. |
| 488 | property_files.Compute() |
| 489 | SignOutput() |
| 490 | |
| 491 | # The second pass, by replacing the placeholders with actual data. |
| 492 | property_files.Finalize() |
| 493 | SignOutput() |
| 494 | |
| 495 | And the caller can additionally verify the final result. |
| 496 | |
| 497 | property_files.Verify() |
| 498 | """ |
| 499 | |
| 500 | def __init__(self): |
| 501 | self.name = None |
| 502 | self.required = () |
| 503 | self.optional = () |
| 504 | |
| 505 | def Compute(self, input_zip): |
| 506 | """Computes and returns a property-files string with placeholders. |
| 507 | |
| 508 | We reserve extra space for the offset and size of the metadata entry itself, |
| 509 | although we don't know the final values until the package gets signed. |
| 510 | |
| 511 | Args: |
| 512 | input_zip: The input ZIP file. |
| 513 | |
| 514 | Returns: |
| 515 | A string with placeholders for the metadata offset/size info, e.g. |
| 516 | "payload.bin:679:343,payload_properties.txt:378:45,metadata: ". |
| 517 | """ |
| 518 | return self.GetPropertyFilesString(input_zip, reserve_space=True) |
| 519 | |
| 520 | class InsufficientSpaceException(Exception): |
| 521 | pass |
| 522 | |
| 523 | def Finalize(self, input_zip, reserved_length): |
| 524 | """Finalizes a property-files string with actual METADATA offset/size info. |
| 525 | |
| 526 | The input ZIP file has been signed, with the ZIP entries in the desired |
| 527 | place (signapk.jar will possibly reorder the ZIP entries). Now we compute |
| 528 | the ZIP entry offsets and construct the property-files string with actual |
| 529 | data. Note that during this process, we must pad the property-files string |
| 530 | to the reserved length, so that the METADATA entry size remains the same. |
| 531 | Otherwise the entries' offsets and sizes may change again. |
| 532 | |
| 533 | Args: |
| 534 | input_zip: The input ZIP file. |
| 535 | reserved_length: The reserved length of the property-files string during |
| 536 | the call to Compute(). The final string must be no more than this |
| 537 | size. |
| 538 | |
| 539 | Returns: |
| 540 | A property-files string including the metadata offset/size info, e.g. |
| 541 | "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379 ". |
| 542 | |
| 543 | Raises: |
| 544 | InsufficientSpaceException: If the reserved length is insufficient to hold |
| 545 | the final string. |
| 546 | """ |
| 547 | result = self.GetPropertyFilesString(input_zip, reserve_space=False) |
| 548 | if len(result) > reserved_length: |
| 549 | raise self.InsufficientSpaceException( |
| 550 | 'Insufficient reserved space: reserved={}, actual={}'.format( |
| 551 | reserved_length, len(result))) |
| 552 | |
| 553 | result += ' ' * (reserved_length - len(result)) |
| 554 | return result |
| 555 | |
| 556 | def Verify(self, input_zip, expected): |
| 557 | """Verifies the input ZIP file contains the expected property-files string. |
| 558 | |
| 559 | Args: |
| 560 | input_zip: The input ZIP file. |
| 561 | expected: The property-files string that's computed from Finalize(). |
| 562 | |
| 563 | Raises: |
| 564 | AssertionError: On finding a mismatch. |
| 565 | """ |
| 566 | actual = self.GetPropertyFilesString(input_zip) |
| 567 | assert actual == expected, \ |
| 568 | "Mismatching streaming metadata: {} vs {}.".format(actual, expected) |
| 569 | |
| 570 | def GetPropertyFilesString(self, zip_file, reserve_space=False): |
| 571 | """ |
| 572 | Constructs the property-files string per request. |
| 573 | |
| 574 | Args: |
| 575 | zip_file: The input ZIP file. |
| 576 | reserved_length: The reserved length of the property-files string. |
| 577 | |
| 578 | Returns: |
| 579 | A property-files string including the metadata offset/size info, e.g. |
| 580 | "payload.bin:679:343,payload_properties.txt:378:45,metadata: ". |
| 581 | """ |
| 582 | |
| 583 | def ComputeEntryOffsetSize(name): |
| 584 | """Computes the zip entry offset and size.""" |
| 585 | info = zip_file.getinfo(name) |
Kelvin Zhang | 25ab998 | 2021-06-22 09:51:34 -0400 | [diff] [blame] | 586 | (offset, size) = GetZipEntryOffset(zip_file, info) |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 587 | return '%s:%d:%d' % (os.path.basename(name), offset, size) |
| 588 | |
| 589 | tokens = [] |
| 590 | tokens.extend(self._GetPrecomputed(zip_file)) |
| 591 | for entry in self.required: |
| 592 | tokens.append(ComputeEntryOffsetSize(entry)) |
| 593 | for entry in self.optional: |
| 594 | if entry in zip_file.namelist(): |
| 595 | tokens.append(ComputeEntryOffsetSize(entry)) |
| 596 | |
| 597 | # 'META-INF/com/android/metadata' is required. We don't know its actual |
| 598 | # offset and length (as well as the values for other entries). So we reserve |
| 599 | # 15-byte as a placeholder ('offset:length'), which is sufficient to cover |
| 600 | # the space for metadata entry. Because 'offset' allows a max of 10-digit |
| 601 | # (i.e. ~9 GiB), with a max of 4-digit for the length. Note that all the |
| 602 | # reserved space serves the metadata entry only. |
| 603 | if reserve_space: |
| 604 | tokens.append('metadata:' + ' ' * 15) |
Tianjie | a207613 | 2020-08-19 17:25:32 -0700 | [diff] [blame] | 605 | tokens.append('metadata.pb:' + ' ' * 15) |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 606 | else: |
| 607 | tokens.append(ComputeEntryOffsetSize(METADATA_NAME)) |
Luca Stefani | b6075c5 | 2021-11-03 17:10:54 +0100 | [diff] [blame] | 608 | if METADATA_PROTO_NAME in zip_file.namelist(): |
Kelvin Zhang | 2e1ff6e | 2022-10-10 10:58:57 -0700 | [diff] [blame] | 609 | tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME)) |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 610 | |
| 611 | return ','.join(tokens) |
| 612 | |
| 613 | def _GetPrecomputed(self, input_zip): |
| 614 | """Computes the additional tokens to be included into the property-files. |
| 615 | |
| 616 | This applies to tokens without actual ZIP entries, such as |
| 617 | payload_metadata.bin. We want to expose the offset/size to updaters, so |
| 618 | that they can download the payload metadata directly with the info. |
| 619 | |
| 620 | Args: |
| 621 | input_zip: The input zip file. |
| 622 | |
| 623 | Returns: |
| 624 | A list of strings (tokens) to be added to the property-files string. |
| 625 | """ |
| 626 | # pylint: disable=no-self-use |
| 627 | # pylint: disable=unused-argument |
| 628 | return [] |
| 629 | |
| 630 | |
Kelvin Zhang | bf01f8b | 2022-08-30 18:25:43 +0000 | [diff] [blame] | 631 | def SignOutput(temp_zip_name, output_zip_name, package_key=None, pw=None): |
| 632 | if package_key is None: |
| 633 | package_key = OPTIONS.package_key |
| 634 | if pw is None and OPTIONS.key_passwords: |
| 635 | pw = OPTIONS.key_passwords[package_key] |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 636 | |
Kelvin Zhang | bf01f8b | 2022-08-30 18:25:43 +0000 | [diff] [blame] | 637 | SignFile(temp_zip_name, output_zip_name, package_key, pw, |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 638 | whole_file=True) |
Tianjie | a5fca03 | 2021-06-01 22:06:28 -0700 | [diff] [blame] | 639 | |
| 640 | |
def ConstructOtaApexInfo(target_zip, source_file=None):
  """If applicable, add the source version to the apex info."""

  def _ReadApexInfo(input_zip):
    # apex_info.pb is optional in target-files; warn and bail out if absent.
    if not DoesInputFileContain(input_zip, "META/apex_info.pb"):
      logger.warning("target_file doesn't contain apex_info.pb %s", input_zip)
      return None
    return ReadBytesFromInputFile(input_zip, "META/apex_info.pb")

  target_apex_string = _ReadApexInfo(target_zip)
  # Nothing to annotate if the target info is missing/empty, or if this is a
  # full OTA (no source build).
  if not target_apex_string or not source_file:
    return target_apex_string

  source_apex_string = _ReadApexInfo(source_file)
  if not source_apex_string:
    return target_apex_string

  source_proto = ota_metadata_pb2.ApexMetadata()
  source_proto.ParseFromString(source_apex_string)
  versions_by_name = {
      apex.package_name: apex.version for apex in source_proto.apex_info}

  # For every target apex also present in the source build, record the
  # version it is upgrading from.
  target_proto = ota_metadata_pb2.ApexMetadata()
  target_proto.ParseFromString(target_apex_string)
  for target_apex in target_proto.apex_info:
    source_version = versions_by_name.get(target_apex.package_name)
    if source_version is not None:
      target_apex.source_version = source_version

  return target_proto.SerializeToString()
Kelvin Zhang | 410bb38 | 2022-01-06 09:15:54 -0800 | [diff] [blame] | 678 | |
| 679 | |
Kelvin Zhang | f2728d6 | 2022-01-10 11:42:36 -0800 | [diff] [blame] | 680 | def IsLz4diffCompatible(source_file: str, target_file: str): |
| 681 | """Check whether lz4diff versions in two builds are compatible |
| 682 | |
| 683 | Args: |
| 684 | source_file: Path to source build's target_file.zip |
| 685 | target_file: Path to target build's target_file.zip |
| 686 | |
| 687 | Returns: |
| 688 | bool true if and only if lz4diff versions are compatible |
| 689 | """ |
| 690 | if source_file is None or target_file is None: |
| 691 | return False |
| 692 | # Right now we enable lz4diff as long as source build has liblz4.so. |
| 693 | # In the future we might introduce version system to lz4diff as well. |
| 694 | if zipfile.is_zipfile(source_file): |
| 695 | with zipfile.ZipFile(source_file, "r") as zfp: |
| 696 | return "META/liblz4.so" in zfp.namelist() |
| 697 | else: |
| 698 | assert os.path.isdir(source_file) |
| 699 | return os.path.exists(os.path.join(source_file, "META", "liblz4.so")) |
| 700 | |
| 701 | |
Kelvin Zhang | 410bb38 | 2022-01-06 09:15:54 -0800 | [diff] [blame] | 702 | def IsZucchiniCompatible(source_file: str, target_file: str): |
| 703 | """Check whether zucchini versions in two builds are compatible |
| 704 | |
| 705 | Args: |
| 706 | source_file: Path to source build's target_file.zip |
| 707 | target_file: Path to target build's target_file.zip |
| 708 | |
| 709 | Returns: |
| 710 | bool true if and only if zucchini versions are compatible |
| 711 | """ |
| 712 | if source_file is None or target_file is None: |
| 713 | return False |
| 714 | assert os.path.exists(source_file) |
| 715 | assert os.path.exists(target_file) |
| 716 | |
| 717 | assert zipfile.is_zipfile(source_file) or os.path.isdir(source_file) |
| 718 | assert zipfile.is_zipfile(target_file) or os.path.isdir(target_file) |
| 719 | _ZUCCHINI_CONFIG_ENTRY_NAME = "META/zucchini_config.txt" |
| 720 | |
| 721 | def ReadEntry(path, entry): |
| 722 | # Read an entry inside a .zip file or extracted dir of .zip file |
| 723 | if zipfile.is_zipfile(path): |
| 724 | with zipfile.ZipFile(path, "r", allowZip64=True) as zfp: |
| 725 | if entry in zfp.namelist(): |
| 726 | return zfp.read(entry).decode() |
| 727 | else: |
Zhou Xuezan | d0d49f5 | 2022-09-14 16:26:55 +0800 | [diff] [blame] | 728 | entry_path = os.path.join(path, entry) |
Kelvin Zhang | 410bb38 | 2022-01-06 09:15:54 -0800 | [diff] [blame] | 729 | if os.path.exists(entry_path): |
| 730 | with open(entry_path, "r") as fp: |
| 731 | return fp.read() |
HÃ¥kan Kvist | 3db1ef6 | 2022-05-03 10:19:41 +0200 | [diff] [blame] | 732 | return False |
| 733 | sourceEntry = ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME) |
| 734 | targetEntry = ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME) |
| 735 | return sourceEntry and targetEntry and sourceEntry == targetEntry |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 736 | |
| 737 | |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 738 | def ExtractTargetFiles(path: str): |
| 739 | if os.path.isdir(path): |
| 740 | logger.info("target files %s is already extracted", path) |
| 741 | return path |
| 742 | extracted_dir = common.MakeTempDir("target_files") |
Kelvin Zhang | 8f83000 | 2023-08-16 13:16:48 -0700 | [diff] [blame] | 743 | logger.info(f"Extracting target files {path} to {extracted_dir}") |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 744 | common.UnzipToDir(path, extracted_dir, UNZIP_PATTERN + [""]) |
Kelvin Zhang | 2268091 | 2023-05-19 13:12:59 -0700 | [diff] [blame] | 745 | for subdir in TARGET_FILES_IMAGES_SUBDIR: |
| 746 | image_dir = os.path.join(extracted_dir, subdir) |
| 747 | if not os.path.exists(image_dir): |
| 748 | continue |
| 749 | for filename in os.listdir(image_dir): |
| 750 | if not filename.endswith(".img"): |
| 751 | continue |
| 752 | common.UnsparseImage(os.path.join(image_dir, filename)) |
| 753 | |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 754 | return extracted_dir |
| 755 | |
| 756 | |
def LocatePartitionPath(target_files_dir: str, partition: str, allow_empty):
  """Returns the path to |partition|.img inside a target-files directory.

  RADIO/ is searched before IMAGES/. When the image is absent, returns ""
  if allow_empty is truthy, otherwise raises common.ExternalError.
  """
  for subdir in ("RADIO", "IMAGES"):
    candidate = os.path.join(target_files_dir, subdir, partition + ".img")
    if os.path.exists(candidate):
      return candidate
  if allow_empty:
    return ""
  raise common.ExternalError(
      "Partition {} not found in target files {}".format(partition, target_files_dir))
| 768 | |
| 769 | |
def GetPartitionImages(target_files_dir: str, ab_partitions, allow_empty=True):
  """Returns a colon-separated string of image paths for |ab_partitions|."""
  assert os.path.isdir(target_files_dir)
  paths = (LocatePartitionPath(target_files_dir, name, allow_empty)
           for name in ab_partitions)
  return ":".join(paths)
| 773 | |
| 774 | |
def LocatePartitionMap(target_files_dir: str, partition: str):
  """Returns the path to |partition|.map, or "" when no map file exists.

  RADIO/ is searched before IMAGES/.
  """
  for subdir in ("RADIO", "IMAGES"):
    candidate = os.path.join(target_files_dir, subdir, partition + ".map")
    if os.path.exists(candidate):
      return candidate
  return ""
| 783 | |
| 784 | |
def GetPartitionMaps(target_files_dir: str, ab_partitions):
  """Returns a colon-separated string of .map paths for |ab_partitions|.

  Partitions without a map file contribute an empty segment.
  """
  assert os.path.isdir(target_files_dir)
  maps = (LocatePartitionMap(target_files_dir, name) for name in ab_partitions)
  return ":".join(maps)
| 788 | |
| 789 | |
Kelvin Zhang | fa92869 | 2022-08-16 17:01:53 +0000 | [diff] [blame] | 790 | class PayloadGenerator(object): |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 791 | """Manages the creation and the signing of an A/B OTA Payload.""" |
| 792 | |
| 793 | PAYLOAD_BIN = 'payload.bin' |
| 794 | PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt' |
| 795 | SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin' |
| 796 | SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt' |
| 797 | |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 798 | def __init__(self, secondary=False, wipe_user_data=False, minor_version=None, is_partial_update=False): |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 799 | """Initializes a Payload instance. |
| 800 | |
| 801 | Args: |
| 802 | secondary: Whether it's generating a secondary payload (default: False). |
| 803 | """ |
| 804 | self.payload_file = None |
| 805 | self.payload_properties = None |
| 806 | self.secondary = secondary |
Kelvin Zhang | bf01f8b | 2022-08-30 18:25:43 +0000 | [diff] [blame] | 807 | self.wipe_user_data = wipe_user_data |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 808 | self.minor_version = minor_version |
| 809 | self.is_partial_update = is_partial_update |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 810 | |
| 811 | def _Run(self, cmd): # pylint: disable=no-self-use |
| 812 | # Don't pipe (buffer) the output if verbose is set. Let |
| 813 | # brillo_update_payload write to stdout/stderr directly, so its progress can |
| 814 | # be monitored. |
| 815 | if OPTIONS.verbose: |
| 816 | common.RunAndCheckOutput(cmd, stdout=None, stderr=None) |
| 817 | else: |
| 818 | common.RunAndCheckOutput(cmd) |
| 819 | |
| 820 | def Generate(self, target_file, source_file=None, additional_args=None): |
| 821 | """Generates a payload from the given target-files zip(s). |
| 822 | |
| 823 | Args: |
| 824 | target_file: The filename of the target build target-files zip. |
| 825 | source_file: The filename of the source build target-files zip; or None if |
| 826 | generating a full OTA. |
| 827 | additional_args: A list of additional args that should be passed to |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 828 | delta_generator binary; or None. |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 829 | """ |
| 830 | if additional_args is None: |
| 831 | additional_args = [] |
| 832 | |
| 833 | payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin") |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 834 | target_dir = ExtractTargetFiles(target_file) |
| 835 | cmd = ["delta_generator", |
| 836 | "--out_file", payload_file] |
Kelvin Zhang | 89b87f6 | 2023-06-01 10:23:05 -0700 | [diff] [blame] | 837 | with open(os.path.join(target_dir, "META", "ab_partitions.txt"), "r") as fp: |
| 838 | ab_partitions = fp.read().strip().splitlines() |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 839 | cmd.extend(["--partition_names", ":".join(ab_partitions)]) |
| 840 | cmd.extend( |
| 841 | ["--new_partitions", GetPartitionImages(target_dir, ab_partitions, False)]) |
| 842 | cmd.extend( |
| 843 | ["--new_mapfiles", GetPartitionMaps(target_dir, ab_partitions)]) |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 844 | if source_file is not None: |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 845 | source_dir = ExtractTargetFiles(source_file) |
| 846 | cmd.extend( |
| 847 | ["--old_partitions", GetPartitionImages(source_dir, ab_partitions, True)]) |
| 848 | cmd.extend( |
| 849 | ["--old_mapfiles", GetPartitionMaps(source_dir, ab_partitions)]) |
| 850 | |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 851 | if OPTIONS.disable_fec_computation: |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 852 | cmd.extend(["--disable_fec_computation=true"]) |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 853 | if OPTIONS.disable_verity_computation: |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 854 | cmd.extend(["--disable_verity_computation=true"]) |
| 855 | postinstall_config = os.path.join( |
| 856 | target_dir, "META", "postinstall_config.txt") |
| 857 | |
| 858 | if os.path.exists(postinstall_config): |
| 859 | cmd.extend(["--new_postinstall_config_file", postinstall_config]) |
| 860 | dynamic_partition_info = os.path.join( |
| 861 | target_dir, "META", "dynamic_partitions_info.txt") |
| 862 | |
| 863 | if os.path.exists(dynamic_partition_info): |
| 864 | cmd.extend(["--dynamic_partition_info_file", dynamic_partition_info]) |
| 865 | |
HÃ¥kan Kvist | ddb968d | 2023-06-09 11:59:22 +0200 | [diff] [blame] | 866 | apex_info = os.path.join( |
Kelvin Zhang | 8f83000 | 2023-08-16 13:16:48 -0700 | [diff] [blame] | 867 | target_dir, "META", "apex_info.pb") |
HÃ¥kan Kvist | ddb968d | 2023-06-09 11:59:22 +0200 | [diff] [blame] | 868 | if os.path.exists(apex_info): |
| 869 | cmd.extend(["--apex_info_file", apex_info]) |
| 870 | |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 871 | major_version, minor_version = ParseUpdateEngineConfig( |
| 872 | os.path.join(target_dir, "META", "update_engine_config.txt")) |
Kelvin Zhang | 629bc8d | 2023-04-11 21:08:27 -0700 | [diff] [blame] | 873 | if source_file: |
| 874 | major_version, minor_version = ParseUpdateEngineConfig( |
| 875 | os.path.join(source_dir, "META", "update_engine_config.txt")) |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 876 | if self.minor_version: |
| 877 | minor_version = self.minor_version |
| 878 | cmd.extend(["--major_version", str(major_version)]) |
| 879 | if source_file is not None or self.is_partial_update: |
| 880 | cmd.extend(["--minor_version", str(minor_version)]) |
| 881 | if self.is_partial_update: |
| 882 | cmd.extend(["--is_partial_update=true"]) |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 883 | cmd.extend(additional_args) |
| 884 | self._Run(cmd) |
| 885 | |
| 886 | self.payload_file = payload_file |
| 887 | self.payload_properties = None |
| 888 | |
| 889 | def Sign(self, payload_signer): |
| 890 | """Generates and signs the hashes of the payload and metadata. |
| 891 | |
| 892 | Args: |
| 893 | payload_signer: A PayloadSigner() instance that serves the signing work. |
| 894 | |
| 895 | Raises: |
| 896 | AssertionError: On any failure when calling brillo_update_payload script. |
| 897 | """ |
| 898 | assert isinstance(payload_signer, PayloadSigner) |
| 899 | |
| 900 | # 1. Generate hashes of the payload and metadata files. |
| 901 | payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin") |
| 902 | metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin") |
| 903 | cmd = ["brillo_update_payload", "hash", |
| 904 | "--unsigned_payload", self.payload_file, |
| 905 | "--signature_size", str(payload_signer.maximum_signature_size), |
| 906 | "--metadata_hash_file", metadata_sig_file, |
| 907 | "--payload_hash_file", payload_sig_file] |
| 908 | self._Run(cmd) |
| 909 | |
| 910 | # 2. Sign the hashes. |
Kelvin Zhang | bf01f8b | 2022-08-30 18:25:43 +0000 | [diff] [blame] | 911 | signed_payload_sig_file = payload_signer.SignHashFile(payload_sig_file) |
| 912 | signed_metadata_sig_file = payload_signer.SignHashFile(metadata_sig_file) |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 913 | |
| 914 | # 3. Insert the signatures back into the payload file. |
| 915 | signed_payload_file = common.MakeTempFile(prefix="signed-payload-", |
| 916 | suffix=".bin") |
| 917 | cmd = ["brillo_update_payload", "sign", |
| 918 | "--unsigned_payload", self.payload_file, |
| 919 | "--payload", signed_payload_file, |
| 920 | "--signature_size", str(payload_signer.maximum_signature_size), |
| 921 | "--metadata_signature_file", signed_metadata_sig_file, |
| 922 | "--payload_signature_file", signed_payload_sig_file] |
| 923 | self._Run(cmd) |
| 924 | |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 925 | self.payload_file = signed_payload_file |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 926 | |
| 927 | def WriteToZip(self, output_zip): |
| 928 | """Writes the payload to the given zip. |
| 929 | |
| 930 | Args: |
| 931 | output_zip: The output ZipFile instance. |
| 932 | """ |
| 933 | assert self.payload_file is not None |
Kelvin Zhang | bf01f8b | 2022-08-30 18:25:43 +0000 | [diff] [blame] | 934 | # 4. Dump the signed payload properties. |
| 935 | properties_file = common.MakeTempFile(prefix="payload-properties-", |
| 936 | suffix=".txt") |
Kelvin Zhang | c7441e5 | 2023-08-22 08:56:30 -0700 | [diff] [blame^] | 937 | cmd = ["delta_generator", |
| 938 | "--in_file=" + self.payload_file, |
| 939 | "--properties_file=" + properties_file] |
Kelvin Zhang | bf01f8b | 2022-08-30 18:25:43 +0000 | [diff] [blame] | 940 | self._Run(cmd) |
| 941 | |
| 942 | if self.secondary: |
| 943 | with open(properties_file, "a") as f: |
| 944 | f.write("SWITCH_SLOT_ON_REBOOT=0\n") |
| 945 | |
| 946 | if self.wipe_user_data: |
| 947 | with open(properties_file, "a") as f: |
| 948 | f.write("POWERWASH=1\n") |
| 949 | |
| 950 | self.payload_properties = properties_file |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 951 | |
| 952 | if self.secondary: |
Kelvin Zhang | fa92869 | 2022-08-16 17:01:53 +0000 | [diff] [blame] | 953 | payload_arcname = PayloadGenerator.SECONDARY_PAYLOAD_BIN |
| 954 | payload_properties_arcname = PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 955 | else: |
Kelvin Zhang | fa92869 | 2022-08-16 17:01:53 +0000 | [diff] [blame] | 956 | payload_arcname = PayloadGenerator.PAYLOAD_BIN |
| 957 | payload_properties_arcname = PayloadGenerator.PAYLOAD_PROPERTIES_TXT |
Kelvin Zhang | 62a7f6e | 2022-08-30 17:41:29 +0000 | [diff] [blame] | 958 | |
| 959 | # Add the signed payload file and properties into the zip. In order to |
| 960 | # support streaming, we pack them as ZIP_STORED. So these entries can be |
| 961 | # read directly with the offset and length pairs. |
| 962 | common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname, |
| 963 | compress_type=zipfile.ZIP_STORED) |
| 964 | common.ZipWrite(output_zip, self.payload_properties, |
| 965 | arcname=payload_properties_arcname, |
| 966 | compress_type=zipfile.ZIP_STORED) |
| 967 | |
| 968 | |
class StreamingPropertyFiles(PropertyFiles):
  """A subclass for computing the property-files for streaming A/B OTAs."""

  def __init__(self):
    super().__init__()
    self.name = 'ota-streaming-property-files'
    # payload.bin and payload_properties.txt must exist.
    self.required = ('payload.bin', 'payload_properties.txt')
    self.optional = (
        # apex_info.pb isn't directly used in the update flow
        'apex_info.pb',
        # care_map is available only if dm-verity is enabled.
        'care_map.pb',
        'care_map.txt',
        # compatibility.zip is available only if target supports Treble.
        'compatibility.zip',
    )
| 989 | |
| 990 | |
class AbOtaPropertyFiles(StreamingPropertyFiles):
  """The property-files for A/B OTA that includes payload_metadata.bin info.

  Since P, we expose one more token (aka property-file), in addition to the ones
  for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
  doesn't exist as a separate ZIP entry, but can be used to verify if the
  payload can be applied on the given device.

  For backward compatibility, we keep both of the 'ota-streaming-property-files'
  and the newly added 'ota-property-files' in P. The new token will only be
  available in 'ota-property-files'.
  """

  def __init__(self):
    super(AbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'

  def _GetPrecomputed(self, input_zip):
    # Adds the virtual 'payload_metadata.bin' token (a slice of payload.bin).
    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
    return ['payload_metadata.bin:{}:{}'.format(offset, size)]

  @staticmethod
  def _GetPayloadMetadataOffsetAndSize(input_zip):
    """Computes the offset and size of the payload metadata for a given package.

    (From system/update_engine/update_metadata.proto)
    A delta update file contains all the deltas needed to update a system from
    one specific version to another specific version. The update format is
    represented by this struct pseudocode:

    struct delta_update_file {
      char magic[4] = "CrAU";
      uint64 file_format_version;
      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest

      // Only present if format_version > 1:
      uint32 metadata_signature_size;

      // The Bzip2 compressed DeltaArchiveManifest
      char manifest[manifest_size];

      // The signature of the metadata (from the beginning of the payload up to
      // this location, not including the signature itself). This is a
      // serialized Signatures message.
      char metadata_signature_message[metadata_signature_size];

      // Data blobs for files, no specific format. The specific offset
      // and length of each data blob is recorded in the DeltaArchiveManifest.
      struct {
        char data[];
      } blobs[];

      // These two are not signed:
      uint64 payload_signatures_message_size;
      char payload_signatures_message[];
    };

    'payload-metadata.bin' contains all the bytes from the beginning of the
    payload, till the end of 'metadata_signature_message'.
    """
    payload_info = input_zip.getinfo('payload.bin')
    (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info)

    # Read the underlying raw zipfile at specified offset
    payload_fp = input_zip.fp
    # The fixed header is 24 bytes: magic(4) + file_format_version(8) +
    # manifest_size(8) + metadata_signature_size(4).
    payload_fp.seek(payload_offset)
    header_bin = payload_fp.read(24)

    # network byte order (big-endian)
    header = struct.unpack("!IQQL", header_bin)

    # 'CrAU'
    magic = header[0]
    assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \
        .format(magic, payload_offset)

    manifest_size = header[2]
    metadata_signature_size = header[3]
    # Metadata spans the fixed header, the manifest, and its signature.
    metadata_total = 24 + manifest_size + metadata_signature_size
    assert metadata_total <= payload_size

    return (payload_offset, metadata_total)
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 1074 | |
| 1075 | |
def Fnmatch(filename, pattersn):
  """Returns True if |filename| matches any glob pattern in |pattersn|.

  NOTE(review): the parameter name is misspelled ("pattersn") but is kept
  unchanged for backward compatibility with any keyword callers.
  """
  # Use a generator expression so matching short-circuits on the first hit
  # instead of materializing the whole list of results.
  return any(fnmatch.fnmatch(filename, pat) for pat in pattersn)
| 1078 | |
| 1079 | |
def CopyTargetFilesDir(input_dir):
  """Duplicates the OTA-relevant parts of an extracted target-files dir.

  Image subdirectories are replicated via hard links (sparse images are
  expanded instead), META/ is copied wholesale, and property/vintf files
  matching UNZIP_PATTERN are copied over. Returns the new temp directory.
  """
  output_dir = common.MakeTempDir("target_files")

  def _LinkOrUnsparse(src, dst):
    # Sparse images must be expanded; everything else is cheap to hard-link.
    if common.IsSparseImage(src):
      return common.UnsparseImage(src, dst)
    return os.link(src, dst)

  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    src_subdir = os.path.join(input_dir, subdir)
    if not os.path.exists(src_subdir):
      continue
    shutil.copytree(src_subdir, os.path.join(output_dir, subdir),
                    dirs_exist_ok=True, copy_function=_LinkOrUnsparse)
  shutil.copytree(os.path.join(input_dir, "META"),
                  os.path.join(output_dir, "META"), dirs_exist_ok=True)

  for dirpath, _, filenames in os.walk(input_dir):
    for filename in filenames:
      path = os.path.join(dirpath, filename)
      relative_path = path.removeprefix(input_dir).removeprefix("/")
      if not Fnmatch(relative_path, UNZIP_PATTERN):
        continue
      is_prop_file = filename.endswith(".prop") or filename == "prop.default"
      if is_prop_file or "/etc/vintf/" in relative_path:
        target_path = os.path.join(output_dir, relative_path)
        os.makedirs(os.path.dirname(target_path), exist_ok=True)
        shutil.copy(path, target_path)
  return output_dir