# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import itertools
import logging
import os
import struct
import zipfile

import ota_metadata_pb2
from common import (ZipDelete, ZipClose, OPTIONS, MakeTempFile,
                    ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
                    SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps)

logger = logging.getLogger(__name__)

OPTIONS.no_signing = False
OPTIONS.force_non_ab = False
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.key_passwords = {}
OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.output_metadata_path = None
OPTIONS.boot_variable_file = None

METADATA_NAME = 'META-INF/com/android/metadata'
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"


def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
  """Finalizes the metadata and signs an A/B OTA package.

  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
  that contains the offsets and sizes for the ZIP entries. An example
  property-files string is as follows.

    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"

  The OTA server can pass this string, along with the package URL, down to the
  system update client. The client can then fetch individual ZIP entries
  (ZIP_STORED) directly at the given offsets within the package URL.

  Args:
    metadata: The metadata dict for the package.
    input_file: The input ZIP filename that doesn't contain the package
        METADATA entry yet.
    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles to be generated.
  """

  def ComputeAllPropertyFiles(input_file, needed_property_files):
    # Write the current metadata entry with placeholders.
    with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Compute(
            input_zip)
      namelist = input_zip.namelist()

    if METADATA_NAME in namelist or METADATA_PROTO_NAME in namelist:
      ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME])
    output_zip = zipfile.ZipFile(input_file, 'a', allowZip64=True)
    WriteMetadata(metadata, output_zip)
    ZipClose(output_zip)

    if OPTIONS.no_signing:
      return input_file

    prelim_signing = MakeTempFile(suffix='.zip')
    SignOutput(input_file, prelim_signing)
    return prelim_signing

  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
    with zipfile.ZipFile(prelim_signing, allowZip64=True) as prelim_signing_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Finalize(
            prelim_signing_zip,
            len(metadata.property_files[property_files.name]))

  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
  # ZIP entries, as well as padding the entry headers. We do a preliminary
  # signing (with an incomplete metadata entry) to allow that to happen. Then
  # compute the ZIP entry offsets, write back the final metadata and do the
  # final signing.
  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
  try:
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
  except PropertyFiles.InsufficientSpaceException:
    # Even with the preliminary signing, the entry orders may change
    # dramatically, which leads to insufficiently reserved space during the
    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing work, based on the already ordered ZIP entries, to
    # address the issue.
    prelim_signing = ComputeAllPropertyFiles(
        prelim_signing, needed_property_files)
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)

  # Replace the METADATA entry.
  ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
  output_zip = zipfile.ZipFile(prelim_signing, 'a', allowZip64=True)
  WriteMetadata(metadata, output_zip)
  ZipClose(output_zip)

  # Re-sign the package after updating the metadata entry.
  if OPTIONS.no_signing:
    output_file = prelim_signing
  else:
    SignOutput(prelim_signing, output_file)

  # Reopen the final signed zip to double check the streaming metadata.
  with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
    for property_files in needed_property_files:
      property_files.Verify(
          output_zip, metadata.property_files[property_files.name].strip())

  # If requested, dump the metadata to a separate file.
  output_metadata_path = OPTIONS.output_metadata_path
  if output_metadata_path:
    WriteMetadata(metadata, output_metadata_path)


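# Illustrative sketch, not part of the original tool: the property-files string
# documented in FinalizeMetadata() is a comma-separated list of
# "name:offset:size" tokens. A consumer (e.g. an OTA server or update client)
# could split a finalized string back into tuples along these lines; the helper
# name below is hypothetical.
def _ExampleParsePropertyFilesString(property_files_string):
  """Parses "payload.bin:679:343,metadata:69:379" into (name, offset, size)."""
  entries = []
  for token in property_files_string.strip().split(','):
    name, offset, size = token.rsplit(':', 2)
    entries.append((name, int(offset), int(size)))
  return entries

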
def WriteMetadata(metadata_proto, output):
  """Writes the metadata to the zip archive or a file.

  Args:
    metadata_proto: The metadata protobuf for the package.
    output: A ZipFile object or a string of the output file path. If a string
        path is given, the metadata in the protobuf format will be written to
        {output}.pb, e.g. ota_metadata.pb
  """

  metadata_dict = BuildLegacyOtaMetadata(metadata_proto)
  legacy_metadata = "".join(["%s=%s\n" % kv for kv in
                             sorted(metadata_dict.items())])
  if isinstance(output, zipfile.ZipFile):
    ZipWriteStr(output, METADATA_PROTO_NAME, metadata_proto.SerializeToString(),
                compress_type=zipfile.ZIP_STORED)
    ZipWriteStr(output, METADATA_NAME, legacy_metadata,
                compress_type=zipfile.ZIP_STORED)
    return

  with open('{}.pb'.format(output), 'wb') as f:
    f.write(metadata_proto.SerializeToString())
  with open(output, 'w') as f:
    f.write(legacy_metadata)


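# Illustrative sketch, not part of the original tool: WriteMetadata() stores
# the metadata twice, as a protobuf (METADATA_PROTO_NAME) and as legacy
# key=value text (METADATA_NAME). Reading both back from a finished package
# could look like this; the helper name and the package path are hypothetical.
def _ExampleReadMetadataEntries(package_path):
  with zipfile.ZipFile(package_path, 'r', allowZip64=True) as package_zip:
    metadata_proto = ota_metadata_pb2.OtaMetadata()
    metadata_proto.ParseFromString(package_zip.read(METADATA_PROTO_NAME))
    legacy_text = package_zip.read(METADATA_NAME).decode('utf-8')
  return metadata_proto, legacy_text

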
def UpdateDeviceState(device_state, build_info, boot_variable_values,
                      is_post_build):
  """Update the fields of the DeviceState proto with build info."""

  def UpdatePartitionStates(partition_states):
    """Update the per-partition state according to its build.prop"""
    if not build_info.is_ab:
      return
    build_info_set = ComputeRuntimeBuildInfos(build_info,
                                              boot_variable_values)
    assert "ab_partitions" in build_info.info_dict, \
        "ab_partitions property required for ab update."
    ab_partitions = set(build_info.info_dict.get("ab_partitions"))

    # delta_generator will error out on unused timestamps,
    # so only generate timestamps for dynamic partitions
    # used in OTA update.
    for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
      partition_prop = build_info.info_dict.get(
          '{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop
      if not partition_prop or not partition_prop.build_props:
        continue

      partition_state = partition_states.add()
      partition_state.partition_name = partition
      # Update the partition's runtime device names and fingerprints
      partition_devices = set()
      partition_fingerprints = set()
      for runtime_build_info in build_info_set:
        partition_devices.add(
            runtime_build_info.GetPartitionBuildProp('ro.product.device',
                                                     partition))
        partition_fingerprints.add(
            runtime_build_info.GetPartitionFingerprint(partition))

      partition_state.device.extend(sorted(partition_devices))
      partition_state.build.extend(sorted(partition_fingerprints))

      # TODO(xunchang) set the boot image's version with kmi. Note the boot
      # image doesn't have a file map.
      partition_state.version = build_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)

  # TODO(xunchang), we can save a call to ComputeRuntimeBuildInfos.
  build_devices, build_fingerprints = \
      CalculateRuntimeDevicesAndFingerprints(build_info, boot_variable_values)
  device_state.device.extend(sorted(build_devices))
  device_state.build.extend(sorted(build_fingerprints))
  device_state.build_incremental = build_info.GetBuildProp(
      'ro.build.version.incremental')

  UpdatePartitionStates(device_state.partition_state)

  if is_post_build:
    device_state.sdk_level = build_info.GetBuildProp(
        'ro.build.version.sdk')
    device_state.security_patch_level = build_info.GetBuildProp(
        'ro.build.version.security_patch')
    # Use the actual post-timestamp, even for a downgrade case.
    device_state.timestamp = int(build_info.GetBuildProp('ro.build.date.utc'))


def GetPackageMetadata(target_info, source_info=None):
  """Generates and returns the metadata proto.

  It generates an ota_metadata protobuf that contains the info to be written
  into an OTA package (META-INF/com/android/metadata.pb). It also handles the
  detection of downgrade / data wipe based on the global options.

  Args:
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, or
        None if generating a full OTA.

  Returns:
    A protobuf to be written into the package metadata entry.
  """
  assert isinstance(target_info, BuildInfo)
  assert source_info is None or isinstance(source_info, BuildInfo)

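  # Note (illustrative, not part of the original tool; values hypothetical):
  # OPTIONS.boot_variable_file points at a key=value dictionary file whose
  # values are comma-separated lists, e.g.
  #
  #   ro.boot.sku=sku_a,sku_b
  #   ro.boot.carrier=carrier_x,carrier_y
  #
  # Each key then contributes one axis to the runtime build-info combinations
  # expanded by ComputeRuntimeBuildInfos().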
  boot_variable_values = {}
  if OPTIONS.boot_variable_file:
    d = LoadDictionaryFromFile(OPTIONS.boot_variable_file)
    for key, values in d.items():
      boot_variable_values[key] = [val.strip() for val in values.split(',')]

  metadata_proto = ota_metadata_pb2.OtaMetadata()
  # TODO(xunchang) some fields, e.g. post-device isn't necessary. We can
  # consider skipping them if they aren't used by clients.
  UpdateDeviceState(metadata_proto.postcondition, target_info,
                    boot_variable_values, True)

  if target_info.is_ab and not OPTIONS.force_non_ab:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.AB
    metadata_proto.required_cache = 0
  else:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.BLOCK
    # The cache requirement will be updated by the non-A/B code.

  if OPTIONS.wipe_user_data:
    metadata_proto.wipe = True

  if OPTIONS.retrofit_dynamic_partitions:
    metadata_proto.retrofit_dynamic_partitions = True

  is_incremental = source_info is not None
  if is_incremental:
    UpdateDeviceState(metadata_proto.precondition, source_info,
                      boot_variable_values, False)
  else:
    metadata_proto.precondition.device.extend(
        metadata_proto.postcondition.device)

  # Detect downgrades and set up downgrade flags accordingly.
  if is_incremental:
    HandleDowngradeMetadata(metadata_proto, target_info, source_info)

  return metadata_proto


def BuildLegacyOtaMetadata(metadata_proto):
  """Converts the metadata proto to a legacy metadata dict.

  This metadata dict is used to build the legacy metadata text file for
  backward compatibility. We won't add new keys to the legacy metadata format.
  If new information is needed, we should add it as a new field in the
  OtaMetadata proto definition.
  """

  separator = '|'

  metadata_dict = {}
  if metadata_proto.type == ota_metadata_pb2.OtaMetadata.AB:
    metadata_dict['ota-type'] = 'AB'
  elif metadata_proto.type == ota_metadata_pb2.OtaMetadata.BLOCK:
    metadata_dict['ota-type'] = 'BLOCK'
  if metadata_proto.wipe:
    metadata_dict['ota-wipe'] = 'yes'
  if metadata_proto.retrofit_dynamic_partitions:
    metadata_dict['ota-retrofit-dynamic-partitions'] = 'yes'
  if metadata_proto.downgrade:
    metadata_dict['ota-downgrade'] = 'yes'

  metadata_dict['ota-required-cache'] = str(metadata_proto.required_cache)

  post_build = metadata_proto.postcondition
  metadata_dict['post-build'] = separator.join(post_build.build)
  metadata_dict['post-build-incremental'] = post_build.build_incremental
  metadata_dict['post-sdk-level'] = post_build.sdk_level
  metadata_dict['post-security-patch-level'] = post_build.security_patch_level
  metadata_dict['post-timestamp'] = str(post_build.timestamp)

  pre_build = metadata_proto.precondition
  metadata_dict['pre-device'] = separator.join(pre_build.device)
  # incremental updates
  if len(pre_build.build) != 0:
    metadata_dict['pre-build'] = separator.join(pre_build.build)
    metadata_dict['pre-build-incremental'] = pre_build.build_incremental

  if metadata_proto.spl_downgrade:
    metadata_dict['spl-downgrade'] = 'yes'
  metadata_dict.update(metadata_proto.property_files)

  return metadata_dict


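# Illustrative sketch, not part of the original tool: with the keys emitted by
# BuildLegacyOtaMetadata() and the alphabetical sort applied in
# WriteMetadata(), the legacy METADATA text entry for a full A/B package looks
# roughly like the following (all values hypothetical):
#
#   ota-required-cache=0
#   ota-type=AB
#   post-build=google/dev/dev:11/RP1A.000000.000/0000000:user/release-keys
#   post-build-incremental=0000000
#   post-sdk-level=30
#   post-security-patch-level=2020-09-05
#   post-timestamp=1600000000
#   pre-device=dev

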
def HandleDowngradeMetadata(metadata_proto, target_info, source_info):
  # Only incremental OTAs are allowed to reach here.
  assert OPTIONS.incremental_source is not None

  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
  is_downgrade = int(post_timestamp) < int(pre_timestamp)

  if OPTIONS.spl_downgrade:
    metadata_proto.spl_downgrade = True

  if OPTIONS.downgrade:
    if not is_downgrade:
      raise RuntimeError(
          "--downgrade or --override_timestamp specified but no downgrade "
          "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
    metadata_proto.downgrade = True
  else:
    if is_downgrade:
      raise RuntimeError(
          "Downgrade detected based on timestamp check: pre: %s, post: %s. "
          "Need to specify --override_timestamp OR --downgrade to allow "
          "building the incremental." % (pre_timestamp, post_timestamp))


def ComputeRuntimeBuildInfos(default_build_info, boot_variable_values):
  """Returns a set of build info objects that may exist during runtime."""

  build_info_set = {default_build_info}
  if not boot_variable_values:
    return build_info_set

  # Calculate all possible combinations of the values for the boot variables.
  keys = boot_variable_values.keys()
  value_list = boot_variable_values.values()
  combinations = [dict(zip(keys, values))
                  for values in itertools.product(*value_list)]
  for placeholder_values in combinations:
    # Reload the info_dict as some build properties may change their values
    # based on the value of ro.boot* properties.
    info_dict = copy.deepcopy(default_build_info.info_dict)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      partition_prop_key = "{}.build.prop".format(partition)
      input_file = info_dict[partition_prop_key].input_file
      if isinstance(input_file, zipfile.ZipFile):
        with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
          info_dict[partition_prop_key] = \
              PartitionBuildProps.FromInputFile(input_zip, partition,
                                                placeholder_values)
      else:
        info_dict[partition_prop_key] = \
            PartitionBuildProps.FromInputFile(input_file, partition,
                                              placeholder_values)
    info_dict["build.prop"] = info_dict["system.build.prop"]
    build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))

  return build_info_set


def CalculateRuntimeDevicesAndFingerprints(default_build_info,
                                           boot_variable_values):
  """Returns a tuple of sets for runtime devices and fingerprints"""

  device_names = set()
  fingerprints = set()
  build_info_set = ComputeRuntimeBuildInfos(default_build_info,
                                            boot_variable_values)
  for runtime_build_info in build_info_set:
    device_names.add(runtime_build_info.device)
    fingerprints.add(runtime_build_info.fingerprint)
  return device_names, fingerprints


def GetZipEntryOffset(zfp, entry_info):
  """Gets the offset to the beginning of a particular zip entry.

  Args:
    zfp: A zipfile.ZipFile instance.
    entry_info: The zipfile.ZipInfo instance of the entry in question.

  Returns:
    An (offset, size) tuple.
  """
  # Don't use len(entry_info.extra), because that returns the size of the
  # extra field in the central directory. We need to look at the local file
  # header, as the two may have different sizes.

  # We cannot work with the zipfile.ZipFile instance directly; we need the
  # underlying file object.
  zfp = zfp.fp
  zfp.seek(entry_info.header_offset)
  data = zfp.read(zipfile.sizeFileHeader)
  fheader = struct.unpack(zipfile.structFileHeader, data)
  # The last two fields of the local file header are the filename length and
  # the extra field length.
  filename_len = fheader[-2]
  extra_len = fheader[-1]
  offset = entry_info.header_offset
  offset += zipfile.sizeFileHeader
  offset += filename_len + extra_len
  size = entry_info.file_size
  return (offset, size)


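# Illustrative sketch, not part of the original tool: the offset/size returned
# by GetZipEntryOffset() lets a client read a ZIP_STORED entry's bytes
# directly, the way a streaming updater would issue a ranged fetch against the
# package URL. The helper name and paths are hypothetical; for compressed
# entries the bytes read this way would still be the compressed stream.
def _ExampleReadEntryByOffset(zip_path, entry_name):
  with zipfile.ZipFile(zip_path, 'r', allowZip64=True) as zip_file:
    offset, size = GetZipEntryOffset(zip_file, zip_file.getinfo(entry_name))
  with open(zip_path, 'rb') as fp:
    fp.seek(offset)
    return fp.read(size)

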
class PropertyFiles(object):
  """A class that computes the property-files string for an OTA package.

  A property-files string is a comma-separated string that contains the
  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
  can be fetched directly with the package URL along with the offset/size info.
  These strings can be used for streaming A/B OTAs, or for allowing an updater
  to download the package metadata entry directly, without paying the cost of
  downloading the entire package.

  Computing the final property-files string requires two passes, because doing
  the whole package signing (with signapk.jar) will possibly reorder the ZIP
  entries, which may in turn invalidate earlier computed ZIP entry offset/size
  values.

  This class provides functions to be called for each pass. The general flow is
  as follows.

    property_files = PropertyFiles()
    # The first pass, which writes placeholders before doing initial signing.
    property_files.Compute()
    SignOutput()

    # The second pass, by replacing the placeholders with actual data.
    property_files.Finalize()
    SignOutput()

  And the caller can additionally verify the final result.

    property_files.Verify()
  """

  def __init__(self):
    self.name = None
    self.required = ()
    self.optional = ()

  def Compute(self, input_zip):
    """Computes and returns a property-files string with placeholders.

    We reserve extra space for the offset and size of the metadata entry
    itself, although we don't know the final values until the package gets
    signed.

    Args:
      input_zip: The input ZIP file.

    Returns:
      A string with placeholders for the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata: ".
    """
    return self.GetPropertyFilesString(input_zip, reserve_space=True)

  class InsufficientSpaceException(Exception):
    pass

  def Finalize(self, input_zip, reserved_length):
    """Finalizes a property-files string with actual METADATA offset/size info.

    The input ZIP file has been signed, with the ZIP entries in the desired
    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
    the ZIP entry offsets and construct the property-files string with actual
    data. Note that during this process, we must pad the property-files string
    to the reserved length, so that the METADATA entry size remains the same.
    Otherwise the entries' offsets and sizes may change again.

    Args:
      input_zip: The input ZIP file.
      reserved_length: The reserved length of the property-files string during
          the call to Compute(). The final string must be no more than this
          size.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379 ".

    Raises:
      InsufficientSpaceException: If the reserved length is insufficient to
          hold the final string.
    """
    result = self.GetPropertyFilesString(input_zip, reserve_space=False)
    if len(result) > reserved_length:
      raise self.InsufficientSpaceException(
          'Insufficient reserved space: reserved={}, actual={}'.format(
              reserved_length, len(result)))

    result += ' ' * (reserved_length - len(result))
    return result

  def Verify(self, input_zip, expected):
    """Verifies the input ZIP file contains the expected property-files string.

    Args:
      input_zip: The input ZIP file.
      expected: The property-files string that's computed from Finalize().

    Raises:
      AssertionError: On finding a mismatch.
    """
    actual = self.GetPropertyFilesString(input_zip)
    assert actual == expected, \
        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)

  def GetPropertyFilesString(self, zip_file, reserve_space=False):
    """Constructs the property-files string per request.

    Args:
      zip_file: The input ZIP file.
      reserve_space: Whether to reserve placeholder space for the metadata
          entry's offset/size info.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata: ".
    """

    def ComputeEntryOffsetSize(name):
      """Computes the zip entry offset and size."""
      info = zip_file.getinfo(name)
      (offset, size) = GetZipEntryOffset(zip_file, info)
      return '%s:%d:%d' % (os.path.basename(name), offset, size)

    tokens = []
    tokens.extend(self._GetPrecomputed(zip_file))
    for entry in self.required:
      tokens.append(ComputeEntryOffsetSize(entry))
    for entry in self.optional:
      if entry in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(entry))

    # 'META-INF/com/android/metadata' is required. We don't know its actual
    # offset and length (as well as the values for other entries) yet. So we
    # reserve a 15-byte placeholder ('offset:length'), which is sufficient for
    # the metadata entry: up to 10 digits for the offset (i.e. ~9 GiB), a
    # colon, and up to 4 digits for the length. Note that all the reserved
    # space serves the metadata entry only.
    if reserve_space:
      tokens.append('metadata:' + ' ' * 15)
      tokens.append('metadata.pb:' + ' ' * 15)
    else:
      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
      tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))

    return ','.join(tokens)

  def _GetPrecomputed(self, input_zip):
    """Computes the additional tokens to be included into the property-files.

    This applies to tokens without actual ZIP entries, such as
    payload_metadata.bin. We want to expose the offset/size to updaters, so
    that they can download the payload metadata directly with the info.

    Args:
      input_zip: The input zip file.

    Returns:
      A list of strings (tokens) to be added to the property-files string.
    """
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return []


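# Illustrative sketch, not part of the original tool: a minimal PropertyFiles
# subclass, showing how the name/required/optional fields drive
# GetPropertyFilesString(). The class name and entry names here are
# hypothetical; concrete subclasses for real packages are defined elsewhere,
# not in this module.
class _ExamplePropertyFiles(PropertyFiles):

  def __init__(self):
    super(_ExamplePropertyFiles, self).__init__()
    self.name = 'ota-example-property-files'
    self.required = ('payload.bin', 'payload_properties.txt')
    self.optional = ('care_map.pb',)

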
def SignOutput(temp_zip_name, output_zip_name):
  pw = OPTIONS.key_passwords[OPTIONS.package_key]

  SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
           whole_file=True)


def ConstructOtaApexInfo(target_zip, source_file=None):
  """If applicable, add the source version to the apex info."""

  def _ReadApexInfo(input_zip):
    if "META/apex_info.pb" not in input_zip.namelist():
      logger.warning("target_file doesn't contain apex_info.pb %s", input_zip)
      return None

    with input_zip.open("META/apex_info.pb", "r") as zfp:
      return zfp.read()

  target_apex_string = _ReadApexInfo(target_zip)
  # Return early if the target apex info doesn't exist or is empty.
  if not target_apex_string:
    return target_apex_string

  # If the source apex info isn't available, just return the target info
  if not source_file:
    return target_apex_string

  with zipfile.ZipFile(source_file, "r", allowZip64=True) as source_zip:
    source_apex_string = _ReadApexInfo(source_zip)
  if not source_apex_string:
    return target_apex_string

  source_apex_proto = ota_metadata_pb2.ApexMetadata()
  source_apex_proto.ParseFromString(source_apex_string)
  source_apex_versions = {apex.package_name: apex.version for apex in
                          source_apex_proto.apex_info}

  # If the apex package is available in the source build, initialize the
  # source apex version.
  target_apex_proto = ota_metadata_pb2.ApexMetadata()
  target_apex_proto.ParseFromString(target_apex_string)
  for target_apex in target_apex_proto.apex_info:
    name = target_apex.package_name
    if name in source_apex_versions:
      target_apex.source_version = source_apex_versions[name]

  return target_apex_proto.SerializeToString()
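

# Illustrative sketch, not part of the original tool: decoding the serialized
# ApexMetadata returned by ConstructOtaApexInfo(). The helper name and the
# target-file path are hypothetical.
def _ExampleLogApexInfo(target_file_path):
  with zipfile.ZipFile(target_file_path, 'r', allowZip64=True) as target_zip:
    apex_string = ConstructOtaApexInfo(target_zip)
  if not apex_string:
    return
  apex_proto = ota_metadata_pb2.ApexMetadata()
  apex_proto.ParseFromString(apex_string)
  for apex in apex_proto.apex_info:
    logger.info('%s: version %s', apex.package_name, apex.version)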