Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1 | # Copyright (C) 2008 The Android Open Source Project |
| 2 | # |
| 3 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | # you may not use this file except in compliance with the License. |
| 5 | # You may obtain a copy of the License at |
| 6 | # |
| 7 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | # |
| 9 | # Unless required by applicable law or agreed to in writing, software |
| 10 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | # See the License for the specific language governing permissions and |
| 13 | # limitations under the License. |
| 14 | |
Tao Bao | 89fbb0f | 2017-01-10 10:47:58 -0800 | [diff] [blame] | 15 | from __future__ import print_function |
| 16 | |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 17 | import base64 |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 18 | import collections |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 19 | import copy |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 20 | import datetime |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 21 | import errno |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 22 | import fnmatch |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 23 | import getopt |
| 24 | import getpass |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 25 | import gzip |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 26 | import imp |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 27 | import json |
| 28 | import logging |
| 29 | import logging.config |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 30 | import os |
Ying Wang | 7e6d4e4 | 2010-12-13 16:25:36 -0800 | [diff] [blame] | 31 | import platform |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 32 | import re |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 33 | import shlex |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 34 | import shutil |
| 35 | import subprocess |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 36 | import stat |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 37 | import sys |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 38 | import tempfile |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 39 | import threading |
| 40 | import time |
Doug Zongker | 048e7ca | 2009-06-15 14:31:53 -0700 | [diff] [blame] | 41 | import zipfile |
Kelvin Zhang | c68c6b9 | 2023-11-14 10:54:50 -0800 | [diff] [blame] | 42 | |
| 43 | from typing import Iterable, Callable |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 44 | from dataclasses import dataclass |
Tao Bao | 12d87fc | 2018-01-31 12:18:52 -0800 | [diff] [blame] | 45 | from hashlib import sha1, sha256 |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 46 | |
Tianjie Xu | 41976c7 | 2019-07-03 13:57:01 -0700 | [diff] [blame] | 47 | import images |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 48 | import sparse_img |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 49 | from blockimgdiff import BlockImageDiff |
Doug Zongker | ab7ca1d | 2014-08-26 10:40:28 -0700 | [diff] [blame] | 50 | |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 51 | logger = logging.getLogger(__name__) |
| 52 | |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 53 | |
@dataclass
class OptionHandler:
  """Bundles extra command-line long options with the callback handling them."""
  # Extra long option names (getopt style) accepted in addition to the
  # common options.
  extra_long_opts: Iterable[str]
  # Callback invoked to process the parsed extra options.
  handler: Callable
| 58 | |
class Options(object):
  """Global options shared by the releasetools scripts."""

  def __init__(self):
    # Derive the default search path (for framework/ and lib64/) from the
    # location of the running binary.  A user-supplied `--path` flag, parsed
    # later, may override this value.
    program_path = os.path.realpath(sys.argv[0])
    if program_path.endswith('.py'):
      # The logger isn't initialized yet at this point, so warn via print.
      base_name = os.path.basename(program_path)
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              base_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(program_path))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    signapk_full = os.path.join(self.search_path, self.signapk_path)
    # Fall back to ANDROID_HOST_OUT when signapk isn't found next to us.
    if not os.path.exists(signapk_full) and "ANDROID_HOST_OUT" in os.environ:
      self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"  # Relative to search_path
    self.extra_signapk_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx4096m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 103 | |
| 104 | |
OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here, the
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks need to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw',
                  'recovery', 'system', 'system_ext', 'vendor', 'vendor_boot',
                  'vendor_kernel_boot', 'vendor_dlkm', 'odm_dlkm',
                  'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# avbtool argument names.
AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG = '--include_descriptors_from_image'
AVB_ARG_NAME_CHAIN_PARTITION = '--chain_partition'

# Partitions that should have their care_map added to META/care_map.pb.
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file.
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If file is moved, add new search paths here; don't remove
# existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
Tianjie Xu | 861f413 | 2018-09-12 11:49:33 -0700 | [diff] [blame] | 148 | |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 149 | |
@dataclass
class AvbChainedPartitionArg:
  """Holds the arguments required by avbtool --chain_partition."""
  partition: str
  rollback_index_location: int
  pubkey_path: str

  def to_string(self):
    """Serializes to the colon-separated avbtool argument string."""
    fields = (self.partition, self.rollback_index_location, self.pubkey_path)
    return ':'.join(str(field) for field in fields)
| 161 | |
| 162 | |
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: failures while handling the system partition.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: failures while handling the vendor partition.
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build mismatches and patch application failures.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
| 190 | |
| 191 | |
class ExternalError(RuntimeError):
  """Raised when an external command fails (e.g. non-zero exit) or a
  required external resource (e.g. a build prop) cannot be found."""
  pass
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 194 | |
| 195 | |
def InitLogging():
  """Initializes logging for all releasetools scripts.

  The configuration is loaded from the JSON file named by the LOGGING_CONFIG
  environment variable when set; otherwise a built-in default (WARNING and
  above to stderr) applies.  OPTIONS.verbose lowers the console threshold to
  INFO, and OPTIONS.logfile additionally mirrors INFO and above to that file.
  """
  default_config = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'NOTSET',
          }
      }
  }

  config_path = os.getenv('LOGGING_CONFIG')
  if config_path:
    with open(config_path) as config_file:
      config = json.load(config_file)
  else:
    config = default_config

  # Copy before mutating, so neither the default dict above nor the loaded
  # config is modified in place.
  if OPTIONS.verbose or OPTIONS.logfile:
    config = copy.deepcopy(config)

  # Verbose mode lowers the console handler's threshold.
  if OPTIONS.verbose:
    config['handlers']['default']['level'] = 'INFO'

  # Mirror INFO and above into the requested log file.
  if OPTIONS.logfile:
    config['handlers']['logfile'] = {
        'class': 'logging.FileHandler',
        'formatter': 'standard',
        'level': 'INFO',
        'mode': 'w',
        'filename': OPTIONS.logfile,
    }
    config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)
| 246 | |
| 247 | |
def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find
  Returns:
    The sibling path when the tool exists in the same directory as this
    binary; otherwise tool_name unchanged (letting it resolve via PATH).
  """
  here = os.path.dirname(os.path.realpath(sys.argv[0]))
  candidate = os.path.join(here, tool_name)
  return candidate if os.path.exists(candidate) else tool_name
Yifan Hong | 8e332ff | 2020-07-29 17:51:55 -0700 | [diff] [blame] | 263 | |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 264 | |
def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Default to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  # Capture combined stdout/stderr as text unless the caller already chose.
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs.update(stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  kwargs.setdefault('universal_newlines', True)

  if args:
    # Work on a copy so the caller's list stays untouched, then swap in the
    # resolved host-tool path for the executable.
    args = list(args)
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log any if caller explicitly says so.
  if verbose:
    logger.info(" Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)
| 299 | |
| 300 | |
def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Default to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  if verbose is None:
    verbose = OPTIONS.verbose
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  # communicate() yields None when stdout wasn't captured; normalize to "".
  output = "" if output is None else output
  # Don't log any if caller explicitly says so.
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output
| 332 | |
| 333 | |
def RoundUpTo4K(value):
  """Returns value rounded up to the nearest multiple of 4096."""
  return ((value + 4096 - 1) // 4096) * 4096
| 337 | |
| 338 | |
def CloseInheritedPipes():
  """Closes leaked pipe file descriptors inherited from gmake on Mac OS.

  Gmake in MAC OS has file descriptor (PIPE) leak. Any inherited fd in
  [3, 1024] that refers to a FIFO is closed before doing other work.
  No-op on all other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      # Named `st` so the local doesn't shadow the imported `stat` module.
      st = os.fstat(fd)
      if st is not None and stat.S_ISFIFO(st.st_mode):
        os.close(fd)
    except OSError:
      # fd isn't open; nothing to close.
      pass
| 353 | |
| 354 | |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 355 | class BuildInfo(object): |
| 356 | """A class that holds the information for a given build. |
| 357 | |
| 358 | This class wraps up the property querying for a given source or target build. |
| 359 | It abstracts away the logic of handling OEM-specific properties, and caches |
| 360 | the commonly used properties such as fingerprint. |
| 361 | |
| 362 | There are two types of info dicts: a) build-time info dict, which is generated |
| 363 | at build time (i.e. included in a target_files zip); b) OEM info dict that is |
| 364 | specified at package generation time (via command line argument |
| 365 | '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not |
| 366 | having "oem_fingerprint_properties" in build-time info dict), all the queries |
| 367 | would be answered based on build-time info dict only. Otherwise if using |
| 368 | OEM-specific properties, some of them will be calculated from two info dicts. |
| 369 | |
| 370 | Users can query properties similarly as using a dict() (e.g. info['fstab']), |
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 371 | or to query build properties via GetBuildProp() or GetPartitionBuildProp(). |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 372 | |
| 373 | Attributes: |
| 374 | info_dict: The build-time info dict. |
| 375 | is_ab: Whether it's a build that uses A/B OTA. |
| 376 | oem_dicts: A list of OEM dicts. |
| 377 | oem_props: A list of OEM properties that should be read from OEM dicts; None |
| 378 | if the build doesn't use any OEM-specific property. |
| 379 | fingerprint: The fingerprint of the build, which would be calculated based |
| 380 | on OEM properties if applicable. |
| 381 | device: The device name, which could come from OEM dicts if applicable. |
| 382 | """ |
| 383 | |
  # Properties that get resolved to their partition-qualified variant
  # (ro.product.<partition>.*) -- see GetPartitionBuildProp.
  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  # Partition search orders for resolving ro.product.* properties.
  # NOTE(review): which order applies presumably depends on the build's
  # Android release; the resolver is outside this chunk -- confirm there.
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The length of vbmeta digest to append to the fingerprint
  _VBMETA_DIGEST_SIZE_USED = 8
| 395 | |
  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class, while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None to use BuildInfo in
    # sign_target_files_apks
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      # CDD requires fingerprints to be ASCII without spaces; reject early.
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    # Pre-compute per-partition fingerprints; partitions whose build.prop is
    # unavailable (ExternalError) are silently skipped.
    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 451 | |
  @property
  def is_ab(self):
    # True iff the info dict declared "ab_update" as "true" (set in __init__).
    return self._is_ab
| 455 | |
  @property
  def device(self):
    # Device name computed in __init__; may come from the OEM dicts when
    # OEM-specific properties are in use.
    return self._device
| 459 | |
  @property
  def fingerprint(self):
    # Build fingerprint computed (and CDD-validated) in __init__.
    return self._fingerprint
| 463 | |
  @property
  def is_vabc(self):
    # Whether the build enables Virtual A/B Compression.
    return self.info_dict.get("virtual_ab_compression") == "true"
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 467 | |
  @property
  def is_android_r(self):
    # True iff the system build.prop reports release "11" (Android R).
    # NOTE(review): evaluates to None (still falsy) rather than False when
    # system.build.prop is absent.
    system_prop = self.info_dict.get("system.build.prop")
    return system_prop and system_prop.GetProp("ro.build.version.release") == "11"
| 472 | |
  @property
  def is_release_key(self):
    # NOTE(review): despite the local name `system_prop`, this reads the main
    # "build.prop" entry of info_dict, not "system.build.prop" -- confirm
    # that is intended.  Evaluates to None (falsy) when the entry is absent.
    system_prop = self.info_dict.get("build.prop")
    return system_prop and system_prop.GetProp("ro.build.tags") == "release-key"
| 477 | |
  @property
  def vabc_compression_param(self):
    # The configured Virtual A/B compression method; "" when unset.
    return self.get("virtual_ab_compression_method", "")
| 481 | |
| 482 | @property |
David Anderson | 1c59617 | 2023-04-14 16:01:55 -0700 | [diff] [blame] | 483 | def vendor_api_level(self): |
| 484 | vendor_prop = self.info_dict.get("vendor.build.prop") |
| 485 | if not vendor_prop: |
| 486 | return -1 |
| 487 | |
| 488 | props = [ |
| 489 | "ro.board.api_level", |
| 490 | "ro.board.first_api_level", |
| 491 | "ro.product.first_api_level", |
| 492 | ] |
| 493 | for prop in props: |
| 494 | value = vendor_prop.GetProp(prop) |
| 495 | try: |
Kelvin Zhang | e634bde | 2023-04-28 23:59:43 -0700 | [diff] [blame] | 496 | return int(value) |
David Anderson | 1c59617 | 2023-04-14 16:01:55 -0700 | [diff] [blame] | 497 | except: |
Kelvin Zhang | e634bde | 2023-04-28 23:59:43 -0700 | [diff] [blame] | 498 | pass |
David Anderson | 1c59617 | 2023-04-14 16:01:55 -0700 | [diff] [blame] | 499 | return -1 |
| 500 | |
  @property
  def is_vabc_xor(self):
    # True only when the vendor build.prop explicitly enables VABC XOR
    # compression; evaluates falsy (None/False) otherwise.
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled
| 507 | |
  @property
  def vendor_suppressed_vabc(self):
    # True when the vendor build.prop opts out of VABC via
    # ro.vendor.build.dont_use_vabc (compared case-insensitively to "true").
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"
| 514 | |
  @property
  def oem_props(self):
    # OEM fingerprint properties, or None when not using OEM dicts.
    return self._oem_props
| 518 | |
  def __getitem__(self, key):
    # Dict-style read access to the underlying build-time info dict.
    return self.info_dict[key]
| 521 | |
  def __setitem__(self, key, value):
    # Dict-style write access; mutates the wrapped info dict in place.
    self.info_dict[key] = value
| 524 | |
  def get(self, key, default=None):
    # dict.get-style lookup on the underlying info dict.
    return self.info_dict.get(key, default)
| 527 | |
  def items(self):
    # Iterates the (key, value) pairs of the underlying info dict.
    return self.info_dict.items()
| 530 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 531 | def _GetRawBuildProp(self, prop, partition): |
| 532 | prop_file = '{}.build.prop'.format( |
| 533 | partition) if partition else 'build.prop' |
| 534 | partition_props = self.info_dict.get(prop_file) |
| 535 | if not partition_props: |
| 536 | return None |
| 537 | return partition_props.GetProp(prop) |
| 538 | |
  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition.

    Args:
      prop: The "ro."-prefixed property name to resolve.
      partition: The partition whose build.prop should answer the query.

    Returns:
      The property value.

    Raises:
      ExternalError: If the prop isn't found in the partition's build.prop.
    """

    # Boot image and init_boot image uses ro.[product.]bootimage instead of boot.
    # This comes from the generic ramdisk
    prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      # NOTE(review): str.replace substitutes every occurrence of "ro.", not
      # just the leading one -- assumes prop names never contain "ro." twice.
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 558 | |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 559 | def GetBuildProp(self, prop): |
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 560 | """Returns the inquired build property from the standard build.prop file.""" |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 561 | if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS: |
| 562 | return self._ResolveRoProductBuildProp(prop) |
| 563 | |
Tianjie | fdda51d | 2021-05-05 14:46:35 -0700 | [diff] [blame] | 564 | if prop == "ro.build.id": |
| 565 | return self._GetBuildId() |
| 566 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 567 | prop_val = self._GetRawBuildProp(prop, None) |
| 568 | if prop_val is not None: |
| 569 | return prop_val |
| 570 | |
| 571 | raise ExternalError("couldn't find %s in build.prop" % (prop,)) |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 572 | |
| 573 | def _ResolveRoProductBuildProp(self, prop): |
| 574 | """Resolves the inquired ro.product.* build property""" |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 575 | prop_val = self._GetRawBuildProp(prop, None) |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 576 | if prop_val: |
| 577 | return prop_val |
| 578 | |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 579 | default_source_order = self._GetRoProductPropsDefaultSourceOrder() |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 580 | source_order_val = self._GetRawBuildProp( |
| 581 | "ro.product.property_source_order", None) |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 582 | if source_order_val: |
| 583 | source_order = source_order_val.split(",") |
| 584 | else: |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 585 | source_order = default_source_order |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 586 | |
| 587 | # Check that all sources in ro.product.property_source_order are valid |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 588 | if any([x not in default_source_order for x in source_order]): |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 589 | raise ExternalError( |
| 590 | "Invalid ro.product.property_source_order '{}'".format(source_order)) |
| 591 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 592 | for source_partition in source_order: |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 593 | source_prop = prop.replace( |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 594 | "ro.product", "ro.product.{}".format(source_partition), 1) |
| 595 | prop_val = self._GetRawBuildProp(source_prop, source_partition) |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 596 | if prop_val: |
| 597 | return prop_val |
| 598 | |
| 599 | raise ExternalError("couldn't resolve {}".format(prop)) |
| 600 | |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 601 | def _GetRoProductPropsDefaultSourceOrder(self): |
| 602 | # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and |
| 603 | # values of these properties for each Android release. |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 604 | android_codename = self._GetRawBuildProp("ro.build.version.codename", None) |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 605 | if android_codename == "REL": |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 606 | android_version = self._GetRawBuildProp("ro.build.version.release", None) |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 607 | if android_version == "10": |
| 608 | return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 |
| 609 | # NOTE: float() conversion of android_version will have rounding error. |
| 610 | # We are checking for "9" or less, and using "< 10" is well outside of |
| 611 | # possible floating point rounding. |
| 612 | try: |
| 613 | android_version_val = float(android_version) |
| 614 | except ValueError: |
| 615 | android_version_val = 0 |
| 616 | if android_version_val < 10: |
| 617 | return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY |
| 618 | return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT |
| 619 | |
Tianjie | b37c5be | 2020-10-15 21:27:10 -0700 | [diff] [blame] | 620 | def _GetPlatformVersion(self): |
| 621 | version_sdk = self.GetBuildProp("ro.build.version.sdk") |
| 622 | # init code switches to version_release_or_codename (see b/158483506). After |
| 623 | # API finalization, release_or_codename will be the same as release. This |
| 624 | # is the best effort to support pre-S dev stage builds. |
| 625 | if int(version_sdk) >= 30: |
| 626 | try: |
| 627 | return self.GetBuildProp("ro.build.version.release_or_codename") |
| 628 | except ExternalError: |
| 629 | logger.warning('Failed to find ro.build.version.release_or_codename') |
| 630 | |
| 631 | return self.GetBuildProp("ro.build.version.release") |
| 632 | |
Tianjie | fdda51d | 2021-05-05 14:46:35 -0700 | [diff] [blame] | 633 | def _GetBuildId(self): |
| 634 | build_id = self._GetRawBuildProp("ro.build.id", None) |
| 635 | if build_id: |
| 636 | return build_id |
| 637 | |
| 638 | legacy_build_id = self.GetBuildProp("ro.build.legacy.id") |
| 639 | if not legacy_build_id: |
| 640 | raise ExternalError("Couldn't find build id in property file") |
| 641 | |
| 642 | if self.use_legacy_id: |
| 643 | return legacy_build_id |
| 644 | |
| 645 | # Append the top 8 chars of vbmeta digest to the existing build id. The |
| 646 | # logic needs to match the one in init, so that OTA can deliver correctly. |
| 647 | avb_enable = self.info_dict.get("avb_enable") == "true" |
| 648 | if not avb_enable: |
| 649 | raise ExternalError("AVB isn't enabled when using legacy build id") |
| 650 | |
| 651 | vbmeta_digest = self.info_dict.get("vbmeta_digest") |
| 652 | if not vbmeta_digest: |
| 653 | raise ExternalError("Vbmeta digest isn't provided when using legacy build" |
| 654 | " id") |
| 655 | if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED: |
| 656 | raise ExternalError("Invalid vbmeta digest " + vbmeta_digest) |
| 657 | |
| 658 | digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED] |
| 659 | return legacy_build_id + '.' + digest_prefix |
| 660 | |
Tianjie | b37c5be | 2020-10-15 21:27:10 -0700 | [diff] [blame] | 661 | def _GetPartitionPlatformVersion(self, partition): |
| 662 | try: |
| 663 | return self.GetPartitionBuildProp("ro.build.version.release_or_codename", |
| 664 | partition) |
| 665 | except ExternalError: |
| 666 | return self.GetPartitionBuildProp("ro.build.version.release", |
| 667 | partition) |
| 668 | |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 669 | def GetOemProperty(self, key): |
| 670 | if self.oem_props is not None and key in self.oem_props: |
| 671 | return self.oem_dicts[0][key] |
| 672 | return self.GetBuildProp(key) |
| 673 | |
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 674 | def GetPartitionFingerprint(self, partition): |
| 675 | return self._partition_fingerprints.get(partition, None) |
| 676 | |
| 677 | def CalculatePartitionFingerprint(self, partition): |
| 678 | try: |
| 679 | return self.GetPartitionBuildProp("ro.build.fingerprint", partition) |
| 680 | except ExternalError: |
| 681 | return "{}/{}/{}:{}/{}/{}:{}/{}".format( |
| 682 | self.GetPartitionBuildProp("ro.product.brand", partition), |
| 683 | self.GetPartitionBuildProp("ro.product.name", partition), |
| 684 | self.GetPartitionBuildProp("ro.product.device", partition), |
Tianjie | b37c5be | 2020-10-15 21:27:10 -0700 | [diff] [blame] | 685 | self._GetPartitionPlatformVersion(partition), |
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 686 | self.GetPartitionBuildProp("ro.build.id", partition), |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 687 | self.GetPartitionBuildProp( |
| 688 | "ro.build.version.incremental", partition), |
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 689 | self.GetPartitionBuildProp("ro.build.type", partition), |
| 690 | self.GetPartitionBuildProp("ro.build.tags", partition)) |
| 691 | |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 692 | def CalculateFingerprint(self): |
| 693 | if self.oem_props is None: |
| 694 | try: |
| 695 | return self.GetBuildProp("ro.build.fingerprint") |
| 696 | except ExternalError: |
| 697 | return "{}/{}/{}:{}/{}/{}:{}/{}".format( |
| 698 | self.GetBuildProp("ro.product.brand"), |
| 699 | self.GetBuildProp("ro.product.name"), |
| 700 | self.GetBuildProp("ro.product.device"), |
Tianjie | b37c5be | 2020-10-15 21:27:10 -0700 | [diff] [blame] | 701 | self._GetPlatformVersion(), |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 702 | self.GetBuildProp("ro.build.id"), |
| 703 | self.GetBuildProp("ro.build.version.incremental"), |
| 704 | self.GetBuildProp("ro.build.type"), |
| 705 | self.GetBuildProp("ro.build.tags")) |
| 706 | return "%s/%s/%s:%s" % ( |
| 707 | self.GetOemProperty("ro.product.brand"), |
| 708 | self.GetOemProperty("ro.product.name"), |
| 709 | self.GetOemProperty("ro.product.device"), |
| 710 | self.GetBuildProp("ro.build.thumbprint")) |
| 711 | |
| 712 | def WriteMountOemScript(self, script): |
| 713 | assert self.oem_props is not None |
| 714 | recovery_mount_options = self.info_dict.get("recovery_mount_options") |
| 715 | script.Mount("/oem", recovery_mount_options) |
| 716 | |
| 717 | def WriteDeviceAssertions(self, script, oem_no_mount): |
| 718 | # Read the property directly if not using OEM properties. |
| 719 | if not self.oem_props: |
| 720 | script.AssertDevice(self.device) |
| 721 | return |
| 722 | |
| 723 | # Otherwise assert OEM properties. |
| 724 | if not self.oem_dicts: |
| 725 | raise ExternalError( |
| 726 | "No OEM file provided to answer expected assertions") |
| 727 | |
| 728 | for prop in self.oem_props.split(): |
| 729 | values = [] |
| 730 | for oem_dict in self.oem_dicts: |
| 731 | if prop in oem_dict: |
| 732 | values.append(oem_dict[prop]) |
| 733 | if not values: |
| 734 | raise ExternalError( |
| 735 | "The OEM file is missing the property %s" % (prop,)) |
| 736 | script.AssertOemProperty(prop, values, oem_no_mount) |
| 737 | |
| 738 | |
def DoesInputFileContain(input_file, fn):
  """Check whether the input target_files.zip contain an entry `fn`"""
  if isinstance(input_file, zipfile.ZipFile):
    return fn in input_file.namelist()
  if zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return fn in zfp.namelist()
  # Neither a ZipFile object nor a zip path: must be an extracted directory.
  if not os.path.isdir(input_file):
    raise ValueError(
        "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
  return os.path.exists(os.path.join(input_file, *fn.split("/")))
| 752 | |
| 753 | |
def ReadBytesFromInputFile(input_file, fn):
  """Reads the bytes of fn from input zipfile or directory.

  Args:
    input_file: an open zipfile.ZipFile, a path to a .zip on disk, or a path
      to an extracted target-files directory.
    fn: the entry name, with '/' separators.

  Returns:
    The entry's contents as bytes.

  Raises:
    KeyError: if `fn` does not exist in the input.
    ValueError: if `input_file` is none of the accepted forms.
  """
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return zfp.read(fn)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "rb") as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Bug fix: previously any other IOError (EACCES, EISDIR, ...) was
      # silently swallowed and the function returned None.
      raise
| 772 | |
| 773 | |
def ReadFromInputFile(input_file, fn):
  """Reads the str contents of fn from input zipfile or directory."""
  raw = ReadBytesFromInputFile(input_file, fn)
  return raw.decode()
| 777 | |
| 778 | |
def WriteBytesToInputFile(input_file, fn, data):
  """Write bytes |data| contents to fn of input zipfile or directory.

  Args:
    input_file: an open zipfile.ZipFile (must itself be writable), a path to
      a .zip on disk, or a path to an extracted target-files directory.
    fn: the entry name to write, with '/' separators.
    data: the bytes to write.

  Raises:
    KeyError: if the destination path's parent does not exist (directory
      input).
    ValueError: if `input_file` is none of the accepted forms.
  """
  if isinstance(input_file, zipfile.ZipFile):
    with input_file.open(fn, "w") as entry_fp:
      return entry_fp.write(data)
  elif zipfile.is_zipfile(input_file):
    # Bug fix: the archive must be opened in append mode -- ZipFile.open(fn,
    # "w") raises ValueError on an archive opened with mode "r".
    with zipfile.ZipFile(input_file, "a", allowZip64=True) as zfp:
      with zfp.open(fn, "w") as entry_fp:
        return entry_fp.write(data)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "wb") as f:
        return f.write(data)
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      # Bug fix: previously any other IOError was silently swallowed and the
      # function returned None.
      raise
| 799 | |
| 800 | |
def WriteToInputFile(input_file, fn, str: str):
  """Write str content to fn of input file or directory"""
  # NOTE(review): the parameter name shadows the builtin `str`; renaming it
  # (e.g. to `data`) would be clearer but would change the keyword-argument
  # interface for existing callers -- confirm before renaming.
  return WriteBytesToInputFile(input_file, fn, str.encode())
| 804 | |
| 805 | |
def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from input zipfile or directory into a file.

  For zip inputs the entry is copied into a fresh temp file and that path is
  returned; for directory inputs the existing file path is returned directly.
  """
  if isinstance(input_file, zipfile.ZipFile):
    extracted = MakeTempFile(os.path.basename(fn))
    with open(extracted, 'wb') as out_fp:
      out_fp.write(input_file.read(fn))
    return extracted
  if zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      extracted = MakeTempFile(os.path.basename(fn))
      with open(extracted, "wb") as out_fp:
        out_fp.write(zfp.read(fn))
      return extracted
  if not os.path.isdir(input_file):
    raise ValueError(
        "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
  source = os.path.join(input_file, *fn.split("/"))
  if not os.path.exists(source):
    raise KeyError(fn)
  return source
| 827 | |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 828 | |
class RamdiskFormat(object):
  """Enumeration of the supported ramdisk compression formats."""
  # Values are arbitrary distinct ints, compared with == by callers.
  LZ4 = 1
  GZ = 2
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 832 | |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 833 | |
def GetRamdiskFormat(info_dict):
  """Returns the RamdiskFormat implied by the 'lz4_ramdisks' flag."""
  if info_dict.get('lz4_ramdisks') == 'true':
    return RamdiskFormat.LZ4
  return RamdiskFormat.GZ
| 840 | |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 841 | |
def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads `META/misc_info.txt` file in the target_files input, does validation
  checks and returns the parsed key/value pairs for to the given build. It's
  usually called early when working on input target_files files, e.g. when
  generating OTAs, or signing builds. Note that the function may be called
  against an old target_files file (i.e. from past dessert releases). So the
  property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. It works fine with the build system. However,
  they are no longer available when (re)generating images from target_files zip.
  When `repacking` is True, redirect these properties to the actual files in the
  unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
      zipfile.ZipFile instance, or a str for the dir that contains the files
      unzipped from a target_files file.
    repacking: Whether it's trying repack an target_files file after loading the
      info dict (default: False). If so, it will rewrite a few loaded
      properties (e.g. selinux_fc, root_dir) to point to the actual files in
      target_files file. When doing repacking, `input_file` must be a dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  # Helper that reads an entry from the target_files (zip or directory).
  def read_helper(fn):
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  # These two keys are mandatory in every misc_info.txt.
  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir", and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in ["system", "vendor", "system_ext", "product", "odm",
                      "vendor_dlkm", "odm_dlkm", "system_dlkm"]:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        # Best effort: drop the stale path rather than keep a dead link.
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

  # Converts d[key] in place to int; base 0 honors 0x/0o/decimal prefixes.
  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  # "boot_images" may list several space-separated images (e.g. multi-boot);
  # convert each "<name>_size" entry to int as well.
  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = GetRamdiskFormat(d)

  # Tries to load the build props for all partitions with care_map, including
  # system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  # Legacy alias: "build.prop" refers to the system partition's props.
  d["build.prop"] = d["system.build.prop"]

  if d.get("avb_enable") == "true":
    # Set the vbmeta digest if exists
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  # Best effort: older target-files may not carry the A/B partition list.
  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d
| 965 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 966 | |
def LoadListFromFile(file_path):
  """Reads a text file and returns its lines as a list, without endings."""
  with open(file_path) as fp:
    content = fp.read()
  return content.splitlines()
Kiyoung Kim | ebe7c9c | 2019-06-25 17:09:55 +0900 | [diff] [blame] | 970 | |
Daniel Norman | 4cc9df6 | 2019-07-18 10:11:07 -0700 | [diff] [blame] | 971 | |
def LoadDictionaryFromFile(file_path):
  """Parses a key=value text file into a dict."""
  return LoadDictionaryFromLines(LoadListFromFile(file_path))
| 975 | |
| 976 | |
def LoadDictionaryFromLines(lines):
  """Parses key=value lines into a dict.

  Blank lines, comment lines (leading '#') and lines without '=' are skipped;
  only the first '=' splits key from value.
  """
  d = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    d[key] = value
  return d
| 987 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 988 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 989 | class PartitionBuildProps(object): |
| 990 | """The class holds the build prop of a particular partition. |
| 991 | |
| 992 | This class loads the build.prop and holds the build properties for a given |
| 993 | partition. It also partially recognizes the 'import' statement in the |
| 994 | build.prop; and calculates alternative values of some specific build |
| 995 | properties during runtime. |
| 996 | |
| 997 | Attributes: |
| 998 | input_file: a zipped target-file or an unzipped target-file directory. |
| 999 | partition: name of the partition. |
| 1000 | props_allow_override: a list of build properties to search for the |
| 1001 | alternative values during runtime. |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1002 | build_props: a dict of build properties for the given partition. |
| 1003 | prop_overrides: a set of props that are overridden by import. |
| 1004 | placeholder_values: A dict of runtime variables' values to replace the |
| 1005 | placeholders in the build.prop file. We expect exactly one value for |
| 1006 | each of the variables. |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 1007 | ramdisk_format: If name is "boot", the format of ramdisk inside the |
| 1008 | boot image. Otherwise, its value is ignored. |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 1009 | Use lz4 to decompress by default. If its value is gzip, use gzip. |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1010 | """ |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1011 | |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1012 | def __init__(self, input_file, name, placeholder_values=None): |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1013 | self.input_file = input_file |
| 1014 | self.partition = name |
| 1015 | self.props_allow_override = [props.format(name) for props in [ |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1016 | 'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']] |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1017 | self.build_props = {} |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1018 | self.prop_overrides = set() |
| 1019 | self.placeholder_values = {} |
| 1020 | if placeholder_values: |
| 1021 | self.placeholder_values = copy.deepcopy(placeholder_values) |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1022 | |
| 1023 | @staticmethod |
| 1024 | def FromDictionary(name, build_props): |
| 1025 | """Constructs an instance from a build prop dictionary.""" |
| 1026 | |
| 1027 | props = PartitionBuildProps("unknown", name) |
| 1028 | props.build_props = build_props.copy() |
| 1029 | return props |
| 1030 | |
| 1031 | @staticmethod |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 1032 | def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4): |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1033 | """Loads the build.prop file and builds the attributes.""" |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1034 | |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1035 | if name in ("boot", "init_boot"): |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 1036 | data = PartitionBuildProps._ReadBootPropFile( |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1037 | input_file, name, ramdisk_format=ramdisk_format) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1038 | else: |
| 1039 | data = PartitionBuildProps._ReadPartitionPropFile(input_file, name) |
| 1040 | |
| 1041 | props = PartitionBuildProps(input_file, name, placeholder_values) |
| 1042 | props._LoadBuildProp(data) |
| 1043 | return props |
| 1044 | |
| 1045 | @staticmethod |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1046 | def _ReadBootPropFile(input_file, partition_name, ramdisk_format): |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1047 | """ |
| 1048 | Read build.prop for boot image from input_file. |
| 1049 | Return empty string if not found. |
| 1050 | """ |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1051 | image_path = 'IMAGES/' + partition_name + '.img' |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1052 | try: |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1053 | boot_img = ExtractFromInputFile(input_file, image_path) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1054 | except KeyError: |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1055 | logger.warning('Failed to read %s', image_path) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1056 | return '' |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 1057 | prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1058 | if prop_file is None: |
| 1059 | return '' |
Kelvin Zhang | 645dcb8 | 2021-02-09 17:52:50 -0500 | [diff] [blame] | 1060 | with open(prop_file, "r") as f: |
| 1061 | return f.read() |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1062 | |
| 1063 | @staticmethod |
| 1064 | def _ReadPartitionPropFile(input_file, name): |
| 1065 | """ |
| 1066 | Read build.prop for name from input_file. |
| 1067 | Return empty string if not found. |
| 1068 | """ |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1069 | data = '' |
| 1070 | for prop_file in ['{}/etc/build.prop'.format(name.upper()), |
| 1071 | '{}/build.prop'.format(name.upper())]: |
| 1072 | try: |
| 1073 | data = ReadFromInputFile(input_file, prop_file) |
| 1074 | break |
| 1075 | except KeyError: |
| 1076 | logger.warning('Failed to read %s', prop_file) |
Kelvin Zhang | 4fc3aa0 | 2021-11-16 18:58:58 -0800 | [diff] [blame] | 1077 | if data == '': |
| 1078 | logger.warning("Failed to read build.prop for partition {}".format(name)) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1079 | return data |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1080 | |
Yifan Hong | 125d0b6 | 2020-09-24 17:07:03 -0700 | [diff] [blame] | 1081 | @staticmethod |
| 1082 | def FromBuildPropFile(name, build_prop_file): |
| 1083 | """Constructs an instance from a build prop file.""" |
| 1084 | |
| 1085 | props = PartitionBuildProps("unknown", name) |
| 1086 | with open(build_prop_file) as f: |
| 1087 | props._LoadBuildProp(f.read()) |
| 1088 | return props |
| 1089 | |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1090 | def _LoadBuildProp(self, data): |
| 1091 | for line in data.split('\n'): |
| 1092 | line = line.strip() |
| 1093 | if not line or line.startswith("#"): |
| 1094 | continue |
| 1095 | if line.startswith("import"): |
| 1096 | overrides = self._ImportParser(line) |
| 1097 | duplicates = self.prop_overrides.intersection(overrides.keys()) |
| 1098 | if duplicates: |
| 1099 | raise ValueError('prop {} is overridden multiple times'.format( |
| 1100 | ','.join(duplicates))) |
| 1101 | self.prop_overrides = self.prop_overrides.union(overrides.keys()) |
| 1102 | self.build_props.update(overrides) |
| 1103 | elif "=" in line: |
| 1104 | name, value = line.split("=", 1) |
| 1105 | if name in self.prop_overrides: |
| 1106 | raise ValueError('prop {} is set again after overridden by import ' |
| 1107 | 'statement'.format(name)) |
| 1108 | self.build_props[name] = value |
| 1109 | |
| 1110 | def _ImportParser(self, line): |
| 1111 | """Parses the build prop in a given import statement.""" |
| 1112 | |
| 1113 | tokens = line.split() |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1114 | if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3): |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1115 | raise ValueError('Unrecognized import statement {}'.format(line)) |
Hongguang Chen | b4702b7 | 2020-05-13 18:05:20 -0700 | [diff] [blame] | 1116 | |
| 1117 | if len(tokens) == 3: |
| 1118 | logger.info("Import %s from %s, skip", tokens[2], tokens[1]) |
| 1119 | return {} |
| 1120 | |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1121 | import_path = tokens[1] |
| 1122 | if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path): |
Kelvin Zhang | 42ab828 | 2022-02-17 13:07:55 -0800 | [diff] [blame] | 1123 | logger.warn('Unrecognized import path {}'.format(line)) |
| 1124 | return {} |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1125 | |
| 1126 | # We only recognize a subset of import statement that the init process |
| 1127 | # supports. And we can loose the restriction based on how the dynamic |
| 1128 | # fingerprint is used in practice. The placeholder format should be |
| 1129 | # ${placeholder}, and its value should be provided by the caller through |
| 1130 | # the placeholder_values. |
| 1131 | for prop, value in self.placeholder_values.items(): |
| 1132 | prop_place_holder = '${{{}}}'.format(prop) |
| 1133 | if prop_place_holder in import_path: |
| 1134 | import_path = import_path.replace(prop_place_holder, value) |
| 1135 | if '$' in import_path: |
| 1136 | logger.info('Unresolved place holder in import path %s', import_path) |
| 1137 | return {} |
| 1138 | |
| 1139 | import_path = import_path.replace('/{}'.format(self.partition), |
| 1140 | self.partition.upper()) |
| 1141 | logger.info('Parsing build props override from %s', import_path) |
| 1142 | |
| 1143 | lines = ReadFromInputFile(self.input_file, import_path).split('\n') |
| 1144 | d = LoadDictionaryFromLines(lines) |
| 1145 | return {key: val for key, val in d.items() |
| 1146 | if key in self.props_allow_override} |
| 1147 | |
Kelvin Zhang | 5ef2519 | 2022-10-19 11:25:22 -0700 | [diff] [blame] | 1148 | def __getstate__(self): |
| 1149 | state = self.__dict__.copy() |
| 1150 | # Don't pickle baz |
| 1151 | if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile): |
| 1152 | state["input_file"] = state["input_file"].filename |
| 1153 | return state |
| 1154 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1155 | def GetProp(self, prop): |
| 1156 | return self.build_props.get(prop) |
| 1157 | |
| 1158 | |
Yi-Yo Chiang | 18650c7 | 2022-10-12 18:29:14 +0800 | [diff] [blame] | 1159 | def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path): |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1160 | class Partition(object): |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1161 | def __init__(self, mount_point, fs_type, device, length, context, slotselect): |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 1162 | self.mount_point = mount_point |
| 1163 | self.fs_type = fs_type |
| 1164 | self.device = device |
| 1165 | self.length = length |
Tao Bao | 548eb76 | 2015-06-10 12:32:41 -0700 | [diff] [blame] | 1166 | self.context = context |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1167 | self.slotselect = slotselect |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1168 | |
| 1169 | try: |
Tianjie Xu | cfa8622 | 2016-03-07 16:31:19 -0800 | [diff] [blame] | 1170 | data = read_helper(recovery_fstab_path) |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1171 | except KeyError: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 1172 | logger.warning("Failed to find %s", recovery_fstab_path) |
Jeff Davidson | 033fbe2 | 2011-10-26 18:08:09 -0700 | [diff] [blame] | 1173 | data = "" |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1174 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1175 | assert fstab_version == 2 |
| 1176 | |
| 1177 | d = {} |
| 1178 | for line in data.split("\n"): |
| 1179 | line = line.strip() |
| 1180 | if not line or line.startswith("#"): |
| 1181 | continue |
| 1182 | |
| 1183 | # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags> |
| 1184 | pieces = line.split() |
| 1185 | if len(pieces) != 5: |
| 1186 | raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) |
| 1187 | |
| 1188 | # Ignore entries that are managed by vold. |
| 1189 | options = pieces[4] |
| 1190 | if "voldmanaged=" in options: |
| 1191 | continue |
| 1192 | |
| 1193 | # It's a good line, parse it. |
| 1194 | length = 0 |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1195 | slotselect = False |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1196 | options = options.split(",") |
| 1197 | for i in options: |
| 1198 | if i.startswith("length="): |
| 1199 | length = int(i[7:]) |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1200 | elif i == "slotselect": |
| 1201 | slotselect = True |
Doug Zongker | 086cbb0 | 2011-02-17 15:54:20 -0800 | [diff] [blame] | 1202 | else: |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1203 | # Ignore all unknown options in the unified fstab. |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 1204 | continue |
Ken Sumrall | 3b07cf1 | 2013-02-19 17:35:29 -0800 | [diff] [blame] | 1205 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1206 | mount_flags = pieces[3] |
| 1207 | # Honor the SELinux context if present. |
| 1208 | context = None |
| 1209 | for i in mount_flags.split(","): |
| 1210 | if i.startswith("context="): |
| 1211 | context = i |
Doug Zongker | 086cbb0 | 2011-02-17 15:54:20 -0800 | [diff] [blame] | 1212 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1213 | mount_point = pieces[1] |
| 1214 | d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2], |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1215 | device=pieces[0], length=length, context=context, |
| 1216 | slotselect=slotselect) |
Ken Sumrall | 3b07cf1 | 2013-02-19 17:35:29 -0800 | [diff] [blame] | 1217 | |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1218 | return d |
| 1219 | |
| 1220 | |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1221 | def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper): |
| 1222 | """Finds the path to recovery fstab and loads its contents.""" |
| 1223 | # recovery fstab is only meaningful when installing an update via recovery |
| 1224 | # (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA. |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1225 | if info_dict.get('ab_update') == 'true' and \ |
| 1226 | info_dict.get("allow_non_ab") != "true": |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1227 | return None |
| 1228 | |
| 1229 | # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to |
| 1230 | # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both |
| 1231 | # cases, since it may load the info_dict from an old build (e.g. when |
| 1232 | # generating incremental OTAs from that build). |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1233 | if info_dict.get('no_recovery') != 'true': |
| 1234 | recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab' |
Kelvin Zhang | 2ab6986 | 2023-10-27 10:58:05 -0700 | [diff] [blame] | 1235 | if not DoesInputFileContain(input_file, recovery_fstab_path): |
| 1236 | recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab' |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1237 | return LoadRecoveryFSTab( |
Yi-Yo Chiang | 18650c7 | 2022-10-12 18:29:14 +0800 | [diff] [blame] | 1238 | read_helper, info_dict['fstab_version'], recovery_fstab_path) |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1239 | |
| 1240 | if info_dict.get('recovery_as_boot') == 'true': |
| 1241 | recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab' |
Kelvin Zhang | 2ab6986 | 2023-10-27 10:58:05 -0700 | [diff] [blame] | 1242 | if not DoesInputFileContain(input_file, recovery_fstab_path): |
| 1243 | recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab' |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1244 | return LoadRecoveryFSTab( |
Yi-Yo Chiang | 18650c7 | 2022-10-12 18:29:14 +0800 | [diff] [blame] | 1245 | read_helper, info_dict['fstab_version'], recovery_fstab_path) |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1246 | |
| 1247 | return None |
| 1248 | |
| 1249 | |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 1250 | def DumpInfoDict(d): |
| 1251 | for k, v in sorted(d.items()): |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 1252 | logger.info("%-25s = (%s) %s", k, type(v).__name__, v) |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 1253 | |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 1254 | |
Daniel Norman | 5541714 | 2019-11-25 16:04:36 -0800 | [diff] [blame] | 1255 | def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict): |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1256 | """Merges dynamic partition info variables. |
| 1257 | |
| 1258 | Args: |
| 1259 | framework_dict: The dictionary of dynamic partition info variables from the |
| 1260 | partial framework target files. |
| 1261 | vendor_dict: The dictionary of dynamic partition info variables from the |
| 1262 | partial vendor target files. |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1263 | |
| 1264 | Returns: |
| 1265 | The merged dynamic partition info dictionary. |
| 1266 | """ |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1267 | |
| 1268 | def uniq_concat(a, b): |
jiajia tang | e5ddfcd | 2022-06-21 10:36:12 +0800 | [diff] [blame] | 1269 | combined = set(a.split()) |
| 1270 | combined.update(set(b.split())) |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1271 | combined = [item.strip() for item in combined if item.strip()] |
| 1272 | return " ".join(sorted(combined)) |
| 1273 | |
| 1274 | if (framework_dict.get("use_dynamic_partitions") != |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 1275 | "true") or (vendor_dict.get("use_dynamic_partitions") != "true"): |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1276 | raise ValueError("Both dictionaries must have use_dynamic_partitions=true") |
| 1277 | |
| 1278 | merged_dict = {"use_dynamic_partitions": "true"} |
Kelvin Zhang | 6a683ce | 2022-05-02 12:19:45 -0700 | [diff] [blame] | 1279 | # For keys-value pairs that are the same, copy to merged dict |
| 1280 | for key in vendor_dict.keys(): |
| 1281 | if key in framework_dict and framework_dict[key] == vendor_dict[key]: |
| 1282 | merged_dict[key] = vendor_dict[key] |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1283 | |
| 1284 | merged_dict["dynamic_partition_list"] = uniq_concat( |
| 1285 | framework_dict.get("dynamic_partition_list", ""), |
| 1286 | vendor_dict.get("dynamic_partition_list", "")) |
| 1287 | |
| 1288 | # Super block devices are defined by the vendor dict. |
| 1289 | if "super_block_devices" in vendor_dict: |
| 1290 | merged_dict["super_block_devices"] = vendor_dict["super_block_devices"] |
jiajia tang | e5ddfcd | 2022-06-21 10:36:12 +0800 | [diff] [blame] | 1291 | for block_device in merged_dict["super_block_devices"].split(): |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1292 | key = "super_%s_device_size" % block_device |
| 1293 | if key not in vendor_dict: |
| 1294 | raise ValueError("Vendor dict does not contain required key %s." % key) |
| 1295 | merged_dict[key] = vendor_dict[key] |
| 1296 | |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1297 | # Partition groups and group sizes are defined by the vendor dict because |
| 1298 | # these values may vary for each board that uses a shared system image. |
| 1299 | merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"] |
jiajia tang | e5ddfcd | 2022-06-21 10:36:12 +0800 | [diff] [blame] | 1300 | for partition_group in merged_dict["super_partition_groups"].split(): |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1301 | # Set the partition group's size using the value from the vendor dict. |
Daniel Norman | 5541714 | 2019-11-25 16:04:36 -0800 | [diff] [blame] | 1302 | key = "super_%s_group_size" % partition_group |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1303 | if key not in vendor_dict: |
| 1304 | raise ValueError("Vendor dict does not contain required key %s." % key) |
| 1305 | merged_dict[key] = vendor_dict[key] |
| 1306 | |
| 1307 | # Set the partition group's partition list using a concatenation of the |
| 1308 | # framework and vendor partition lists. |
Daniel Norman | 5541714 | 2019-11-25 16:04:36 -0800 | [diff] [blame] | 1309 | key = "super_%s_partition_list" % partition_group |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1310 | merged_dict[key] = uniq_concat( |
| 1311 | framework_dict.get(key, ""), vendor_dict.get(key, "")) |
P Adarsh Reddy | 7e9b5c4 | 2019-12-20 15:07:24 +0530 | [diff] [blame] | 1312 | |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1313 | # Various other flags should be copied from the vendor dict, if defined. |
| 1314 | for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake", |
| 1315 | "super_metadata_device", "super_partition_error_limit", |
| 1316 | "super_partition_size"): |
| 1317 | if key in vendor_dict.keys(): |
| 1318 | merged_dict[key] = vendor_dict[key] |
| 1319 | |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1320 | return merged_dict |
| 1321 | |
| 1322 | |
Daniel Norman | 21c34f7 | 2020-11-11 17:25:50 -0800 | [diff] [blame] | 1323 | def PartitionMapFromTargetFiles(target_files_dir): |
| 1324 | """Builds a map from partition -> path within an extracted target files directory.""" |
| 1325 | # Keep possible_subdirs in sync with build/make/core/board_config.mk. |
| 1326 | possible_subdirs = { |
| 1327 | "system": ["SYSTEM"], |
| 1328 | "vendor": ["VENDOR", "SYSTEM/vendor"], |
| 1329 | "product": ["PRODUCT", "SYSTEM/product"], |
| 1330 | "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"], |
| 1331 | "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"], |
| 1332 | "vendor_dlkm": [ |
| 1333 | "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm" |
| 1334 | ], |
| 1335 | "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"], |
Ramji Jiyani | 13a4137 | 2022-01-27 07:05:08 +0000 | [diff] [blame] | 1336 | "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"], |
Daniel Norman | 21c34f7 | 2020-11-11 17:25:50 -0800 | [diff] [blame] | 1337 | } |
| 1338 | partition_map = {} |
| 1339 | for partition, subdirs in possible_subdirs.items(): |
| 1340 | for subdir in subdirs: |
| 1341 | if os.path.exists(os.path.join(target_files_dir, subdir)): |
| 1342 | partition_map[partition] = subdir |
| 1343 | break |
| 1344 | return partition_map |
| 1345 | |
| 1346 | |
Daniel Norman | d335156 | 2020-10-29 12:33:11 -0700 | [diff] [blame] | 1347 | def SharedUidPartitionViolations(uid_dict, partition_groups): |
| 1348 | """Checks for APK sharedUserIds that cross partition group boundaries. |
| 1349 | |
| 1350 | This uses a single or merged build's shareduid_violation_modules.json |
| 1351 | output file, as generated by find_shareduid_violation.py or |
| 1352 | core/tasks/find-shareduid-violation.mk. |
| 1353 | |
| 1354 | An error is defined as a sharedUserId that is found in a set of partitions |
| 1355 | that span more than one partition group. |
| 1356 | |
| 1357 | Args: |
| 1358 | uid_dict: A dictionary created by using the standard json module to read a |
| 1359 | complete shareduid_violation_modules.json file. |
| 1360 | partition_groups: A list of groups, where each group is a list of |
| 1361 | partitions. |
| 1362 | |
| 1363 | Returns: |
| 1364 | A list of error messages. |
| 1365 | """ |
| 1366 | errors = [] |
| 1367 | for uid, partitions in uid_dict.items(): |
| 1368 | found_in_groups = [ |
| 1369 | group for group in partition_groups |
| 1370 | if set(partitions.keys()) & set(group) |
| 1371 | ] |
| 1372 | if len(found_in_groups) > 1: |
| 1373 | errors.append( |
| 1374 | "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\"" |
| 1375 | % (uid, ",".join(sorted(partitions.keys())))) |
| 1376 | return errors |
| 1377 | |
| 1378 | |
Daniel Norman | 21c34f7 | 2020-11-11 17:25:50 -0800 | [diff] [blame] | 1379 | def RunHostInitVerifier(product_out, partition_map): |
| 1380 | """Runs host_init_verifier on the init rc files within partitions. |
| 1381 | |
| 1382 | host_init_verifier searches the etc/init path within each partition. |
| 1383 | |
| 1384 | Args: |
| 1385 | product_out: PRODUCT_OUT directory, containing partition directories. |
| 1386 | partition_map: A map of partition name -> relative path within product_out. |
| 1387 | """ |
| 1388 | allowed_partitions = ("system", "system_ext", "product", "vendor", "odm") |
| 1389 | cmd = ["host_init_verifier"] |
| 1390 | for partition, path in partition_map.items(): |
| 1391 | if partition not in allowed_partitions: |
| 1392 | raise ExternalError("Unable to call host_init_verifier for partition %s" % |
| 1393 | partition) |
| 1394 | cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)]) |
| 1395 | # Add --property-contexts if the file exists on the partition. |
| 1396 | property_contexts = "%s_property_contexts" % ( |
| 1397 | "plat" if partition == "system" else partition) |
| 1398 | property_contexts_path = os.path.join(product_out, path, "etc", "selinux", |
| 1399 | property_contexts) |
| 1400 | if os.path.exists(property_contexts_path): |
| 1401 | cmd.append("--property-contexts=%s" % property_contexts_path) |
| 1402 | # Add the passwd file if the file exists on the partition. |
| 1403 | passwd_path = os.path.join(product_out, path, "etc", "passwd") |
| 1404 | if os.path.exists(passwd_path): |
| 1405 | cmd.extend(["-p", passwd_path]) |
| 1406 | return RunAndCheckOutput(cmd) |
| 1407 | |
| 1408 | |
Kelvin Zhang | de53f7d | 2023-10-03 12:21:28 -0700 | [diff] [blame] | 1409 | def AppendAVBSigningArgs(cmd, partition, avb_salt=None): |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1410 | """Append signing arguments for avbtool.""" |
| 1411 | # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096" |
Kelvin Zhang | e634bde | 2023-04-28 23:59:43 -0700 | [diff] [blame] | 1412 | key_path = ResolveAVBSigningPathArgs( |
| 1413 | OPTIONS.info_dict.get("avb_" + partition + "_key_path")) |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1414 | algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm") |
| 1415 | if key_path and algorithm: |
| 1416 | cmd.extend(["--key", key_path, "--algorithm", algorithm]) |
Kelvin Zhang | de53f7d | 2023-10-03 12:21:28 -0700 | [diff] [blame] | 1417 | if avb_salt is None: |
| 1418 | avb_salt = OPTIONS.info_dict.get("avb_salt") |
Tao Bao | 2b6dfd6 | 2017-09-27 17:17:43 -0700 | [diff] [blame] | 1419 | # make_vbmeta_image doesn't like "--salt" (and it's not needed). |
Tao Bao | 744c4c7 | 2018-08-20 21:09:07 -0700 | [diff] [blame] | 1420 | if avb_salt and not partition.startswith("vbmeta"): |
Tao Bao | 2b6dfd6 | 2017-09-27 17:17:43 -0700 | [diff] [blame] | 1421 | cmd.extend(["--salt", avb_salt]) |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1422 | |
| 1423 | |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1424 | def ResolveAVBSigningPathArgs(split_args): |
| 1425 | |
| 1426 | def ResolveBinaryPath(path): |
| 1427 | if os.path.exists(path): |
| 1428 | return path |
Kelvin Zhang | 97a5afe | 2023-06-27 10:30:48 -0700 | [diff] [blame] | 1429 | if OPTIONS.search_path: |
| 1430 | new_path = os.path.join(OPTIONS.search_path, path) |
| 1431 | if os.path.exists(new_path): |
| 1432 | return new_path |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1433 | raise ExternalError( |
Kelvin Zhang | 43df080 | 2023-07-24 13:16:03 -0700 | [diff] [blame] | 1434 | "Failed to find {}".format(path)) |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1435 | |
| 1436 | if not split_args: |
| 1437 | return split_args |
| 1438 | |
| 1439 | if isinstance(split_args, list): |
| 1440 | for index, arg in enumerate(split_args[:-1]): |
| 1441 | if arg == '--signing_helper': |
| 1442 | signing_helper_path = split_args[index + 1] |
| 1443 | split_args[index + 1] = ResolveBinaryPath(signing_helper_path) |
| 1444 | break |
| 1445 | elif isinstance(split_args, str): |
| 1446 | split_args = ResolveBinaryPath(split_args) |
| 1447 | |
| 1448 | return split_args |
| 1449 | |
| 1450 | |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1451 | def GetAvbPartitionArg(partition, image, info_dict=None): |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1452 | """Returns the VBMeta arguments for one partition. |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1453 | |
| 1454 | It sets up the VBMeta argument by including the partition descriptor from the |
| 1455 | given 'image', or by configuring the partition as a chained partition. |
| 1456 | |
| 1457 | Args: |
| 1458 | partition: The name of the partition (e.g. "system"). |
| 1459 | image: The path to the partition image. |
| 1460 | info_dict: A dict returned by common.LoadInfoDict(). Will use |
| 1461 | OPTIONS.info_dict if None has been given. |
| 1462 | |
| 1463 | Returns: |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1464 | A list of VBMeta arguments for one partition. |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1465 | """ |
| 1466 | if info_dict is None: |
| 1467 | info_dict = OPTIONS.info_dict |
| 1468 | |
| 1469 | # Check if chain partition is used. |
| 1470 | key_path = info_dict.get("avb_" + partition + "_key_path") |
cfig | 1aeef72 | 2019-09-20 22:45:06 +0800 | [diff] [blame] | 1471 | if not key_path: |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 1472 | return [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image] |
cfig | 1aeef72 | 2019-09-20 22:45:06 +0800 | [diff] [blame] | 1473 | |
| 1474 | # For a non-A/B device, we don't chain /recovery nor include its descriptor |
| 1475 | # into vbmeta.img. The recovery image will be configured on an independent |
| 1476 | # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION. |
| 1477 | # See details at |
| 1478 | # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery. |
Tao Bao | 3612c88 | 2019-10-14 17:49:31 -0700 | [diff] [blame] | 1479 | if info_dict.get("ab_update") != "true" and partition == "recovery": |
cfig | 1aeef72 | 2019-09-20 22:45:06 +0800 | [diff] [blame] | 1480 | return [] |
| 1481 | |
| 1482 | # Otherwise chain the partition into vbmeta. |
| 1483 | chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict) |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 1484 | return [AVB_ARG_NAME_CHAIN_PARTITION, chained_partition_arg] |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1485 | |
| 1486 | |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1487 | def GetAvbPartitionsArg(partitions, |
| 1488 | resolve_rollback_index_location_conflict=False, |
| 1489 | info_dict=None): |
| 1490 | """Returns the VBMeta arguments for all AVB partitions. |
| 1491 | |
| 1492 | It sets up the VBMeta argument by calling GetAvbPartitionArg of all |
| 1493 | partitions. |
| 1494 | |
| 1495 | Args: |
| 1496 | partitions: A dict of all AVB partitions. |
| 1497 | resolve_rollback_index_location_conflict: If true, resolve conflicting avb |
| 1498 | rollback index locations by assigning the smallest unused value. |
| 1499 | info_dict: A dict returned by common.LoadInfoDict(). |
| 1500 | |
| 1501 | Returns: |
| 1502 | A list of VBMeta arguments for all partitions. |
| 1503 | """ |
| 1504 | # An AVB partition will be linked into a vbmeta partition by either |
| 1505 | # AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG or AVB_ARG_NAME_CHAIN_PARTITION, there |
| 1506 | # should be no other cases. |
| 1507 | valid_args = { |
| 1508 | AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: [], |
| 1509 | AVB_ARG_NAME_CHAIN_PARTITION: [] |
| 1510 | } |
| 1511 | |
| 1512 | for partition, path in partitions.items(): |
| 1513 | avb_partition_arg = GetAvbPartitionArg(partition, path, info_dict) |
| 1514 | if not avb_partition_arg: |
| 1515 | continue |
| 1516 | arg_name, arg_value = avb_partition_arg |
| 1517 | assert arg_name in valid_args |
| 1518 | valid_args[arg_name].append(arg_value) |
| 1519 | |
| 1520 | # Copy the arguments for non-chained AVB partitions directly without |
| 1521 | # intervention. |
| 1522 | avb_args = [] |
| 1523 | for image in valid_args[AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG]: |
| 1524 | avb_args.extend([AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]) |
| 1525 | |
| 1526 | # Handle chained AVB partitions. The rollback index location might be |
| 1527 | # adjusted if two partitions use the same value. This may happen when mixing |
| 1528 | # a shared system image with other vendor images. |
| 1529 | used_index_loc = set() |
| 1530 | for chained_partition_arg in valid_args[AVB_ARG_NAME_CHAIN_PARTITION]: |
| 1531 | if resolve_rollback_index_location_conflict: |
| 1532 | while chained_partition_arg.rollback_index_location in used_index_loc: |
| 1533 | chained_partition_arg.rollback_index_location += 1 |
| 1534 | |
| 1535 | used_index_loc.add(chained_partition_arg.rollback_index_location) |
| 1536 | avb_args.extend([AVB_ARG_NAME_CHAIN_PARTITION, |
| 1537 | chained_partition_arg.to_string()]) |
| 1538 | |
| 1539 | return avb_args |
| 1540 | |
| 1541 | |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1542 | def GetAvbChainedPartitionArg(partition, info_dict, key=None): |
| 1543 | """Constructs and returns the arg to build or verify a chained partition. |
| 1544 | |
| 1545 | Args: |
| 1546 | partition: The partition name. |
| 1547 | info_dict: The info dict to look up the key info and rollback index |
| 1548 | location. |
| 1549 | key: The key to be used for building or verifying the partition. Defaults to |
| 1550 | the key listed in info_dict. |
| 1551 | |
| 1552 | Returns: |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1553 | An AvbChainedPartitionArg object with rollback_index_location and |
| 1554 | pubkey_path that can be used to build or verify vbmeta image. |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1555 | """ |
| 1556 | if key is None: |
| 1557 | key = info_dict["avb_" + partition + "_key_path"] |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1558 | key = ResolveAVBSigningPathArgs(key) |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 1559 | pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key) |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1560 | rollback_index_location = info_dict[ |
| 1561 | "avb_" + partition + "_rollback_index_location"] |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1562 | return AvbChainedPartitionArg( |
| 1563 | partition=partition, |
| 1564 | rollback_index_location=int(rollback_index_location), |
| 1565 | pubkey_path=pubkey_path) |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1566 | |
| 1567 | |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1568 | def BuildVBMeta(image_path, partitions, name, needed_partitions, |
| 1569 | resolve_rollback_index_location_conflict=False): |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1570 | """Creates a VBMeta image. |
| 1571 | |
| 1572 | It generates the requested VBMeta image. The requested image could be for |
| 1573 | top-level or chained VBMeta image, which is determined based on the name. |
| 1574 | |
| 1575 | Args: |
| 1576 | image_path: The output path for the new VBMeta image. |
| 1577 | partitions: A dict that's keyed by partition names with image paths as |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1578 | values. Only valid partition names are accepted, as partitions listed |
| 1579 | in common.AVB_PARTITIONS and custom partitions listed in |
| 1580 | OPTIONS.info_dict.get("avb_custom_images_partition_list") |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1581 | name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'. |
| 1582 | needed_partitions: Partitions whose descriptors should be included into the |
| 1583 | generated VBMeta image. |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1584 | resolve_rollback_index_location_conflict: If true, resolve conflicting avb |
| 1585 | rollback index locations by assigning the smallest unused value. |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1586 | |
| 1587 | Raises: |
| 1588 | AssertionError: On invalid input args. |
| 1589 | """ |
| 1590 | avbtool = OPTIONS.info_dict["avb_avbtool"] |
| 1591 | cmd = [avbtool, "make_vbmeta_image", "--output", image_path] |
| 1592 | AppendAVBSigningArgs(cmd, name) |
| 1593 | |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1594 | custom_partitions = OPTIONS.info_dict.get( |
| 1595 | "avb_custom_images_partition_list", "").strip().split() |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 1596 | custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get( |
| 1597 | "avb_custom_vbmeta_images_partition_list", "").strip().split()] |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1598 | |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1599 | avb_partitions = {} |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1600 | for partition, path in partitions.items(): |
| 1601 | if partition not in needed_partitions: |
| 1602 | continue |
| 1603 | assert (partition in AVB_PARTITIONS or |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1604 | partition in AVB_VBMETA_PARTITIONS or |
Kelvin Zhang | b81b4e3 | 2023-01-10 10:37:56 -0800 | [diff] [blame] | 1605 | partition in custom_avb_partitions or |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1606 | partition in custom_partitions), \ |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1607 | 'Unknown partition: {}'.format(partition) |
| 1608 | assert os.path.exists(path), \ |
| 1609 | 'Failed to find {} for {}'.format(path, partition) |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1610 | avb_partitions[partition] = path |
| 1611 | cmd.extend(GetAvbPartitionsArg(avb_partitions, |
| 1612 | resolve_rollback_index_location_conflict)) |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1613 | |
| 1614 | args = OPTIONS.info_dict.get("avb_{}_args".format(name)) |
| 1615 | if args and args.strip(): |
| 1616 | split_args = shlex.split(args) |
| 1617 | for index, arg in enumerate(split_args[:-1]): |
Ivan Lozano | b021b2a | 2020-07-28 09:31:06 -0400 | [diff] [blame] | 1618 | # Check that the image file exists. Some images might be defined |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1619 | # as a path relative to source tree, which may not be available at the |
| 1620 | # same location when running this script (we have the input target_files |
| 1621 | # zip only). For such cases, we additionally scan other locations (e.g. |
| 1622 | # IMAGES/, RADIO/, etc) before bailing out. |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 1623 | if arg == AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: |
Tianjie Xu | eaed60c | 2020-03-12 00:33:28 -0700 | [diff] [blame] | 1624 | chained_image = split_args[index + 1] |
| 1625 | if os.path.exists(chained_image): |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1626 | continue |
| 1627 | found = False |
| 1628 | for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']: |
| 1629 | alt_path = os.path.join( |
Tianjie Xu | eaed60c | 2020-03-12 00:33:28 -0700 | [diff] [blame] | 1630 | OPTIONS.input_tmp, dir_name, os.path.basename(chained_image)) |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1631 | if os.path.exists(alt_path): |
| 1632 | split_args[index + 1] = alt_path |
| 1633 | found = True |
| 1634 | break |
Tianjie Xu | eaed60c | 2020-03-12 00:33:28 -0700 | [diff] [blame] | 1635 | assert found, 'Failed to find {}'.format(chained_image) |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1636 | |
| 1637 | split_args = ResolveAVBSigningPathArgs(split_args) |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1638 | cmd.extend(split_args) |
| 1639 | |
| 1640 | RunAndCheckOutput(cmd) |
| 1641 | |
| 1642 | |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1643 | def _MakeRamdisk(sourcedir, fs_config_file=None, |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1644 | dev_node_file=None, |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1645 | ramdisk_format=RamdiskFormat.GZ): |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1646 | ramdisk_img = tempfile.NamedTemporaryFile() |
| 1647 | |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1648 | cmd = ["mkbootfs"] |
| 1649 | |
| 1650 | if fs_config_file and os.access(fs_config_file, os.F_OK): |
| 1651 | cmd.extend(["-f", fs_config_file]) |
| 1652 | |
| 1653 | if dev_node_file and os.access(dev_node_file, os.F_OK): |
| 1654 | cmd.extend(["-n", dev_node_file]) |
| 1655 | |
| 1656 | cmd.append(os.path.join(sourcedir, "RAMDISK")) |
| 1657 | |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1658 | p1 = Run(cmd, stdout=subprocess.PIPE) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1659 | if ramdisk_format == RamdiskFormat.LZ4: |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 1660 | p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout, |
J. Avila | 98cd4cc | 2020-06-10 20:09:10 +0000 | [diff] [blame] | 1661 | stdout=ramdisk_img.file.fileno()) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1662 | elif ramdisk_format == RamdiskFormat.GZ: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 1663 | p2 = Run(["gzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno()) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1664 | else: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 1665 | raise ValueError("Only support lz4 or gzip ramdisk format.") |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1666 | |
| 1667 | p2.wait() |
| 1668 | p1.wait() |
| 1669 | assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,) |
J. Avila | 98cd4cc | 2020-06-10 20:09:10 +0000 | [diff] [blame] | 1670 | assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1671 | |
| 1672 | return ramdisk_img |
| 1673 | |
| 1674 | |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1675 | def _BuildBootableImage(image_name, sourcedir, fs_config_file, |
| 1676 | dev_node_file=None, info_dict=None, |
Tao Bao | d42e97e | 2016-11-30 12:11:57 -0800 | [diff] [blame] | 1677 | has_ramdisk=False, two_step_image=False): |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1678 | """Build a bootable image from the specified sourcedir. |
Doug Zongker | e1c31ba | 2009-06-23 17:40:35 -0700 | [diff] [blame] | 1679 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1680 | Take a kernel, cmdline, and optionally a ramdisk directory from the input (in |
Tao Bao | d42e97e | 2016-11-30 12:11:57 -0800 | [diff] [blame] | 1681 | 'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if |
| 1682 | we are building a two-step special image (i.e. building a recovery image to |
| 1683 | be loaded into /boot in two-step OTAs). |
| 1684 | |
| 1685 | Return the image data, or None if sourcedir does not appear to contains files |
| 1686 | for building the requested image. |
| 1687 | """ |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1688 | |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1689 | if info_dict is None: |
| 1690 | info_dict = OPTIONS.info_dict |
| 1691 | |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1692 | # "boot" or "recovery", without extension. |
| 1693 | partition_name = os.path.basename(sourcedir).lower() |
| 1694 | |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1695 | kernel = None |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1696 | if partition_name == "recovery": |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1697 | if info_dict.get("exclude_kernel_from_recovery_image") == "true": |
| 1698 | logger.info("Excluded kernel binary from recovery image.") |
| 1699 | else: |
| 1700 | kernel = "kernel" |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1701 | elif partition_name == "init_boot": |
| 1702 | pass |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1703 | else: |
| 1704 | kernel = image_name.replace("boot", "kernel") |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1705 | kernel = kernel.replace(".img", "") |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1706 | if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK): |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1707 | return None |
| 1708 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1709 | kernel_path = os.path.join(sourcedir, kernel) if kernel else None |
| 1710 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1711 | if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK): |
Doug Zongker | e1c31ba | 2009-06-23 17:40:35 -0700 | [diff] [blame] | 1712 | return None |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1713 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1714 | img = tempfile.NamedTemporaryFile() |
| 1715 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1716 | if has_ramdisk: |
TJ Rhoades | 6f488e9 | 2022-05-01 22:16:22 -0700 | [diff] [blame] | 1717 | ramdisk_format = GetRamdiskFormat(info_dict) |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1718 | ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, dev_node_file, |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1719 | ramdisk_format=ramdisk_format) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1720 | |
Bjorn Andersson | 612e2cd | 2012-11-25 16:53:44 -0800 | [diff] [blame] | 1721 | # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set |
| 1722 | mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" |
| 1723 | |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1724 | cmd = [mkbootimg] |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1725 | if kernel_path is not None: |
| 1726 | cmd.extend(["--kernel", kernel_path]) |
Doug Zongker | 38a649f | 2009-06-17 09:07:09 -0700 | [diff] [blame] | 1727 | |
Benoit Fradin | a45a868 | 2014-07-14 21:00:43 +0200 | [diff] [blame] | 1728 | fn = os.path.join(sourcedir, "second") |
| 1729 | if os.access(fn, os.F_OK): |
| 1730 | cmd.append("--second") |
| 1731 | cmd.append(fn) |
| 1732 | |
Hridya Valsaraju | 9683b2f | 2019-01-22 18:08:59 -0800 | [diff] [blame] | 1733 | fn = os.path.join(sourcedir, "dtb") |
| 1734 | if os.access(fn, os.F_OK): |
| 1735 | cmd.append("--dtb") |
| 1736 | cmd.append(fn) |
| 1737 | |
Doug Zongker | 171f1cd | 2009-06-15 22:36:37 -0700 | [diff] [blame] | 1738 | fn = os.path.join(sourcedir, "cmdline") |
| 1739 | if os.access(fn, os.F_OK): |
Doug Zongker | 38a649f | 2009-06-17 09:07:09 -0700 | [diff] [blame] | 1740 | cmd.append("--cmdline") |
| 1741 | cmd.append(open(fn).read().rstrip("\n")) |
| 1742 | |
| 1743 | fn = os.path.join(sourcedir, "base") |
| 1744 | if os.access(fn, os.F_OK): |
| 1745 | cmd.append("--base") |
| 1746 | cmd.append(open(fn).read().rstrip("\n")) |
| 1747 | |
Ying Wang | 4de6b5b | 2010-08-25 14:29:34 -0700 | [diff] [blame] | 1748 | fn = os.path.join(sourcedir, "pagesize") |
| 1749 | if os.access(fn, os.F_OK): |
| 1750 | cmd.append("--pagesize") |
| 1751 | cmd.append(open(fn).read().rstrip("\n")) |
| 1752 | |
Steve Muckle | f84668e | 2020-03-16 19:13:46 -0700 | [diff] [blame] | 1753 | if partition_name == "recovery": |
| 1754 | args = info_dict.get("recovery_mkbootimg_args") |
P.Adarsh Reddy | d8e24ee | 2020-05-04 19:40:16 +0530 | [diff] [blame] | 1755 | if not args: |
| 1756 | # Fall back to "mkbootimg_args" for recovery image |
| 1757 | # in case "recovery_mkbootimg_args" is not set. |
| 1758 | args = info_dict.get("mkbootimg_args") |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1759 | elif partition_name == "init_boot": |
| 1760 | args = info_dict.get("mkbootimg_init_args") |
Steve Muckle | f84668e | 2020-03-16 19:13:46 -0700 | [diff] [blame] | 1761 | else: |
| 1762 | args = info_dict.get("mkbootimg_args") |
Doug Zongker | d513160 | 2012-08-02 14:46:42 -0700 | [diff] [blame] | 1763 | if args and args.strip(): |
Jianxun Zhang | 0984949 | 2013-04-17 15:19:19 -0700 | [diff] [blame] | 1764 | cmd.extend(shlex.split(args)) |
Doug Zongker | d513160 | 2012-08-02 14:46:42 -0700 | [diff] [blame] | 1765 | |
Yi-Yo Chiang | 24da1a4 | 2022-02-22 19:51:15 +0800 | [diff] [blame] | 1766 | args = info_dict.get("mkbootimg_version_args") |
| 1767 | if args and args.strip(): |
| 1768 | cmd.extend(shlex.split(args)) |
Sami Tolvanen | 3303d90 | 2016-03-15 16:49:30 +0000 | [diff] [blame] | 1769 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1770 | if has_ramdisk: |
| 1771 | cmd.extend(["--ramdisk", ramdisk_img.name]) |
| 1772 | |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1773 | img_unsigned = None |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 1774 | if info_dict.get("vboot"): |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1775 | img_unsigned = tempfile.NamedTemporaryFile() |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1776 | cmd.extend(["--output", img_unsigned.name]) |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1777 | else: |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1778 | cmd.extend(["--output", img.name]) |
Doug Zongker | 38a649f | 2009-06-17 09:07:09 -0700 | [diff] [blame] | 1779 | |
Chen, ZhiminX | 752439b | 2018-09-23 22:10:47 +0800 | [diff] [blame] | 1780 | if partition_name == "recovery": |
| 1781 | if info_dict.get("include_recovery_dtbo") == "true": |
| 1782 | fn = os.path.join(sourcedir, "recovery_dtbo") |
| 1783 | cmd.extend(["--recovery_dtbo", fn]) |
| 1784 | if info_dict.get("include_recovery_acpio") == "true": |
| 1785 | fn = os.path.join(sourcedir, "recovery_acpio") |
| 1786 | cmd.extend(["--recovery_acpio", fn]) |
Hridya Valsaraju | e74a38b | 2018-03-21 12:15:11 -0700 | [diff] [blame] | 1787 | |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 1788 | RunAndCheckOutput(cmd) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1789 | |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1790 | # Sign the image if vboot is non-empty. |
hungweichen | 22e3b01 | 2022-08-19 06:35:43 +0000 | [diff] [blame] | 1791 | if info_dict.get("vboot"): |
Tao Bao | bf70c31 | 2017-07-11 17:27:55 -0700 | [diff] [blame] | 1792 | path = "/" + partition_name |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1793 | img_keyblock = tempfile.NamedTemporaryFile() |
Tao Bao | 4f104d1 | 2017-02-17 23:21:31 -0800 | [diff] [blame] | 1794 | # We have switched from the prebuilt futility binary to using the tool |
| 1795 | # (futility-host) built from the source. Override the setting in the old |
| 1796 | # TF.zip. |
| 1797 | futility = info_dict["futility"] |
| 1798 | if futility.startswith("prebuilts/"): |
| 1799 | futility = "futility-host" |
| 1800 | cmd = [info_dict["vboot_signer_cmd"], futility, |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1801 | img_unsigned.name, info_dict["vboot_key"] + ".vbpubk", |
Furquan Shaikh | 852b8de | 2015-08-10 11:43:45 -0700 | [diff] [blame] | 1802 | info_dict["vboot_key"] + ".vbprivk", |
| 1803 | info_dict["vboot_subkey"] + ".vbprivk", |
| 1804 | img_keyblock.name, |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1805 | img.name] |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 1806 | RunAndCheckOutput(cmd) |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1807 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 1808 | # Clean up the temp files. |
| 1809 | img_unsigned.close() |
| 1810 | img_keyblock.close() |
| 1811 | |
David Zeuthen | 8fecb28 | 2017-12-01 16:24:01 -0500 | [diff] [blame] | 1812 | # AVB: if enabled, calculate and add hash to boot.img or recovery.img. |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1813 | if info_dict.get("avb_enable") == "true": |
Tao Bao | f88e0ce | 2019-03-18 14:01:38 -0700 | [diff] [blame] | 1814 | avbtool = info_dict["avb_avbtool"] |
Steve Muckle | 903a1ca | 2020-05-07 17:32:10 -0700 | [diff] [blame] | 1815 | if partition_name == "recovery": |
| 1816 | part_size = info_dict["recovery_size"] |
| 1817 | else: |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1818 | part_size = info_dict[image_name.replace(".img", "_size")] |
David Zeuthen | 2ce63ed | 2016-09-15 13:43:54 -0400 | [diff] [blame] | 1819 | cmd = [avbtool, "add_hash_footer", "--image", img.name, |
Tao Bao | bf70c31 | 2017-07-11 17:27:55 -0700 | [diff] [blame] | 1820 | "--partition_size", str(part_size), "--partition_name", |
| 1821 | partition_name] |
Kelvin Zhang | de53f7d | 2023-10-03 12:21:28 -0700 | [diff] [blame] | 1822 | salt = None |
| 1823 | if kernel_path is not None: |
| 1824 | with open(kernel_path, "rb") as fp: |
| 1825 | salt = sha256(fp.read()).hexdigest() |
| 1826 | AppendAVBSigningArgs(cmd, partition_name, salt) |
David Zeuthen | 8fecb28 | 2017-12-01 16:24:01 -0500 | [diff] [blame] | 1827 | args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args") |
David Zeuthen | 2ce63ed | 2016-09-15 13:43:54 -0400 | [diff] [blame] | 1828 | if args and args.strip(): |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1829 | split_args = ResolveAVBSigningPathArgs(shlex.split(args)) |
| 1830 | cmd.extend(split_args) |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 1831 | RunAndCheckOutput(cmd) |
David Zeuthen | d995f4b | 2016-01-29 16:59:17 -0500 | [diff] [blame] | 1832 | |
| 1833 | img.seek(os.SEEK_SET, 0) |
| 1834 | data = img.read() |
| 1835 | |
| 1836 | if has_ramdisk: |
| 1837 | ramdisk_img.close() |
| 1838 | img.close() |
| 1839 | |
| 1840 | return data |
| 1841 | |
| 1842 | |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1843 | def _SignBootableImage(image_path, prebuilt_name, partition_name, |
| 1844 | info_dict=None): |
| 1845 | """Performs AVB signing for a prebuilt boot.img. |
| 1846 | |
| 1847 | Args: |
| 1848 | image_path: The full path of the image, e.g., /path/to/boot.img. |
| 1849 | prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img, |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1850 | boot-5.10.img, recovery.img or init_boot.img. |
| 1851 | partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'. |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1852 | info_dict: The information dict read from misc_info.txt. |
| 1853 | """ |
| 1854 | if info_dict is None: |
| 1855 | info_dict = OPTIONS.info_dict |
| 1856 | |
| 1857 | # AVB: if enabled, calculate and add hash to boot.img or recovery.img. |
| 1858 | if info_dict.get("avb_enable") == "true": |
| 1859 | avbtool = info_dict["avb_avbtool"] |
| 1860 | if partition_name == "recovery": |
| 1861 | part_size = info_dict["recovery_size"] |
| 1862 | else: |
| 1863 | part_size = info_dict[prebuilt_name.replace(".img", "_size")] |
| 1864 | |
| 1865 | cmd = [avbtool, "add_hash_footer", "--image", image_path, |
| 1866 | "--partition_size", str(part_size), "--partition_name", |
| 1867 | partition_name] |
Kelvin Zhang | 160762a | 2023-10-17 12:27:56 -0700 | [diff] [blame] | 1868 | # Use sha256 of the kernel as salt for reproducible builds |
| 1869 | with tempfile.TemporaryDirectory() as tmpdir: |
| 1870 | RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir]) |
| 1871 | for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]: |
| 1872 | path = os.path.join(tmpdir, filename) |
| 1873 | if os.path.exists(path) and os.path.getsize(path): |
Kelvin Zhang | 9f9ac4e | 2023-11-01 10:12:03 -0700 | [diff] [blame] | 1874 | print("Using {} as salt for avb footer of {}".format( |
| 1875 | filename, partition_name)) |
Kelvin Zhang | 160762a | 2023-10-17 12:27:56 -0700 | [diff] [blame] | 1876 | with open(path, "rb") as fp: |
| 1877 | salt = sha256(fp.read()).hexdigest() |
Kelvin Zhang | 9f9ac4e | 2023-11-01 10:12:03 -0700 | [diff] [blame] | 1878 | break |
Kelvin Zhang | 160762a | 2023-10-17 12:27:56 -0700 | [diff] [blame] | 1879 | AppendAVBSigningArgs(cmd, partition_name, salt) |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1880 | args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args") |
| 1881 | if args and args.strip(): |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1882 | split_args = ResolveAVBSigningPathArgs(shlex.split(args)) |
| 1883 | cmd.extend(split_args) |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1884 | RunAndCheckOutput(cmd) |
| 1885 | |
| 1886 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1887 | def HasRamdisk(partition_name, info_dict=None): |
| 1888 | """Returns true/false to see if a bootable image should have a ramdisk. |
| 1889 | |
| 1890 | Args: |
| 1891 | partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'. |
| 1892 | info_dict: The information dict read from misc_info.txt. |
| 1893 | """ |
| 1894 | if info_dict is None: |
| 1895 | info_dict = OPTIONS.info_dict |
| 1896 | |
| 1897 | if partition_name != "boot": |
| 1898 | return True # init_boot.img or recovery.img has a ramdisk. |
| 1899 | |
| 1900 | if info_dict.get("recovery_as_boot") == "true": |
| 1901 | return True # the recovery-as-boot boot.img has a RECOVERY ramdisk. |
| 1902 | |
Yi-Yo Chiang | 92a517d | 2023-12-01 07:02:17 +0000 | [diff] [blame] | 1903 | if info_dict.get("gki_boot_image_without_ramdisk") == "true": |
| 1904 | return False # A GKI boot.img has no ramdisk since Android-13. |
| 1905 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1906 | if info_dict.get("init_boot") == "true": |
| 1907 | # The ramdisk is moved to the init_boot.img, so there is NO |
| 1908 | # ramdisk in the boot.img or boot-<kernel version>.img. |
| 1909 | return False |
| 1910 | |
| 1911 | return True |
| 1912 | |
| 1913 | |
Doug Zongker | d513160 | 2012-08-02 14:46:42 -0700 | [diff] [blame] | 1914 | def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir, |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1915 | info_dict=None, two_step_image=False, |
| 1916 | dev_nodes=False): |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1917 | """Return a File object with the desired bootable image. |
| 1918 | |
| 1919 | Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name', |
| 1920 | otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from |
| 1921 | the source files in 'unpack_dir'/'tree_subdir'.""" |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1922 | |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1923 | if info_dict is None: |
| 1924 | info_dict = OPTIONS.info_dict |
| 1925 | |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 1926 | prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name) |
| 1927 | if os.path.exists(prebuilt_path): |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 1928 | logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name) |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 1929 | return File.FromLocalFile(name, prebuilt_path) |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 1930 | |
| 1931 | prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name) |
| 1932 | if os.path.exists(prebuilt_path): |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 1933 | logger.info("using prebuilt %s from IMAGES...", prebuilt_name) |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 1934 | return File.FromLocalFile(name, prebuilt_path) |
| 1935 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1936 | partition_name = tree_subdir.lower() |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1937 | prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name) |
| 1938 | if os.path.exists(prebuilt_path): |
| 1939 | logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name) |
| 1940 | signed_img = MakeTempFile() |
| 1941 | shutil.copy(prebuilt_path, signed_img) |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1942 | _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict) |
| 1943 | return File.FromLocalFile(name, signed_img) |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1944 | |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1945 | logger.info("building image from target_files %s...", tree_subdir) |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1946 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1947 | has_ramdisk = HasRamdisk(partition_name, info_dict) |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1948 | |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 1949 | fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt" |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1950 | data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir), |
David Zeuthen | 2ce63ed | 2016-09-15 13:43:54 -0400 | [diff] [blame] | 1951 | os.path.join(unpack_dir, fs_config), |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1952 | os.path.join(unpack_dir, 'META/ramdisk_node_list') |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 1953 | if dev_nodes else None, |
Tao Bao | d42e97e | 2016-11-30 12:11:57 -0800 | [diff] [blame] | 1954 | info_dict, has_ramdisk, two_step_image) |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 1955 | if data: |
| 1956 | return File(name, data) |
| 1957 | return None |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 1958 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1959 | |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 1960 | def _BuildVendorBootImage(sourcedir, partition_name, info_dict=None): |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1961 | """Build a vendor boot image from the specified sourcedir. |
| 1962 | |
| 1963 | Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and |
| 1964 | turn them into a vendor boot image. |
| 1965 | |
| 1966 | Return the image data, or None if sourcedir does not appear to contains files |
| 1967 | for building the requested image. |
| 1968 | """ |
| 1969 | |
| 1970 | if info_dict is None: |
| 1971 | info_dict = OPTIONS.info_dict |
| 1972 | |
| 1973 | img = tempfile.NamedTemporaryFile() |
| 1974 | |
TJ Rhoades | 6f488e9 | 2022-05-01 22:16:22 -0700 | [diff] [blame] | 1975 | ramdisk_format = GetRamdiskFormat(info_dict) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1976 | ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1977 | |
| 1978 | # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set |
| 1979 | mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" |
| 1980 | |
| 1981 | cmd = [mkbootimg] |
| 1982 | |
| 1983 | fn = os.path.join(sourcedir, "dtb") |
| 1984 | if os.access(fn, os.F_OK): |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 1985 | has_vendor_kernel_boot = (info_dict.get( |
| 1986 | "vendor_kernel_boot", "").lower() == "true") |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 1987 | |
| 1988 | # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot. |
| 1989 | # Otherwise pack dtb into vendor_boot. |
| 1990 | if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot": |
| 1991 | cmd.append("--dtb") |
| 1992 | cmd.append(fn) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1993 | |
| 1994 | fn = os.path.join(sourcedir, "vendor_cmdline") |
| 1995 | if os.access(fn, os.F_OK): |
| 1996 | cmd.append("--vendor_cmdline") |
| 1997 | cmd.append(open(fn).read().rstrip("\n")) |
| 1998 | |
| 1999 | fn = os.path.join(sourcedir, "base") |
| 2000 | if os.access(fn, os.F_OK): |
| 2001 | cmd.append("--base") |
| 2002 | cmd.append(open(fn).read().rstrip("\n")) |
| 2003 | |
| 2004 | fn = os.path.join(sourcedir, "pagesize") |
| 2005 | if os.access(fn, os.F_OK): |
| 2006 | cmd.append("--pagesize") |
| 2007 | cmd.append(open(fn).read().rstrip("\n")) |
| 2008 | |
| 2009 | args = info_dict.get("mkbootimg_args") |
| 2010 | if args and args.strip(): |
| 2011 | cmd.extend(shlex.split(args)) |
| 2012 | |
| 2013 | args = info_dict.get("mkbootimg_version_args") |
| 2014 | if args and args.strip(): |
| 2015 | cmd.extend(shlex.split(args)) |
| 2016 | |
| 2017 | cmd.extend(["--vendor_ramdisk", ramdisk_img.name]) |
| 2018 | cmd.extend(["--vendor_boot", img.name]) |
| 2019 | |
Devin Moore | 5050901 | 2021-01-13 10:45:04 -0800 | [diff] [blame] | 2020 | fn = os.path.join(sourcedir, "vendor_bootconfig") |
| 2021 | if os.access(fn, os.F_OK): |
| 2022 | cmd.append("--vendor_bootconfig") |
| 2023 | cmd.append(fn) |
| 2024 | |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2025 | ramdisk_fragment_imgs = [] |
| 2026 | fn = os.path.join(sourcedir, "vendor_ramdisk_fragments") |
| 2027 | if os.access(fn, os.F_OK): |
| 2028 | ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n")) |
| 2029 | for ramdisk_fragment in ramdisk_fragments: |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 2030 | fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", |
| 2031 | ramdisk_fragment, "mkbootimg_args") |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2032 | cmd.extend(shlex.split(open(fn).read().rstrip("\n"))) |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 2033 | fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", |
| 2034 | ramdisk_fragment, "prebuilt_ramdisk") |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2035 | # Use prebuilt image if found, else create ramdisk from supplied files. |
| 2036 | if os.access(fn, os.F_OK): |
| 2037 | ramdisk_fragment_pathname = fn |
| 2038 | else: |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 2039 | ramdisk_fragment_root = os.path.join( |
| 2040 | sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 2041 | ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, |
| 2042 | ramdisk_format=ramdisk_format) |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2043 | ramdisk_fragment_imgs.append(ramdisk_fragment_img) |
| 2044 | ramdisk_fragment_pathname = ramdisk_fragment_img.name |
| 2045 | cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname]) |
| 2046 | |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2047 | RunAndCheckOutput(cmd) |
| 2048 | |
| 2049 | # AVB: if enabled, calculate and add hash. |
| 2050 | if info_dict.get("avb_enable") == "true": |
| 2051 | avbtool = info_dict["avb_avbtool"] |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2052 | part_size = info_dict[f'{partition_name}_size'] |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2053 | cmd = [avbtool, "add_hash_footer", "--image", img.name, |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2054 | "--partition_size", str(part_size), "--partition_name", partition_name] |
| 2055 | AppendAVBSigningArgs(cmd, partition_name) |
| 2056 | args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args') |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2057 | if args and args.strip(): |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 2058 | split_args = ResolveAVBSigningPathArgs(shlex.split(args)) |
| 2059 | cmd.extend(split_args) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2060 | RunAndCheckOutput(cmd) |
| 2061 | |
| 2062 | img.seek(os.SEEK_SET, 0) |
| 2063 | data = img.read() |
| 2064 | |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2065 | for f in ramdisk_fragment_imgs: |
| 2066 | f.close() |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2067 | ramdisk_img.close() |
| 2068 | img.close() |
| 2069 | |
| 2070 | return data |
| 2071 | |
| 2072 | |
def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                       info_dict=None):
  """Returns a File object holding the vendor boot image.

  Prefers a prebuilt image under 'unpack_dir'/IMAGES; otherwise builds one
  from the source files in 'unpack_dir'/'tree_subdir'. Returns None when the
  image cannot be built.
  """
  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt)

  logger.info("building image from target_files %s...", tree_subdir)

  info = OPTIONS.info_dict if info_dict is None else info_dict
  image_data = _BuildVendorBootImage(
      os.path.join(unpack_dir, tree_subdir), "vendor_boot", info)
  return File(name, image_data) if image_data else None
| 2095 | |
| 2096 | |
def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
                             info_dict=None):
  """Returns a File object holding the vendor kernel boot image.

  Prefers a prebuilt image under 'unpack_dir'/IMAGES; otherwise builds one
  from the source files in 'unpack_dir'/'tree_subdir'. Returns None when the
  image cannot be built.
  """
  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    return File.FromLocalFile(name, prebuilt)

  logger.info("building image from target_files %s...", tree_subdir)

  info = OPTIONS.info_dict if info_dict is None else info_dict
  image_data = _BuildVendorBootImage(
      os.path.join(unpack_dir, tree_subdir), "vendor_kernel_boot", info)
  return File(name, image_data) if image_data else None
| 2119 | |
| 2120 | |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2121 | def Gunzip(in_filename, out_filename): |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 2122 | """Gunzips the given gzip compressed file to a given output file.""" |
| 2123 | with gzip.open(in_filename, "rb") as in_file, \ |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2124 | open(out_filename, "wb") as out_file: |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2125 | shutil.copyfileobj(in_file, out_file) |
| 2126 | |
| 2127 | |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2128 | def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str): |
| 2129 | # According to https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838 |
| 2130 | # higher bits of |external_attr| are unix file permission and types |
| 2131 | unix_filetype = info.external_attr >> 16 |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2132 | file_perm = unix_filetype & 0o777 |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2133 | |
| 2134 | def CheckMask(a, mask): |
| 2135 | return (a & mask) == mask |
| 2136 | |
| 2137 | def IsSymlink(a): |
| 2138 | return CheckMask(a, stat.S_IFLNK) |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2139 | |
| 2140 | def IsDir(a): |
| 2141 | return CheckMask(a, stat.S_IFDIR) |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2142 | # python3.11 zipfile implementation doesn't handle symlink correctly |
| 2143 | if not IsSymlink(unix_filetype): |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2144 | target = input_zip.extract(info, dirname) |
| 2145 | # We want to ensure that the file is at least read/writable by owner and readable by all users |
| 2146 | if IsDir(unix_filetype): |
| 2147 | os.chmod(target, file_perm | 0o755) |
| 2148 | else: |
| 2149 | os.chmod(target, file_perm | 0o644) |
| 2150 | return target |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2151 | if dirname is None: |
| 2152 | dirname = os.getcwd() |
| 2153 | target = os.path.join(dirname, info.filename) |
| 2154 | os.makedirs(os.path.dirname(target), exist_ok=True) |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2155 | if os.path.exists(target): |
| 2156 | os.unlink(target) |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2157 | os.symlink(input_zip.read(info).decode(), target) |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2158 | return target |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2159 | |
| 2160 | |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2161 | def UnzipToDir(filename, dirname, patterns=None): |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2162 | """Unzips the archive to the given directory. |
| 2163 | |
| 2164 | Args: |
| 2165 | filename: The name of the zip file to unzip. |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2166 | dirname: Where the unziped files will land. |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2167 | patterns: Files to unzip from the archive. If omitted, will unzip the entire |
| 2168 | archvie. Non-matching patterns will be filtered out. If there's no match |
| 2169 | after the filtering, no file will be unzipped. |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2170 | """ |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2171 | with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip: |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2172 | # Filter out non-matching patterns. unzip will complain otherwise. |
Kelvin Zhang | 38d0c37 | 2023-06-14 12:53:29 -0700 | [diff] [blame] | 2173 | entries = input_zip.infolist() |
| 2174 | # b/283033491 |
| 2175 | # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header |
| 2176 | # In zip64 mode, central directory record's header_offset field might be |
| 2177 | # set to 0xFFFFFFFF if header offset is > 2^32. In this case, the extra |
| 2178 | # fields will contain an 8 byte little endian integer at offset 20 |
| 2179 | # to indicate the actual local header offset. |
| 2180 | # As of python3.11, python does not handle zip64 central directories |
| 2181 | # correctly, so we will manually do the parsing here. |
Kelvin Zhang | 1e77424 | 2023-06-17 09:18:15 -0700 | [diff] [blame] | 2182 | |
| 2183 | # ZIP64 central directory extra field has two required fields: |
| 2184 | # 2 bytes header ID and 2 bytes size field. Thes two require fields have |
| 2185 | # a total size of 4 bytes. Then it has three other 8 bytes field, followed |
| 2186 | # by a 4 byte disk number field. The last disk number field is not required |
| 2187 | # to be present, but if it is present, the total size of extra field will be |
| 2188 | # divisible by 8(because 2+2+4+8*n is always going to be multiple of 8) |
| 2189 | # Most extra fields are optional, but when they appear, their must appear |
| 2190 | # in the order defined by zip64 spec. Since file header offset is the 2nd |
| 2191 | # to last field in zip64 spec, it will only be at last 8 bytes or last 12-4 |
| 2192 | # bytes, depending on whether disk number is present. |
Kelvin Zhang | 38d0c37 | 2023-06-14 12:53:29 -0700 | [diff] [blame] | 2193 | for entry in entries: |
Kelvin Zhang | 1e77424 | 2023-06-17 09:18:15 -0700 | [diff] [blame] | 2194 | if entry.header_offset == 0xFFFFFFFF: |
| 2195 | if len(entry.extra) % 8 == 0: |
| 2196 | entry.header_offset = int.from_bytes(entry.extra[-12:-4], "little") |
| 2197 | else: |
| 2198 | entry.header_offset = int.from_bytes(entry.extra[-8:], "little") |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2199 | if patterns is not None: |
Kelvin Zhang | 38d0c37 | 2023-06-14 12:53:29 -0700 | [diff] [blame] | 2200 | filtered = [info for info in entries if any( |
| 2201 | [fnmatch.fnmatch(info.filename, p) for p in patterns])] |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2202 | |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2203 | # There isn't any matching files. Don't unzip anything. |
| 2204 | if not filtered: |
| 2205 | return |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2206 | for info in filtered: |
| 2207 | UnzipSingleFile(input_zip, info, dirname) |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2208 | else: |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2209 | for info in entries: |
| 2210 | UnzipSingleFile(input_zip, info, dirname) |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2211 | |
| 2212 | |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2213 | def UnzipTemp(filename, patterns=None): |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2214 | """Unzips the given archive into a temporary directory and returns the name. |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2215 | |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2216 | Args: |
| 2217 | filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into |
| 2218 | a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES. |
| 2219 | |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2220 | patterns: Files to unzip from the archive. If omitted, will unzip the entire |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2221 | archvie. |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2222 | |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2223 | Returns: |
Tao Bao | dba59ee | 2018-01-09 13:21:02 -0800 | [diff] [blame] | 2224 | The name of the temporary directory. |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2225 | """ |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2226 | |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2227 | tmp = MakeTempDir(prefix="targetfiles-") |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2228 | m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE) |
| 2229 | if m: |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2230 | UnzipToDir(m.group(1), tmp, patterns) |
| 2231 | UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns) |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2232 | filename = m.group(1) |
| 2233 | else: |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2234 | UnzipToDir(filename, tmp, patterns) |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2235 | |
Tao Bao | dba59ee | 2018-01-09 13:21:02 -0800 | [diff] [blame] | 2236 | return tmp |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2237 | |
| 2238 | |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2239 | def GetUserImage(which, tmpdir, input_zip, |
| 2240 | info_dict=None, |
| 2241 | allow_shared_blocks=None, |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2242 | reset_file_map=False): |
| 2243 | """Returns an Image object suitable for passing to BlockImageDiff. |
| 2244 | |
| 2245 | This function loads the specified image from the given path. If the specified |
| 2246 | image is sparse, it also performs additional processing for OTA purpose. For |
| 2247 | example, it always adds block 0 to clobbered blocks list. It also detects |
| 2248 | files that cannot be reconstructed from the block list, for whom we should |
| 2249 | avoid applying imgdiff. |
| 2250 | |
| 2251 | Args: |
| 2252 | which: The partition name. |
| 2253 | tmpdir: The directory that contains the prebuilt image and block map file. |
| 2254 | input_zip: The target-files ZIP archive. |
| 2255 | info_dict: The dict to be looked up for relevant info. |
| 2256 | allow_shared_blocks: If image is sparse, whether having shared blocks is |
| 2257 | allowed. If none, it is looked up from info_dict. |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2258 | reset_file_map: If true and image is sparse, reset file map before returning |
| 2259 | the image. |
| 2260 | Returns: |
| 2261 | A Image object. If it is a sparse image and reset_file_map is False, the |
| 2262 | image will have file_map info loaded. |
| 2263 | """ |
Tao Bao | c1a1ec3 | 2019-06-18 16:29:37 -0700 | [diff] [blame] | 2264 | if info_dict is None: |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2265 | info_dict = LoadInfoDict(input_zip) |
| 2266 | |
Kelvin Zhang | 0452128 | 2023-03-02 09:42:52 -0800 | [diff] [blame] | 2267 | is_sparse = IsSparseImage(os.path.join(tmpdir, "IMAGES", which + ".img")) |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2268 | |
| 2269 | # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain |
| 2270 | # shared blocks (i.e. some blocks will show up in multiple files' block |
| 2271 | # list). We can only allocate such shared blocks to the first "owner", and |
| 2272 | # disable imgdiff for all later occurrences. |
| 2273 | if allow_shared_blocks is None: |
| 2274 | allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true" |
| 2275 | |
| 2276 | if is_sparse: |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2277 | img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks) |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2278 | if reset_file_map: |
| 2279 | img.ResetFileMap() |
| 2280 | return img |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2281 | return GetNonSparseImage(which, tmpdir) |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2282 | |
| 2283 | |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2284 | def GetNonSparseImage(which, tmpdir): |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2285 | """Returns a Image object suitable for passing to BlockImageDiff. |
| 2286 | |
| 2287 | This function loads the specified non-sparse image from the given path. |
| 2288 | |
| 2289 | Args: |
| 2290 | which: The partition name. |
| 2291 | tmpdir: The directory that contains the prebuilt image and block map file. |
| 2292 | Returns: |
| 2293 | A Image object. |
| 2294 | """ |
| 2295 | path = os.path.join(tmpdir, "IMAGES", which + ".img") |
| 2296 | mappath = os.path.join(tmpdir, "IMAGES", which + ".map") |
| 2297 | |
| 2298 | # The image and map files must have been created prior to calling |
| 2299 | # ota_from_target_files.py (since LMP). |
| 2300 | assert os.path.exists(path) and os.path.exists(mappath) |
| 2301 | |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2302 | return images.FileImage(path) |
Tianjie Xu | 41976c7 | 2019-07-03 13:57:01 -0700 | [diff] [blame] | 2303 | |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2304 | |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2305 | def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks): |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2306 | """Returns a SparseImage object suitable for passing to BlockImageDiff. |
| 2307 | |
| 2308 | This function loads the specified sparse image from the given path, and |
| 2309 | performs additional processing for OTA purpose. For example, it always adds |
| 2310 | block 0 to clobbered blocks list. It also detects files that cannot be |
| 2311 | reconstructed from the block list, for whom we should avoid applying imgdiff. |
| 2312 | |
| 2313 | Args: |
Tao Bao | b2de7d9 | 2019-04-10 10:01:47 -0700 | [diff] [blame] | 2314 | which: The partition name, e.g. "system", "vendor". |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2315 | tmpdir: The directory that contains the prebuilt image and block map file. |
| 2316 | input_zip: The target-files ZIP archive. |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2317 | allow_shared_blocks: Whether having shared blocks is allowed. |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2318 | Returns: |
| 2319 | A SparseImage object, with file_map info loaded. |
| 2320 | """ |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2321 | path = os.path.join(tmpdir, "IMAGES", which + ".img") |
| 2322 | mappath = os.path.join(tmpdir, "IMAGES", which + ".map") |
| 2323 | |
| 2324 | # The image and map files must have been created prior to calling |
| 2325 | # ota_from_target_files.py (since LMP). |
| 2326 | assert os.path.exists(path) and os.path.exists(mappath) |
| 2327 | |
| 2328 | # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add |
| 2329 | # it to clobbered_blocks so that it will be written to the target |
| 2330 | # unconditionally. Note that they are still part of care_map. (Bug: 20939131) |
| 2331 | clobbered_blocks = "0" |
| 2332 | |
Tianjie Xu | 67c7cbb | 2018-08-30 00:32:07 -0700 | [diff] [blame] | 2333 | image = sparse_img.SparseImage( |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2334 | path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks) |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2335 | |
| 2336 | # block.map may contain less blocks, because mke2fs may skip allocating blocks |
| 2337 | # if they contain all zeros. We can't reconstruct such a file from its block |
| 2338 | # list. Tag such entries accordingly. (Bug: 65213616) |
| 2339 | for entry in image.file_map: |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2340 | # Skip artificial names, such as "__ZERO", "__NONZERO-1". |
Tao Bao | d3554e6 | 2018-07-10 15:31:22 -0700 | [diff] [blame] | 2341 | if not entry.startswith('/'): |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2342 | continue |
| 2343 | |
Tom Cherry | d14b895 | 2018-08-09 14:26:00 -0700 | [diff] [blame] | 2344 | # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the |
| 2345 | # filename listed in system.map may contain an additional leading slash |
| 2346 | # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent |
| 2347 | # results. |
wangshumin | 71af07a | 2021-02-24 11:08:47 +0800 | [diff] [blame] | 2348 | # And handle another special case, where files not under /system |
Tom Cherry | d14b895 | 2018-08-09 14:26:00 -0700 | [diff] [blame] | 2349 | # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip. |
wangshumin | 71af07a | 2021-02-24 11:08:47 +0800 | [diff] [blame] | 2350 | arcname = entry.lstrip('/') |
| 2351 | if which == 'system' and not arcname.startswith('system'): |
Tao Bao | d3554e6 | 2018-07-10 15:31:22 -0700 | [diff] [blame] | 2352 | arcname = 'ROOT/' + arcname |
wangshumin | 71af07a | 2021-02-24 11:08:47 +0800 | [diff] [blame] | 2353 | else: |
| 2354 | arcname = arcname.replace(which, which.upper(), 1) |
Tao Bao | d3554e6 | 2018-07-10 15:31:22 -0700 | [diff] [blame] | 2355 | |
| 2356 | assert arcname in input_zip.namelist(), \ |
| 2357 | "Failed to find the ZIP entry for {}".format(entry) |
| 2358 | |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2359 | info = input_zip.getinfo(arcname) |
| 2360 | ranges = image.file_map[entry] |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2361 | |
| 2362 | # If a RangeSet has been tagged as using shared blocks while loading the |
Tao Bao | 2a20f34 | 2018-12-03 15:08:23 -0800 | [diff] [blame] | 2363 | # image, check the original block list to determine its completeness. Note |
| 2364 | # that the 'incomplete' flag would be tagged to the original RangeSet only. |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2365 | if ranges.extra.get('uses_shared_blocks'): |
Tao Bao | 2a20f34 | 2018-12-03 15:08:23 -0800 | [diff] [blame] | 2366 | ranges = ranges.extra['uses_shared_blocks'] |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2367 | |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2368 | if RoundUpTo4K(info.file_size) > ranges.size() * 4096: |
| 2369 | ranges.extra['incomplete'] = True |
| 2370 | |
| 2371 | return image |
| 2372 | |
| 2373 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2374 | def GetKeyPasswords(keylist): |
| 2375 | """Given a list of keys, prompt the user to enter passwords for |
| 2376 | those which require them. Return a {key: password} dict. password |
| 2377 | will be None if the key has no password.""" |
| 2378 | |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2379 | no_passwords = [] |
| 2380 | need_passwords = [] |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2381 | key_passwords = {} |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2382 | devnull = open("/dev/null", "w+b") |
Cole Faust | b820bcd | 2021-10-28 13:59:48 -0700 | [diff] [blame] | 2383 | |
| 2384 | # sorted() can't compare strings to None, so convert Nones to strings |
| 2385 | for k in sorted(keylist, key=lambda x: x if x is not None else ""): |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2386 | # We don't need a password for things that aren't really keys. |
Jooyung Han | 8caba5e | 2021-10-27 03:58:09 +0900 | [diff] [blame] | 2387 | if k in SPECIAL_CERT_STRINGS or k is None: |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2388 | no_passwords.append(k) |
Doug Zongker | 43874f8 | 2009-04-14 14:05:15 -0700 | [diff] [blame] | 2389 | continue |
| 2390 | |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2391 | p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix, |
Doug Zongker | 602a84e | 2009-06-18 08:35:12 -0700 | [diff] [blame] | 2392 | "-inform", "DER", "-nocrypt"], |
| 2393 | stdin=devnull.fileno(), |
| 2394 | stdout=devnull.fileno(), |
| 2395 | stderr=subprocess.STDOUT) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2396 | p.communicate() |
| 2397 | if p.returncode == 0: |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2398 | # Definitely an unencrypted key. |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2399 | no_passwords.append(k) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2400 | else: |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2401 | p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix, |
| 2402 | "-inform", "DER", "-passin", "pass:"], |
| 2403 | stdin=devnull.fileno(), |
| 2404 | stdout=devnull.fileno(), |
| 2405 | stderr=subprocess.PIPE) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2406 | _, stderr = p.communicate() |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2407 | if p.returncode == 0: |
| 2408 | # Encrypted key with empty string as password. |
| 2409 | key_passwords[k] = '' |
| 2410 | elif stderr.startswith('Error decrypting key'): |
| 2411 | # Definitely encrypted key. |
| 2412 | # It would have said "Error reading key" if it didn't parse correctly. |
| 2413 | need_passwords.append(k) |
| 2414 | else: |
| 2415 | # Potentially, a type of key that openssl doesn't understand. |
| 2416 | # We'll let the routines in signapk.jar handle it. |
| 2417 | no_passwords.append(k) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2418 | devnull.close() |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2419 | |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2420 | key_passwords.update(PasswordManager().GetPasswords(need_passwords)) |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 2421 | key_passwords.update(dict.fromkeys(no_passwords)) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2422 | return key_passwords |
| 2423 | |
| 2424 | |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2425 | def GetMinSdkVersion(apk_name): |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2426 | """Gets the minSdkVersion declared in the APK. |
| 2427 | |
Martin Stjernholm | 58472e8 | 2022-01-07 22:08:47 +0000 | [diff] [blame] | 2428 | It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the given |
| 2429 | APK file. This can be both a decimal number (API Level) or a codename. |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2430 | |
| 2431 | Args: |
| 2432 | apk_name: The APK filename. |
| 2433 | |
| 2434 | Returns: |
| 2435 | The parsed SDK version string. |
| 2436 | |
| 2437 | Raises: |
| 2438 | ExternalError: On failing to obtain the min SDK version. |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2439 | """ |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2440 | proc = Run( |
Martin Stjernholm | 58472e8 | 2022-01-07 22:08:47 +0000 | [diff] [blame] | 2441 | [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE, |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2442 | stderr=subprocess.PIPE) |
| 2443 | stdoutdata, stderrdata = proc.communicate() |
| 2444 | if proc.returncode != 0: |
| 2445 | raise ExternalError( |
Kelvin Zhang | 21118bb | 2022-02-12 09:40:35 -0800 | [diff] [blame] | 2446 | "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format( |
| 2447 | apk_name, proc.returncode, stdoutdata, stderrdata)) |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2448 | |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2449 | for line in stdoutdata.split("\n"): |
James Wu | c5e321a | 2023-08-01 17:45:35 +0000 | [diff] [blame] | 2450 | # Due to ag/24161708, looking for lines such as minSdkVersion:'23',minSdkVersion:'M' |
| 2451 | # or sdkVersion:'23', sdkVersion:'M'. |
| 2452 | m = re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'', line) |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2453 | if m: |
| 2454 | return m.group(1) |
changho.shin | 0f12536 | 2019-07-08 10:59:00 +0900 | [diff] [blame] | 2455 | raise ExternalError("No minSdkVersion returned by aapt2") |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2456 | |
| 2457 | |
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Returns the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using
  the provided map.

  Args:
    apk_name: The APK filename.
    codename_to_api_level_map: A dict mapping codenames to API levels.

  Returns:
    The parsed SDK version number.

  Raises:
    ExternalError: On failing to get the min SDK version number.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number. The value is either a plain codename, e.g.
    #   UpsideDownCake
    # or a codename followed by an API fingerprint SHA, e.g.
    #   UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e
    # Strip the optional suffix and map the codename to an API level.
    codename = version.split(".")[0]
    if codename in codename_to_api_level_map:
      return codename_to_api_level_map[codename]
    raise ExternalError(
        "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format(
            codename, version, codename_to_api_level_map))
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2493 | |
| 2494 | |
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=None, whole_file=False,
             extra_signapk_args=None):
  """Signs input_name (a zip/jar/apk) with SignApk, producing output_name.

  Args:
    input_name: Path of the archive to sign.
    output_name: Path of the signed output.
    key: Key path prefix; OPTIONS.public_key_suffix and
        OPTIONS.private_key_suffix are appended to locate the key pair.
    password: The key's password, or None if the key has none.
    min_api_level: The API Level (int) of the oldest platform this file may
        end up on. If not specified for an APK, the level is read from the
        APK's minSdkVersion attribute.
    codename_to_api_level_map: Translates a codename minSdkVersion into a
        numeric API level.
    whole_file: If True, pass "-w" to SignApk to embed a signature covering
        the whole file in the zip's archive comment.
    extra_signapk_args: Extra args passed to SignApk; defaults to
        OPTIONS.extra_signapk_args if omitted.

  Raises:
    ExternalError: If SignApk exits with a non-zero return code.
  """
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}
  if extra_signapk_args is None:
    extra_signapk_args = OPTIONS.extra_signapk_args

  # Assemble the java invocation of SignApk.
  lib_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
  cmd = [OPTIONS.java_path]
  cmd += OPTIONS.java_args
  cmd.append("-Djava.library.path=" + lib_path)
  cmd += ["-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd += extra_signapk_args
  if whole_file:
    cmd.append("-w")

  # Determine the --min-sdk-version to pass; read it from the APK when not
  # given explicitly. Whole-file mode signs non-APK archives, so the
  # manifest lookup is skipped there.
  min_sdk_version = min_api_level
  if min_sdk_version is None and not whole_file:
    min_sdk_version = GetMinSdkVersionInt(
        input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd += ["--min-sdk-version", str(min_sdk_version)]

  cmd += [key + OPTIONS.public_key_suffix,
          key + OPTIONS.private_key_suffix,
          input_name, output_name]

  # SignApk reads the key password (if any) from stdin.
  proc = Run(cmd, stdin=subprocess.PIPE)
  stdoutdata, _ = proc.communicate(
      None if password is None else password + "\n")
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run {}: return code {}:\n{}".format(cmd,
                                                       proc.returncode, stdoutdata))
| 2551 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2552 | |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 2553 | def CheckSize(data, target, info_dict): |
Tao Bao | 9dd909e | 2017-11-14 11:27:32 -0800 | [diff] [blame] | 2554 | """Checks the data string passed against the max size limit. |
Doug Zongker | c77a9ad | 2010-09-16 11:28:43 -0700 | [diff] [blame] | 2555 | |
Tao Bao | 9dd909e | 2017-11-14 11:27:32 -0800 | [diff] [blame] | 2556 | For non-AVB images, raise exception if the data is too big. Print a warning |
| 2557 | if the data is nearing the maximum size. |
| 2558 | |
| 2559 | For AVB images, the actual image size should be identical to the limit. |
| 2560 | |
| 2561 | Args: |
| 2562 | data: A string that contains all the data for the partition. |
| 2563 | target: The partition name. The ".img" suffix is optional. |
| 2564 | info_dict: The dict to be looked up for relevant info. |
| 2565 | """ |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2566 | if target.endswith(".img"): |
| 2567 | target = target[:-4] |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 2568 | mount_point = "/" + target |
| 2569 | |
Ying Wang | f8824af | 2014-06-03 14:07:27 -0700 | [diff] [blame] | 2570 | fs_type = None |
| 2571 | limit = None |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 2572 | if info_dict["fstab"]: |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2573 | if mount_point == "/userdata": |
| 2574 | mount_point = "/data" |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 2575 | p = info_dict["fstab"][mount_point] |
| 2576 | fs_type = p.fs_type |
Andrew Boie | 0f9aec8 | 2012-02-14 09:32:52 -0800 | [diff] [blame] | 2577 | device = p.device |
| 2578 | if "/" in device: |
| 2579 | device = device[device.rfind("/")+1:] |
Kelvin Zhang | 8c9166a | 2023-10-31 13:42:15 -0700 | [diff] [blame] | 2580 | limit = info_dict.get(device + "_size", 0) |
| 2581 | if isinstance(limit, str): |
| 2582 | limit = int(limit, 0) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2583 | if not fs_type or not limit: |
| 2584 | return |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2585 | |
Andrew Boie | 0f9aec8 | 2012-02-14 09:32:52 -0800 | [diff] [blame] | 2586 | size = len(data) |
Tao Bao | 9dd909e | 2017-11-14 11:27:32 -0800 | [diff] [blame] | 2587 | # target could be 'userdata' or 'cache'. They should follow the non-AVB image |
| 2588 | # path. |
| 2589 | if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS: |
| 2590 | if size != limit: |
| 2591 | raise ExternalError( |
| 2592 | "Mismatching image size for %s: expected %d actual %d" % ( |
| 2593 | target, limit, size)) |
| 2594 | else: |
| 2595 | pct = float(size) * 100.0 / limit |
| 2596 | msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit) |
| 2597 | if pct >= 99.0: |
| 2598 | raise ExternalError(msg) |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2599 | |
| 2600 | if pct >= 95.0: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 2601 | logger.warning("\n WARNING: %s\n", msg) |
| 2602 | else: |
| 2603 | logger.info(" %s", msg) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2604 | |
| 2605 | |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2606 | def ReadApkCerts(tf_zip): |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2607 | """Parses the APK certs info from a given target-files zip. |
| 2608 | |
| 2609 | Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a |
| 2610 | tuple with the following elements: (1) a dictionary that maps packages to |
| 2611 | certs (based on the "certificate" and "private_key" attributes in the file; |
| 2612 | (2) a string representing the extension of compressed APKs in the target files |
| 2613 | (e.g ".gz", ".bro"). |
| 2614 | |
| 2615 | Args: |
| 2616 | tf_zip: The input target_files ZipFile (already open). |
| 2617 | |
| 2618 | Returns: |
| 2619 | (certmap, ext): certmap is a dictionary that maps packages to certs; ext is |
| 2620 | the extension string of compressed APKs (e.g. ".gz"), or None if there's |
| 2621 | no compressed APKs. |
| 2622 | """ |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2623 | certmap = {} |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2624 | compressed_extension = None |
| 2625 | |
Tao Bao | 0f99033 | 2017-09-08 19:02:54 -0700 | [diff] [blame] | 2626 | # META/apkcerts.txt contains the info for _all_ the packages known at build |
| 2627 | # time. Filter out the ones that are not installed. |
| 2628 | installed_files = set() |
| 2629 | for name in tf_zip.namelist(): |
| 2630 | basename = os.path.basename(name) |
| 2631 | if basename: |
| 2632 | installed_files.add(basename) |
| 2633 | |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 2634 | for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'): |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2635 | line = line.strip() |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2636 | if not line: |
| 2637 | continue |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2638 | m = re.match( |
| 2639 | r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+' |
Bill Peckham | 5c7b034 | 2020-04-03 15:36:23 -0700 | [diff] [blame] | 2640 | r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?' |
| 2641 | r'(\s+partition="(?P<PARTITION>.*?)")?$', |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2642 | line) |
| 2643 | if not m: |
| 2644 | continue |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2645 | |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2646 | matches = m.groupdict() |
| 2647 | cert = matches["CERT"] |
| 2648 | privkey = matches["PRIVKEY"] |
| 2649 | name = matches["NAME"] |
| 2650 | this_compressed_extension = matches["COMPRESSED"] |
| 2651 | |
| 2652 | public_key_suffix_len = len(OPTIONS.public_key_suffix) |
| 2653 | private_key_suffix_len = len(OPTIONS.private_key_suffix) |
| 2654 | if cert in SPECIAL_CERT_STRINGS and not privkey: |
| 2655 | certmap[name] = cert |
| 2656 | elif (cert.endswith(OPTIONS.public_key_suffix) and |
| 2657 | privkey.endswith(OPTIONS.private_key_suffix) and |
| 2658 | cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]): |
| 2659 | certmap[name] = cert[:-public_key_suffix_len] |
| 2660 | else: |
| 2661 | raise ValueError("Failed to parse line from apkcerts.txt:\n" + line) |
| 2662 | |
| 2663 | if not this_compressed_extension: |
| 2664 | continue |
| 2665 | |
| 2666 | # Only count the installed files. |
| 2667 | filename = name + '.' + this_compressed_extension |
| 2668 | if filename not in installed_files: |
| 2669 | continue |
| 2670 | |
| 2671 | # Make sure that all the values in the compression map have the same |
| 2672 | # extension. We don't support multiple compression methods in the same |
| 2673 | # system image. |
| 2674 | if compressed_extension: |
| 2675 | if this_compressed_extension != compressed_extension: |
| 2676 | raise ValueError( |
| 2677 | "Multiple compressed extensions: {} vs {}".format( |
| 2678 | compressed_extension, this_compressed_extension)) |
| 2679 | else: |
| 2680 | compressed_extension = this_compressed_extension |
| 2681 | |
| 2682 | return (certmap, |
| 2683 | ("." + compressed_extension) if compressed_extension else None) |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2684 | |
| 2685 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2686 | COMMON_DOCSTRING = """ |
Tao Bao | 30df8b4 | 2018-04-23 15:32:53 -0700 | [diff] [blame] | 2687 | Global options |
| 2688 | |
| 2689 | -p (--path) <dir> |
| 2690 | Prepend <dir>/bin to the list of places to search for binaries run by this |
| 2691 | script, and expect to find jars in <dir>/framework. |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2692 | |
Doug Zongker | 05d3dea | 2009-06-22 11:32:31 -0700 | [diff] [blame] | 2693 | -s (--device_specific) <file> |
Tao Bao | 30df8b4 | 2018-04-23 15:32:53 -0700 | [diff] [blame] | 2694 | Path to the Python module containing device-specific releasetools code. |
Doug Zongker | 05d3dea | 2009-06-22 11:32:31 -0700 | [diff] [blame] | 2695 | |
Tao Bao | 30df8b4 | 2018-04-23 15:32:53 -0700 | [diff] [blame] | 2696 | -x (--extra) <key=value> |
| 2697 | Add a key/value pair to the 'extras' dict, which device-specific extension |
| 2698 | code may look at. |
Doug Zongker | 8bec09e | 2009-11-30 15:37:14 -0800 | [diff] [blame] | 2699 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2700 | -v (--verbose) |
| 2701 | Show command lines being executed. |
| 2702 | |
| 2703 | -h (--help) |
| 2704 | Display this usage message and exit. |
Yifan Hong | 3091093 | 2019-10-25 20:36:55 -0700 | [diff] [blame] | 2705 | |
| 2706 | --logfile <file> |
| 2707 | Put verbose logs to specified file (regardless of --verbose option.) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2708 | """ |
| 2709 | |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2710 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2711 | def Usage(docstring): |
Tao Bao | 89fbb0f | 2017-01-10 10:47:58 -0800 | [diff] [blame] | 2712 | print(docstring.rstrip("\n")) |
| 2713 | print(COMMON_DOCSTRING) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2714 | |
| 2715 | |
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler: Iterable[OptionHandler] = None):
  """Parses the options in argv and returns any arguments that aren't flags.

  Args:
    argv: The argument list to parse (e.g. sys.argv[1:]).
    docstring: The calling module's docstring, displayed for errors and -h.
    extra_opts: Extra short options (getopt format) defined by the caller.
    extra_long_opts: Extra long options defined by the caller.
    extra_option_handler: A callable, an OptionHandler, or an iterable of
        either; used to process the caller-defined options.

  Returns:
    The list of non-flag arguments.

  Raises:
    ValueError: If an option is not recognized by any handler.
  """
  extra_long_opts = list(extra_long_opts)

  # Normalize extra_option_handler into a (possibly empty) list so the
  # dispatch below is uniform. NOTE: the previous code wrapped None into
  # [None], which made its later `is None` check dead and caused unknown
  # options to fail with a TypeError (calling None) instead of the intended
  # ValueError.
  if extra_option_handler is None:
    extra_option_handler = []
  elif not isinstance(extra_option_handler, Iterable):
    extra_option_handler = [extra_option_handler]

  # OptionHandler instances carry their own long-option definitions.
  for handler in extra_option_handler:
    if isinstance(handler, OptionHandler):
      extra_long_opts.extend(handler.extra_long_opts)

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra=", "logfile="] + list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--aapt2_path",):
      OPTIONS.aapt2_path = a
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = shlex.split(a)
    elif o in ("--android_jar_path",):
      OPTIONS.android_jar_path = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      raise ValueError(
          "--boot_signer_path is no longer supported, please switch to AVB")
    elif o in ("--boot_signer_args",):
      raise ValueError(
          "--boot_signer_args is no longer supported, please switch to AVB")
    elif o in ("--verity_signer_path",):
      raise ValueError(
          "--verity_signer_path is no longer supported, please switch to AVB")
    elif o in ("--verity_signer_args",):
      raise ValueError(
          "--verity_signer_args is no longer supported, please switch to AVB")
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    elif o in ("--logfile",):
      OPTIONS.logfile = a
    else:
      # Give each caller-supplied handler a chance; an OptionHandler that
      # accepts the option ends the search, while plain callables are all
      # tried (preserving the historical behavior).
      success = False
      for handler in extra_option_handler:
        if isinstance(handler, OptionHandler):
          if handler.handler(o, a):
            success = True
            break
        elif handler(o, a):
          success = True
      if not success:
        raise ValueError("unknown option \"%s\"" % (o,))

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
| 2812 | |
| 2813 | |
Tao Bao | 4c851b1 | 2016-09-19 13:54:38 -0700 | [diff] [blame] | 2814 | def MakeTempFile(prefix='tmp', suffix=''): |
Doug Zongker | fc44a51 | 2014-08-26 13:10:25 -0700 | [diff] [blame] | 2815 | """Make a temp file and add it to the list of things to be deleted |
| 2816 | when Cleanup() is called. Return the filename.""" |
| 2817 | fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix) |
| 2818 | os.close(fd) |
| 2819 | OPTIONS.tempfiles.append(fn) |
| 2820 | return fn |
| 2821 | |
| 2822 | |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2823 | def MakeTempDir(prefix='tmp', suffix=''): |
| 2824 | """Makes a temporary dir that will be cleaned up with a call to Cleanup(). |
| 2825 | |
| 2826 | Returns: |
| 2827 | The absolute pathname of the new directory. |
| 2828 | """ |
| 2829 | dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix) |
| 2830 | OPTIONS.tempfiles.append(dir_name) |
| 2831 | return dir_name |
| 2832 | |
| 2833 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2834 | def Cleanup(): |
| 2835 | for i in OPTIONS.tempfiles: |
Kelvin Zhang | 2268091 | 2023-05-19 13:12:59 -0700 | [diff] [blame] | 2836 | if not os.path.exists(i): |
| 2837 | continue |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2838 | if os.path.isdir(i): |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2839 | shutil.rmtree(i, ignore_errors=True) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2840 | else: |
| 2841 | os.remove(i) |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2842 | del OPTIONS.tempfiles[:] |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2843 | |
| 2844 | |
class PasswordManager(object):
  """Collects key passwords, optionally via an editable $ANDROID_PW_FILE."""

  def __init__(self):
    # Both are optional; without them we fall back to interactive prompts.
    self.editor = os.getenv("EDITOR")
    self.pwfile = os.getenv("ANDROID_PW_FILE")

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict. (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they appear in the editor/prompt round.
      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        if sys.version_info[0] >= 3:
          # Python 3 renamed raw_input() to input(); alias for py2 compat.
          raw_input = input  # pylint: disable=redefined-builtin
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current):  # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.items()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Writes 'current' to the password file, opens it in $EDITOR for the
    user to fill in, and reads the result back. Falls back to interactive
    prompting when no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    # Mode 0o600: the file holds key passwords in the clear.
    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so that entries still missing a password come first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    # "+N" asks the editor to open with the cursor on line N.
    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])

    return self.ReadFile()

  def ReadFile(self):
    """Parses the password file into a {key_name: password} dict.

    Returns an empty dict when the file is unset or absent; other read
    errors are logged and also yield whatever was parsed so far."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          logger.warning("Failed to parse password file: %s", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      # A missing file is expected (treated as "no passwords yet").
      if e.errno != errno.ENOENT:
        logger.exception("Error reading password file:")
    return result
Doug Zongker | 048e7ca | 2009-06-15 14:31:53 -0700 | [diff] [blame] | 2946 | |
| 2947 | |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 2948 | def ZipWrite(zip_file, filename, arcname=None, perms=0o644, |
| 2949 | compress_type=None): |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 2950 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 2951 | # http://b/18015246 |
| 2952 | # Python 2.7's zipfile implementation wrongly thinks that zip64 is required |
| 2953 | # for files larger than 2GiB. We can work around this by adjusting their |
| 2954 | # limit. Note that `zipfile.writestr()` will not work for strings larger than |
| 2955 | # 2GiB. The Python interpreter sometimes rejects strings that large (though |
| 2956 | # it isn't clear to me exactly what circumstances cause this). |
| 2957 | # `zipfile.write()` must be used directly to work around this. |
| 2958 | # |
| 2959 | # This mess can be avoided if we port to python3. |
| 2960 | saved_zip64_limit = zipfile.ZIP64_LIMIT |
| 2961 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 |
| 2962 | |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 2963 | if compress_type is None: |
| 2964 | compress_type = zip_file.compression |
| 2965 | if arcname is None: |
| 2966 | arcname = filename |
| 2967 | |
| 2968 | saved_stat = os.stat(filename) |
| 2969 | |
| 2970 | try: |
| 2971 | # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the |
| 2972 | # file to be zipped and reset it when we're done. |
| 2973 | os.chmod(filename, perms) |
| 2974 | |
| 2975 | # Use a fixed timestamp so the output is repeatable. |
Bryan Henry | e6d547d | 2018-07-31 18:32:00 -0700 | [diff] [blame] | 2976 | # Note: Use of fromtimestamp rather than utcfromtimestamp here is |
| 2977 | # intentional. zip stores datetimes in local time without a time zone |
| 2978 | # attached, so we need "epoch" but in the local time zone to get 2009/01/01 |
| 2979 | # in the zip archive. |
| 2980 | local_epoch = datetime.datetime.fromtimestamp(0) |
| 2981 | timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds() |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 2982 | os.utime(filename, (timestamp, timestamp)) |
| 2983 | |
| 2984 | zip_file.write(filename, arcname=arcname, compress_type=compress_type) |
| 2985 | finally: |
| 2986 | os.chmod(filename, saved_stat.st_mode) |
| 2987 | os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime)) |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 2988 | zipfile.ZIP64_LIMIT = saved_zip64_limit |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 2989 | |
| 2990 | |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 2991 | def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None, |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 2992 | compress_type=None): |
| 2993 | """Wrap zipfile.writestr() function to work around the zip64 limit. |
| 2994 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 2995 | Even with the ZIP64_LIMIT workaround, it won't allow writing a string |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 2996 | longer than 2GiB. It gives 'OverflowError: size does not fit in an int' |
| 2997 | when calling crc32(bytes). |
| 2998 | |
| 2999 | But it still works fine to write a shorter string into a large zip file. |
| 3000 | We should use ZipWrite() whenever possible, and only use ZipWriteStr() |
| 3001 | when we know the string won't be too long. |
| 3002 | """ |
| 3003 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3004 | saved_zip64_limit = zipfile.ZIP64_LIMIT |
| 3005 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 |
| 3006 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3007 | if not isinstance(zinfo_or_arcname, zipfile.ZipInfo): |
| 3008 | zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 3009 | zinfo.compress_type = zip_file.compression |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3010 | if perms is None: |
Tao Bao | 2a41058 | 2015-07-10 17:18:23 -0700 | [diff] [blame] | 3011 | perms = 0o100644 |
Geremy Condra | 36bd365 | 2014-02-06 19:45:10 -0800 | [diff] [blame] | 3012 | else: |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3013 | zinfo = zinfo_or_arcname |
Tao Bao | c1a1ec3 | 2019-06-18 16:29:37 -0700 | [diff] [blame] | 3014 | # Python 2 and 3 behave differently when calling ZipFile.writestr() with |
| 3015 | # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for |
| 3016 | # such a case (since |
| 3017 | # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9), |
| 3018 | # which seems to make more sense. Otherwise the entry will have 0o000 as the |
| 3019 | # permission bits. We follow the logic in Python 3 to get consistent |
| 3020 | # behavior between using the two versions. |
| 3021 | if not zinfo.external_attr: |
| 3022 | zinfo.external_attr = 0o600 << 16 |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3023 | |
| 3024 | # If compress_type is given, it overrides the value in zinfo. |
| 3025 | if compress_type is not None: |
| 3026 | zinfo.compress_type = compress_type |
| 3027 | |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3028 | # If perms is given, it has a priority. |
| 3029 | if perms is not None: |
Tao Bao | 2a41058 | 2015-07-10 17:18:23 -0700 | [diff] [blame] | 3030 | # If perms doesn't set the file type, mark it as a regular file. |
| 3031 | if perms & 0o770000 == 0: |
| 3032 | perms |= 0o100000 |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3033 | zinfo.external_attr = perms << 16 |
| 3034 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3035 | # Use a fixed timestamp so the output is repeatable. |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3036 | zinfo.date_time = (2009, 1, 1, 0, 0, 0) |
| 3037 | |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 3038 | zip_file.writestr(zinfo, data) |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3039 | zipfile.ZIP64_LIMIT = saved_zip64_limit |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3040 | |
Kelvin Zhang | b84d2aa | 2023-11-06 10:53:41 -0800 | [diff] [blame] | 3041 | def ZipExclude(input_zip, output_zip, entries, force=False): |
| 3042 | """Deletes entries from a ZIP file. |
| 3043 | |
| 3044 | Args: |
| 3045 | zip_filename: The name of the ZIP file. |
| 3046 | entries: The name of the entry, or the list of names to be deleted. |
| 3047 | """ |
| 3048 | if isinstance(entries, str): |
| 3049 | entries = [entries] |
| 3050 | # If list is empty, nothing to do |
| 3051 | if not entries: |
| 3052 | shutil.copy(input_zip, output_zip) |
| 3053 | return |
| 3054 | |
| 3055 | with zipfile.ZipFile(input_zip, 'r') as zin: |
| 3056 | if not force and len(set(zin.namelist()).intersection(entries)) == 0: |
| 3057 | raise ExternalError( |
| 3058 | "Failed to delete zip entries, name not matched: %s" % entries) |
| 3059 | |
| 3060 | fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(input_zip)) |
| 3061 | os.close(fd) |
| 3062 | cmd = ["zip2zip", "-i", input_zip, "-o", new_zipfile] |
| 3063 | for entry in entries: |
| 3064 | cmd.append("-x") |
| 3065 | cmd.append(entry) |
| 3066 | RunAndCheckOutput(cmd) |
| 3067 | os.replace(new_zipfile, output_zip) |
| 3068 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3069 | |
Kelvin Zhang | 1caead0 | 2022-09-23 10:06:03 -0700 | [diff] [blame] | 3070 | def ZipDelete(zip_filename, entries, force=False): |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3071 | """Deletes entries from a ZIP file. |
| 3072 | |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3073 | Args: |
| 3074 | zip_filename: The name of the ZIP file. |
| 3075 | entries: The name of the entry, or the list of names to be deleted. |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3076 | """ |
Tao Bao | c1a1ec3 | 2019-06-18 16:29:37 -0700 | [diff] [blame] | 3077 | if isinstance(entries, str): |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3078 | entries = [entries] |
Kelvin Zhang | 7087614 | 2022-02-09 16:05:29 -0800 | [diff] [blame] | 3079 | # If list is empty, nothing to do |
| 3080 | if not entries: |
| 3081 | return |
Wei Li | 8895f9e | 2022-10-10 17:13:17 -0700 | [diff] [blame] | 3082 | |
Kelvin Zhang | b84d2aa | 2023-11-06 10:53:41 -0800 | [diff] [blame] | 3083 | ZipExclude(zip_filename, zip_filename, entries, force) |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3084 | |
| 3085 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3086 | def ZipClose(zip_file): |
| 3087 | # http://b/18015246 |
| 3088 | # zipfile also refers to ZIP64_LIMIT during close() when it writes out the |
| 3089 | # central directory. |
| 3090 | saved_zip64_limit = zipfile.ZIP64_LIMIT |
| 3091 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 |
| 3092 | |
| 3093 | zip_file.close() |
| 3094 | |
| 3095 | zipfile.ZIP64_LIMIT = saved_zip64_limit |
| 3096 | |
| 3097 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3098 | class DeviceSpecificParams(object): |
| 3099 | module = None |
| 3100 | |
| 3101 | def __init__(self, **kwargs): |
| 3102 | """Keyword arguments to the constructor become attributes of this |
| 3103 | object, which is passed to all functions in the device-specific |
| 3104 | module.""" |
| 3105 | for k, v in kwargs.items(): |
| 3106 | setattr(self, k, v) |
| 3107 | self.extras = OPTIONS.extras |
| 3108 | |
| 3109 | if self.module is None: |
| 3110 | path = OPTIONS.device_specific |
| 3111 | if not path: |
| 3112 | return |
| 3113 | try: |
| 3114 | if os.path.isdir(path): |
| 3115 | info = imp.find_module("releasetools", [path]) |
| 3116 | else: |
| 3117 | d, f = os.path.split(path) |
| 3118 | b, x = os.path.splitext(f) |
| 3119 | if x == ".py": |
| 3120 | f = b |
| 3121 | info = imp.find_module(f, [d]) |
| 3122 | logger.info("loaded device-specific extensions from %s", path) |
| 3123 | self.module = imp.load_module("device_specific", *info) |
| 3124 | except ImportError: |
| 3125 | logger.info("unable to load device-specific module; assuming none") |
| 3126 | |
| 3127 | def _DoCall(self, function_name, *args, **kwargs): |
| 3128 | """Call the named function in the device-specific module, passing |
| 3129 | the given args and kwargs. The first argument to the call will be |
| 3130 | the DeviceSpecific object itself. If there is no module, or the |
| 3131 | module does not define the function, return the value of the |
| 3132 | 'default' kwarg (which itself defaults to None).""" |
| 3133 | if self.module is None or not hasattr(self.module, function_name): |
| 3134 | return kwargs.get("default") |
| 3135 | return getattr(self.module, function_name)(*((self,) + args), **kwargs) |
| 3136 | |
| 3137 | def FullOTA_Assertions(self): |
| 3138 | """Called after emitting the block of assertions at the top of a |
| 3139 | full OTA package. Implementations can add whatever additional |
| 3140 | assertions they like.""" |
| 3141 | return self._DoCall("FullOTA_Assertions") |
| 3142 | |
| 3143 | def FullOTA_InstallBegin(self): |
| 3144 | """Called at the start of full OTA installation.""" |
| 3145 | return self._DoCall("FullOTA_InstallBegin") |
| 3146 | |
| 3147 | def FullOTA_GetBlockDifferences(self): |
| 3148 | """Called during full OTA installation and verification. |
| 3149 | Implementation should return a list of BlockDifference objects describing |
| 3150 | the update on each additional partitions. |
| 3151 | """ |
| 3152 | return self._DoCall("FullOTA_GetBlockDifferences") |
| 3153 | |
| 3154 | def FullOTA_InstallEnd(self): |
| 3155 | """Called at the end of full OTA installation; typically this is |
| 3156 | used to install the image for the device's baseband processor.""" |
| 3157 | return self._DoCall("FullOTA_InstallEnd") |
| 3158 | |
| 3159 | def IncrementalOTA_Assertions(self): |
| 3160 | """Called after emitting the block of assertions at the top of an |
| 3161 | incremental OTA package. Implementations can add whatever |
| 3162 | additional assertions they like.""" |
| 3163 | return self._DoCall("IncrementalOTA_Assertions") |
| 3164 | |
| 3165 | def IncrementalOTA_VerifyBegin(self): |
| 3166 | """Called at the start of the verification phase of incremental |
| 3167 | OTA installation; additional checks can be placed here to abort |
| 3168 | the script before any changes are made.""" |
| 3169 | return self._DoCall("IncrementalOTA_VerifyBegin") |
| 3170 | |
| 3171 | def IncrementalOTA_VerifyEnd(self): |
| 3172 | """Called at the end of the verification phase of incremental OTA |
| 3173 | installation; additional checks can be placed here to abort the |
| 3174 | script before any changes are made.""" |
| 3175 | return self._DoCall("IncrementalOTA_VerifyEnd") |
| 3176 | |
| 3177 | def IncrementalOTA_InstallBegin(self): |
| 3178 | """Called at the start of incremental OTA installation (after |
| 3179 | verification is complete).""" |
| 3180 | return self._DoCall("IncrementalOTA_InstallBegin") |
| 3181 | |
| 3182 | def IncrementalOTA_GetBlockDifferences(self): |
| 3183 | """Called during incremental OTA installation and verification. |
| 3184 | Implementation should return a list of BlockDifference objects describing |
| 3185 | the update on each additional partitions. |
| 3186 | """ |
| 3187 | return self._DoCall("IncrementalOTA_GetBlockDifferences") |
| 3188 | |
| 3189 | def IncrementalOTA_InstallEnd(self): |
| 3190 | """Called at the end of incremental OTA installation; typically |
| 3191 | this is used to install the image for the device's baseband |
| 3192 | processor.""" |
| 3193 | return self._DoCall("IncrementalOTA_InstallEnd") |
| 3194 | |
| 3195 | def VerifyOTA_Assertions(self): |
| 3196 | return self._DoCall("VerifyOTA_Assertions") |
| 3197 | |
| 3198 | |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3199 | class File(object): |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 3200 | def __init__(self, name, data, compress_size=None): |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3201 | self.name = name |
| 3202 | self.data = data |
| 3203 | self.size = len(data) |
YOUNG HO CHA | ccc5c40 | 2016-10-13 13:40:46 +0900 | [diff] [blame] | 3204 | self.compress_size = compress_size or self.size |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 3205 | self.sha1 = sha1(data).hexdigest() |
| 3206 | |
| 3207 | @classmethod |
| 3208 | def FromLocalFile(cls, name, diskname): |
| 3209 | f = open(diskname, "rb") |
| 3210 | data = f.read() |
| 3211 | f.close() |
| 3212 | return File(name, data) |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3213 | |
| 3214 | def WriteToTemp(self): |
| 3215 | t = tempfile.NamedTemporaryFile() |
| 3216 | t.write(self.data) |
| 3217 | t.flush() |
| 3218 | return t |
| 3219 | |
Dan Willemsen | 2ee00d5 | 2017-03-05 19:51:56 -0800 | [diff] [blame] | 3220 | def WriteToDir(self, d): |
| 3221 | with open(os.path.join(d, self.name), "wb") as fp: |
| 3222 | fp.write(self.data) |
| 3223 | |
Geremy Condra | 36bd365 | 2014-02-06 19:45:10 -0800 | [diff] [blame] | 3224 | def AddToZip(self, z, compression=None): |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3225 | ZipWriteStr(z, self.name, self.data, compress_type=compression) |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3226 | |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 3227 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3228 | DIFF_PROGRAM_BY_EXT = { |
| 3229 | ".gz": "imgdiff", |
| 3230 | ".zip": ["imgdiff", "-z"], |
| 3231 | ".jar": ["imgdiff", "-z"], |
| 3232 | ".apk": ["imgdiff", "-z"], |
| 3233 | ".img": "imgdiff", |
| 3234 | } |
| 3235 | |
| 3236 | |
class Difference(object):
  """Computes the binary patch turning source file sf into target file tf.

  The diff tool is chosen from DIFF_PROGRAM_BY_EXT by the target's extension
  unless diff_program is given explicitly.
  """

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # Create the patch output file before entering the try block, so the
    # finally clause never references an unbound name if creation fails.
    ptemp = tempfile.NamedTemporaryFile()
    try:
      if isinstance(diff_program, list):
        # Copy so appending the file names doesn't mutate the shared
        # DIFF_PROGRAM_BY_EXT entries.
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)  # 5 mins
      if th.is_alive():
        logger.warning("diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Returns a tuple of (target_file, source_file, patch_data).

    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed.
    """
    return self.tf, self.sf, self.patch
| 3309 | |
| 3310 | |
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  logger.info("%d diffs to compute", len(diffs))

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)      # accessed under lock

  def worker():
    # The lock is held whenever diff_iter is advanced (the implicit next()
    # at the top of each for-iteration) and while logging, and released
    # around the expensive ComputePatch() call so workers diff in parallel.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          logger.error("patching failed! %40s", name)
        else:
          logger.info(
              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
              tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception:
      logger.exception("Failed to compute diff from worker")
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
| 3356 | |
| 3357 | |
class BlockDifference(object):
  """Computes a block-based image diff and emits the edify script fragments
  that verify, apply, and post-verify the update for one partition.

  The actual diff is produced by BlockImageDiff in __init__; the Write*
  methods only emit script text referencing the generated
  <partition>.transfer.list / .new.dat / .patch.dat files.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    # Default to the highest transfer-list version the build declares.
    if version is None:
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                       version=self.version,
                       disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # On devices with dynamic partitions, for new partitions,
    # src is None but OPTIONS.source_info_dict is not.
    if OPTIONS.source_info_dict is None:
      is_dynamic_build = OPTIONS.info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = False
    else:
      is_dynamic_build = OPTIONS.source_info_dict.get(
          "use_dynamic_partitions") == "true"
      is_dynamic_source = partition in shlex.split(
          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())

    is_dynamic_target = partition in shlex.split(
        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())

    # For dynamic partitions builds, check partition list in both source
    # and target build because new partitions may be added, and existing
    # partitions may be removed.
    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)

    # self.device is an edify expression that evaluates to the block device
    # at runtime: map_partition() for dynamic partitions, otherwise the
    # fstab-derived device expression.
    if is_dynamic:
      self.device = 'map_partition("%s")' % partition
    else:
      if OPTIONS.source_info_dict is None:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.info_dict)
      else:
        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
                                              OPTIONS.source_info_dict)
      self.device = device_expr

  @property
  def required_cache(self):
    # Maximum number of bytes BlockImageDiff needs stashed during the update.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None,
                  write_verify_script=False):
    """Emits the update (and optional post-install verify) script lines,
    adding the transfer list and data files to output_zip."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)

    if write_verify_script:
      self.WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'range_sha1(%s, "%s") == "%s" && ui_print(" Verified.") || '
        'ui_print("%s has unexpected contents.");' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True),
            self.partition))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emits the pre-install verification script for this partition.

    For incremental OTAs the script accepts either the expected source state
    or an already fully-updated target state (resumed update)."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        # Only hash the source blocks the transfer list actually reads.
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      script.AppendExtra(
          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
          '"%s.patch.dat")) then' % (
              self.device, ranges_str, expected_sha1,
              self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block(%s);' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover({device}, "{ranges}") && '
            'block_image_verify({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def WritePostInstallVerifyScript(self, script):
    """Emits the script that checks the partition after patching."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra(
        'if range_sha1(%s, "%s") == "%s" then' % (
            self.device, ranges_str,
            self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra(
          'if range_sha1(%s, "%s") == "%s" then' % (
              self.device, ranges_str,
              self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          ' abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        ' abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Adds the transfer list and data files to output_zip and emits the
    block_image_update() call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    # its size. Quality 9 almost triples the compression time but doesn't
    # further reduce the size too much. For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      RunAndCheckOutput(brotli_cmd)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update({device}, '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            ' abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    """Returns the hex SHA-1 over the given ranges of source."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
| 3629 | |
| 3630 | |
Tianjie Xu | 41976c7 | 2019-07-03 13:57:01 -0700 | [diff] [blame] | 3631 | # Expose these two classes to support vendor-specific scripts |
| 3632 | DataImage = images.DataImage |
| 3633 | EmptyImage = images.EmptyImage |
| 3634 | |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 3635 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3636 | # map recovery.fstab's fs_types to mount/format "partition types" |
| 3637 | PARTITION_TYPES = { |
| 3638 | "ext4": "EMMC", |
| 3639 | "emmc": "EMMC", |
| 3640 | "f2fs": "EMMC", |
| 3641 | "squashfs": "EMMC", |
| 3642 | "erofs": "EMMC" |
| 3643 | } |
| 3644 | |
| 3645 | |
def GetTypeAndDevice(mount_point, info, check_no_slot=True):
  """
  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
  backwards compatibility. It aborts if the fstab entry has slotselect option
  (unless check_no_slot is explicitly set to False).
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError
  entry = fstab[mount_point]
  if check_no_slot:
    assert not entry.slotselect, \
        "Use GetTypeAndDeviceExpr instead"
  return (PARTITION_TYPES[entry.fs_type], entry.device)
| 3660 | |
| 3661 | |
def GetTypeAndDeviceExpr(mount_point, info):
  """
  Return the filesystem of the partition, and an edify expression that evaluates
  to the device at runtime.
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError
  entry = fstab[mount_point]
  expr = '"%s"' % entry.device
  if entry.slotselect:
    # Slot-suffixed device: resolve the suffix on the device at runtime.
    expr = 'add_slot_suffix(%s)' % expr
  return (PARTITION_TYPES[entry.fs_type], expr)
| 3675 | |
Yifan Hong | bdb3201 | 2020-05-07 12:38:53 -0700 | [diff] [blame] | 3676 | |
def GetEntryForDevice(fstab, device):
  """
  Returns:
    The first entry in fstab whose device is the given value.
  """
  if not fstab:
    return None
  return next(
      (fstab[mp] for mp in fstab if fstab[mp].device == device), None)
| 3688 | |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 3689 | |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3690 | def ParseCertificate(data): |
Tao Bao | 17e4e61 | 2018-02-16 17:12:54 -0800 | [diff] [blame] | 3691 | """Parses and converts a PEM-encoded certificate into DER-encoded. |
| 3692 | |
| 3693 | This gives the same result as `openssl x509 -in <filename> -outform DER`. |
| 3694 | |
| 3695 | Returns: |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 3696 | The decoded certificate bytes. |
Tao Bao | 17e4e61 | 2018-02-16 17:12:54 -0800 | [diff] [blame] | 3697 | """ |
| 3698 | cert_buffer = [] |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3699 | save = False |
| 3700 | for line in data.split("\n"): |
| 3701 | if "--END CERTIFICATE--" in line: |
| 3702 | break |
| 3703 | if save: |
Tao Bao | 17e4e61 | 2018-02-16 17:12:54 -0800 | [diff] [blame] | 3704 | cert_buffer.append(line) |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3705 | if "--BEGIN CERTIFICATE--" in line: |
| 3706 | save = True |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 3707 | cert = base64.b64decode("".join(cert_buffer)) |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3708 | return cert |
Doug Zongker | c925382 | 2014-02-04 12:17:58 -0800 | [diff] [blame] | 3709 | |
Tao Bao | 04e1f01 | 2018-02-04 12:13:35 -0800 | [diff] [blame] | 3710 | |
def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the given filename followed by '-out',
  # openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
  # stdout instead.
  proc = Run(['openssl', 'x509', '-pubkey', '-noout', '-in', cert],
             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, err_out = proc.communicate()
  assert proc.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert, err_out)
  return pubkey
| 3733 | |
| 3734 | |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 3735 | def ExtractAvbPublicKey(avbtool, key): |
Tao Bao | 2cc0ca1 | 2019-03-15 10:44:43 -0700 | [diff] [blame] | 3736 | """Extracts the AVB public key from the given public or private key. |
| 3737 | |
| 3738 | Args: |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 3739 | avbtool: The AVB tool to use. |
Tao Bao | 2cc0ca1 | 2019-03-15 10:44:43 -0700 | [diff] [blame] | 3740 | key: The input key file, which should be PEM-encoded public or private key. |
| 3741 | |
| 3742 | Returns: |
| 3743 | The path to the extracted AVB public key file. |
| 3744 | """ |
| 3745 | output = MakeTempFile(prefix='avb-', suffix='.avbpubkey') |
| 3746 | RunAndCheckOutput( |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 3747 | [avbtool, 'extract_public_key', "--key", key, "--output", output]) |
Tao Bao | 2cc0ca1 | 2019-03-15 10:44:43 -0700 | [diff] [blame] | 3748 | return output |
| 3749 | |
| 3750 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3751 | def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img, |
| 3752 | info_dict=None): |
| 3753 | """Generates the recovery-from-boot patch and writes the script to output. |
| 3754 | |
| 3755 | Most of the space in the boot and recovery images is just the kernel, which is |
| 3756 | identical for the two, so the resulting patch should be efficient. Add it to |
| 3757 | the output zip, along with a shell script that is run from init.rc on first |
| 3758 | boot to actually do the patching and install the new recovery image. |
| 3759 | |
| 3760 | Args: |
| 3761 | input_dir: The top-level input directory of the target-files.zip. |
| 3762 | output_sink: The callback function that writes the result. |
| 3763 | recovery_img: File object for the recovery image. |
| 3764 | boot_img: File objects for the boot image. |
| 3765 | info_dict: A dict returned by common.LoadInfoDict() on the input |
| 3766 | target_files. Will use OPTIONS.info_dict if None has been given. |
| 3767 | """ |
| 3768 | if info_dict is None: |
| 3769 | info_dict = OPTIONS.info_dict |
| 3770 | |
| 3771 | full_recovery_image = info_dict.get("full_recovery_image") == "true" |
| 3772 | board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true" |
| 3773 | |
| 3774 | if board_uses_vendorimage: |
| 3775 | # In this case, the output sink is rooted at VENDOR |
| 3776 | recovery_img_path = "etc/recovery.img" |
| 3777 | recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat" |
| 3778 | sh_dir = "bin" |
| 3779 | else: |
| 3780 | # In this case the output sink is rooted at SYSTEM |
| 3781 | recovery_img_path = "vendor/etc/recovery.img" |
| 3782 | recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat" |
| 3783 | sh_dir = "vendor/bin" |
| 3784 | |
| 3785 | if full_recovery_image: |
| 3786 | output_sink(recovery_img_path, recovery_img.data) |
| 3787 | |
| 3788 | else: |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3789 | include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true" |
| 3790 | include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true" |
| 3791 | path = os.path.join(input_dir, recovery_resource_dat_path) |
Yi-Yo Chiang | 18650c7 | 2022-10-12 18:29:14 +0800 | [diff] [blame] | 3792 | # Use bsdiff to handle mismatching entries (Bug: 72731506) |
| 3793 | if include_recovery_dtbo or include_recovery_acpio: |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3794 | diff_program = ["bsdiff"] |
| 3795 | bonus_args = "" |
| 3796 | assert not os.path.exists(path) |
| 3797 | else: |
| 3798 | diff_program = ["imgdiff"] |
| 3799 | if os.path.exists(path): |
| 3800 | diff_program.append("-b") |
| 3801 | diff_program.append(path) |
| 3802 | bonus_args = "--bonus /vendor/etc/recovery-resource.dat" |
| 3803 | else: |
| 3804 | bonus_args = "" |
| 3805 | |
| 3806 | d = Difference(recovery_img, boot_img, diff_program=diff_program) |
| 3807 | _, _, patch = d.ComputePatch() |
| 3808 | output_sink("recovery-from-boot.p", patch) |
| 3809 | |
| 3810 | try: |
| 3811 | # The following GetTypeAndDevice()s need to use the path in the target |
| 3812 | # info_dict instead of source_info_dict. |
| 3813 | boot_type, boot_device = GetTypeAndDevice("/boot", info_dict, |
| 3814 | check_no_slot=False) |
| 3815 | recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict, |
| 3816 | check_no_slot=False) |
| 3817 | except KeyError: |
| 3818 | return |
| 3819 | |
| 3820 | if full_recovery_image: |
| 3821 | |
| 3822 | # Note that we use /vendor to refer to the recovery resources. This will |
| 3823 | # work for a separate vendor partition mounted at /vendor or a |
| 3824 | # /system/vendor subdirectory on the system partition, for which init will |
| 3825 | # create a symlink from /vendor to /system/vendor. |
| 3826 | |
| 3827 | sh = """#!/vendor/bin/sh |
| 3828 | if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then |
| 3829 | applypatch \\ |
| 3830 | --flash /vendor/etc/recovery.img \\ |
| 3831 | --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\ |
| 3832 | log -t recovery "Installing new recovery image: succeeded" || \\ |
| 3833 | log -t recovery "Installing new recovery image: failed" |
| 3834 | else |
| 3835 | log -t recovery "Recovery image already installed" |
| 3836 | fi |
| 3837 | """ % {'type': recovery_type, |
| 3838 | 'device': recovery_device, |
| 3839 | 'sha1': recovery_img.sha1, |
| 3840 | 'size': recovery_img.size} |
| 3841 | else: |
| 3842 | sh = """#!/vendor/bin/sh |
| 3843 | if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then |
| 3844 | applypatch %(bonus_args)s \\ |
| 3845 | --patch /vendor/recovery-from-boot.p \\ |
| 3846 | --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\ |
| 3847 | --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\ |
| 3848 | log -t recovery "Installing new recovery image: succeeded" || \\ |
| 3849 | log -t recovery "Installing new recovery image: failed" |
| 3850 | else |
| 3851 | log -t recovery "Recovery image already installed" |
| 3852 | fi |
| 3853 | """ % {'boot_size': boot_img.size, |
| 3854 | 'boot_sha1': boot_img.sha1, |
| 3855 | 'recovery_size': recovery_img.size, |
| 3856 | 'recovery_sha1': recovery_img.sha1, |
| 3857 | 'boot_type': boot_type, |
| 3858 | 'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)', |
| 3859 | 'recovery_type': recovery_type, |
| 3860 | 'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)', |
| 3861 | 'bonus_args': bonus_args} |
| 3862 | |
| 3863 | # The install script location moved from /system/etc to /system/bin in the L |
| 3864 | # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin. |
| 3865 | sh_location = os.path.join(sh_dir, "install-recovery.sh") |
| 3866 | |
| 3867 | logger.info("putting script in %s", sh_location) |
| 3868 | |
| 3869 | output_sink(sh_location, sh.encode()) |
| 3870 | |
| 3871 | |
class DynamicPartitionUpdate(object):
  """Tracks one dynamic partition's group membership and block diff."""

  def __init__(self, src_group=None, tgt_group=None, progress=None,
               block_difference=None):
    self.src_group = src_group
    self.tgt_group = tgt_group
    self.progress = progress
    self.block_difference = block_difference

  @staticmethod
  def _GetSparseImageSize(img):
    # Byte size of a sparse image; 0 when the image is missing.
    return img.blocksize * img.total_blocks if img else 0

  @property
  def src_size(self):
    """Size in bytes of the source image, or 0 if there is no block diff."""
    diff = self.block_difference
    return DynamicPartitionUpdate._GetSparseImageSize(diff.src) if diff else 0

  @property
  def tgt_size(self):
    """Size in bytes of the target image, or 0 if there is no block diff."""
    diff = self.block_difference
    return DynamicPartitionUpdate._GetSparseImageSize(diff.tgt) if diff else 0
| 3897 | |
| 3898 | |
class DynamicGroupUpdate(object):
  """Source/target size limits for a single super-partition group.

  A size of None means the group does not exist in that build; a size of 0
  means the group has no size limit.
  """

  def __init__(self, src_size=None, tgt_size=None):
    self.src_size = src_size  # limit in the source build
    self.tgt_size = tgt_size  # limit in the target build
| 3904 | |
| 3905 | |
class DynamicPartitionsDifference(object):
  """Computes dynamic-partition metadata changes between two builds.

  Builds an ordered op list (shipped as "dynamic_partitions_op_list" in the
  package) plus edify commands that patch each dynamic partition. With
  source_info_dict=None this is a full OTA: all existing groups are removed
  before the target layout is applied.
  """

  def __init__(self, info_dict, block_diffs, progress_dict=None,
               source_info_dict=None):
    if progress_dict is None:
      progress_dict = {}

    # Full OTA: no source build, so wipe every existing group up front.
    self._remove_all_before_apply = False
    if source_info_dict is None:
      self._remove_all_before_apply = True
      source_info_dict = {}

    block_diff_dict = collections.OrderedDict(
        [(e.partition, e) for e in block_diffs])

    # Each partition may appear at most once in block_diffs.
    assert len(block_diff_dict) == len(block_diffs), \
        "Duplicated BlockDifference object for {}".format(
            [partition for partition, count in
             collections.Counter(e.partition for e in block_diffs).items()
             if count > 1])

    # partition name -> DynamicPartitionUpdate
    self._partition_updates = collections.OrderedDict()

    for p, block_diff in block_diff_dict.items():
      self._partition_updates[p] = DynamicPartitionUpdate()
      self._partition_updates[p].block_difference = block_diff

    for p, progress in progress_dict.items():
      if p in self._partition_updates:
        self._partition_updates[p].progress = progress

    # Group membership in the target and source builds, from the info dicts.
    tgt_groups = shlex.split(info_dict.get(
        "super_partition_groups", "").strip())
    src_groups = shlex.split(source_info_dict.get(
        "super_partition_groups", "").strip())

    for g in tgt_groups:
      for p in shlex.split(info_dict.get(
          "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in target super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].tgt_group = g

    for g in src_groups:
      for p in shlex.split(source_info_dict.get(
          "super_%s_partition_list" % g, "").strip()):
        assert p in self._partition_updates, \
            "{} is in source super_{}_partition_list but no BlockDifference " \
            "object is provided.".format(p, g)
        self._partition_updates[p].src_group = g

    # Cross-check: partitions with a target (resp. source) image must match
    # the build's dynamic_partition_list exactly.
    target_dynamic_partitions = set(shlex.split(info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
                                  if u.tgt_size)
    assert block_diffs_with_target == target_dynamic_partitions, \
        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
            list(target_dynamic_partitions), list(block_diffs_with_target))

    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
        "dynamic_partition_list", "").strip()))
    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
                                  if u.src_size)
    assert block_diffs_with_source == source_dynamic_partitions, \
        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
            list(source_dynamic_partitions), list(block_diffs_with_source))

    if self._partition_updates:
      logger.info("Updating dynamic partitions %s",
                  self._partition_updates.keys())

    # group name -> DynamicGroupUpdate; "0" means no size limit (see
    # DynamicGroupUpdate), absent means the group does not exist.
    self._group_updates = collections.OrderedDict()

    for g in tgt_groups:
      self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].tgt_size = int(info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    for g in src_groups:
      if g not in self._group_updates:
        self._group_updates[g] = DynamicGroupUpdate()
      self._group_updates[g].src_size = int(source_info_dict.get(
          "super_%s_group_size" % g, "0").strip())

    self._Compute()

  def WriteScript(self, script, output_zip, write_verify_script=False):
    """Emits the edify commands and packages the op list.

    Shrinking partitions are patched before the metadata update; all other
    partitions are patched after it.
    """
    script.Comment('--- Start patching dynamic partitions ---')
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=False)

    # Write the op list computed by _Compute() into the package.
    op_list_path = MakeTempFile()
    with open(op_list_path, 'w') as f:
      for line in self._op_list:
        f.write('{}\n'.format(line))

    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")

    script.Comment('Update dynamic partition metadata')
    script.AppendExtra('assert(update_dynamic_partitions('
                       'package_extract_file("dynamic_partitions_op_list")));')

    if write_verify_script:
      # Verify the shrunk partitions that were patched before the metadata
      # update above.
      for p, u in self._partition_updates.items():
        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
          u.block_difference.WritePostInstallVerifyScript(script)
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size <= u.tgt_size:
        script.Comment('Patch partition %s' % p)
        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                       write_verify_script=write_verify_script)
        if write_verify_script:
          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors

    script.Comment('--- End patching dynamic partitions ---')

  def _Compute(self):
    """Builds self._op_list for update_dynamic_partitions().

    Op order matters: space is freed (remove/shrink) before it is allocated
    (add/grow), and partitions that change groups pass through "default" so
    they never sit in a group while it is resized.
    """
    self._op_list = list()

    def append(line):
      self._op_list.append(line)

    def comment(line):
      self._op_list.append("# %s" % line)

    if self._remove_all_before_apply:
      comment('Remove all existing dynamic partitions and groups before '
              'applying full OTA')
      append('remove_all_groups')

    # Remove partitions that are absent from the target build.
    for p, u in self._partition_updates.items():
      if u.src_group and not u.tgt_group:
        append('remove %s' % p)

    # Park group-changing partitions in "default" first.
    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from %s to default' % (p, u.src_group))
        append('move %s default' % p)

    # Shrink partitions before their groups are shrunk.
    for p, u in self._partition_updates.items():
      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
        comment('Shrink partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %s' % (p, u.tgt_size))

    # Remove or shrink groups, freeing space for the growth below.
    for g, u in self._group_updates.items():
      if u.src_size is not None and u.tgt_size is None:
        append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
            u.src_size > u.tgt_size):
        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    # Add new groups and grow existing ones.
    for g, u in self._group_updates.items():
      if u.src_size is None and u.tgt_size is not None:
        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
        append('add_group %s %d' % (g, u.tgt_size))
      if (u.src_size is not None and u.tgt_size is not None and
            u.src_size < u.tgt_size):
        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
        append('resize_group %s %d' % (g, u.tgt_size))

    # Place brand-new partitions into their target groups.
    for p, u in self._partition_updates.items():
      if u.tgt_group and not u.src_group:
        comment('Add partition %s to group %s' % (p, u.tgt_group))
        append('add %s %s' % (p, u.tgt_group))

    # Grow partitions now that the groups have room.
    for p, u in self._partition_updates.items():
      if u.tgt_size and u.src_size < u.tgt_size:
        comment('Grow partition %s from %d to %d' %
                (p, u.src_size, u.tgt_size))
        append('resize %s %d' % (p, u.tgt_size))

    # Finally move the parked partitions from default to their new groups.
    for p, u in self._partition_updates.items():
      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
        comment('Move partition %s from default to %s' %
                (p, u.tgt_group))
        append('move %s %s' % (p, u.tgt_group))
| 4089 | |
| 4090 | |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4091 | def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4): |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4092 | """ |
Yifan Hong | 85ac501 | 2021-01-07 14:43:46 -0800 | [diff] [blame] | 4093 | Get build.prop from ramdisk within the boot image |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4094 | |
| 4095 | Args: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 4096 | boot_img: the boot image file. Ramdisk must be compressed with lz4 or gzip format. |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4097 | |
| 4098 | Return: |
Yifan Hong | 85ac501 | 2021-01-07 14:43:46 -0800 | [diff] [blame] | 4099 | An extracted file that stores properties in the boot image. |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4100 | """ |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4101 | tmp_dir = MakeTempDir('boot_', suffix='.img') |
| 4102 | try: |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4103 | RunAndCheckOutput(['unpack_bootimg', '--boot_img', |
| 4104 | boot_img, '--out', tmp_dir]) |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4105 | ramdisk = os.path.join(tmp_dir, 'ramdisk') |
| 4106 | if not os.path.isfile(ramdisk): |
| 4107 | logger.warning('Unable to get boot image timestamp: no ramdisk in boot') |
| 4108 | return None |
| 4109 | uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk') |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4110 | if ramdisk_format == RamdiskFormat.LZ4: |
| 4111 | RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk]) |
| 4112 | elif ramdisk_format == RamdiskFormat.GZ: |
| 4113 | with open(ramdisk, 'rb') as input_stream: |
| 4114 | with open(uncompressed_ramdisk, 'wb') as output_stream: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 4115 | p2 = Run(['gzip', '-d'], stdin=input_stream.fileno(), |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4116 | stdout=output_stream.fileno()) |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4117 | p2.wait() |
| 4118 | else: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 4119 | logger.error('Only support lz4 or gzip ramdisk format.') |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4120 | return None |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4121 | |
| 4122 | abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk) |
| 4123 | extracted_ramdisk = MakeTempDir('extracted_ramdisk') |
| 4124 | # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from |
| 4125 | # the host environment. |
| 4126 | RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'], |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4127 | cwd=extracted_ramdisk) |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4128 | |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4129 | for search_path in RAMDISK_BUILD_PROP_REL_PATHS: |
| 4130 | prop_file = os.path.join(extracted_ramdisk, search_path) |
| 4131 | if os.path.isfile(prop_file): |
Yifan Hong | 7dc5117 | 2021-01-12 11:27:39 -0800 | [diff] [blame] | 4132 | return prop_file |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4133 | logger.warning( |
| 4134 | 'Unable to get boot image timestamp: no %s in ramdisk', search_path) |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4135 | |
Yifan Hong | 7dc5117 | 2021-01-12 11:27:39 -0800 | [diff] [blame] | 4136 | return None |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4137 | |
Yifan Hong | 85ac501 | 2021-01-07 14:43:46 -0800 | [diff] [blame] | 4138 | except ExternalError as e: |
| 4139 | logger.warning('Unable to get boot image build props: %s', e) |
| 4140 | return None |
| 4141 | |
| 4142 | |
def GetBootImageTimestamp(boot_img):
  """
  Get timestamp from ramdisk within the boot image

  Args:
    boot_img: the boot image file. Ramdisk must be compressed with lz4 format.

  Return:
    An integer that corresponds to the timestamp of the boot image, or None
    if file has unknown format. Raise exception if an unexpected error has
    occurred.
  """
  prop_file = GetBootImageBuildProp(boot_img)
  if not prop_file:
    return None

  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
  if props is None:
    return None

  try:
    timestamp = props.GetProp('ro.bootimage.build.date.utc')
  except ExternalError as e:
    logger.warning('Unable to get boot image timestamp: %s', e)
    return None

  if timestamp:
    return int(timestamp)
  logger.warning(
      'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
  return None
Kelvin Zhang | 2732413 | 2021-03-22 15:38:38 -0400 | [diff] [blame] | 4174 | |
| 4175 | |
Kelvin Zhang | 2639048 | 2021-11-02 14:31:10 -0700 | [diff] [blame] | 4176 | def IsSparseImage(filepath): |
Kelvin Zhang | 1caead0 | 2022-09-23 10:06:03 -0700 | [diff] [blame] | 4177 | if not os.path.exists(filepath): |
| 4178 | return False |
Kelvin Zhang | 2639048 | 2021-11-02 14:31:10 -0700 | [diff] [blame] | 4179 | with open(filepath, 'rb') as fp: |
| 4180 | # Magic for android sparse image format |
| 4181 | # https://source.android.com/devices/bootloader/images |
| 4182 | return fp.read(4) == b'\x3A\xFF\x26\xED' |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 4183 | |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 4184 | |
Kelvin Zhang | 2268091 | 2023-05-19 13:12:59 -0700 | [diff] [blame] | 4185 | def UnsparseImage(filepath, target_path=None): |
| 4186 | if not IsSparseImage(filepath): |
| 4187 | return |
| 4188 | if target_path is None: |
| 4189 | tmp_img = MakeTempFile(suffix=".img") |
| 4190 | RunAndCheckOutput(["simg2img", filepath, tmp_img]) |
| 4191 | os.rename(tmp_img, filepath) |
| 4192 | else: |
| 4193 | RunAndCheckOutput(["simg2img", filepath, target_path]) |
| 4194 | |
| 4195 | |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 4196 | def ParseUpdateEngineConfig(path: str): |
| 4197 | """Parse the update_engine config stored in file `path` |
| 4198 | Args |
| 4199 | path: Path to update_engine_config.txt file in target_files |
| 4200 | |
| 4201 | Returns |
| 4202 | A tuple of (major, minor) version number . E.g. (2, 8) |
| 4203 | """ |
| 4204 | with open(path, "r") as fp: |
| 4205 | # update_engine_config.txt is only supposed to contain two lines, |
| 4206 | # PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION. 1024 should be more than |
| 4207 | # sufficient. If the length is more than that, something is wrong. |
| 4208 | data = fp.read(1024) |
| 4209 | major = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data) |
| 4210 | if not major: |
| 4211 | raise ValueError( |
| 4212 | f"{path} is an invalid update_engine config, missing PAYLOAD_MAJOR_VERSION {data}") |
| 4213 | minor = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data) |
| 4214 | if not minor: |
| 4215 | raise ValueError( |
| 4216 | f"{path} is an invalid update_engine config, missing PAYLOAD_MINOR_VERSION {data}") |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 4217 | return (int(major.group(1)), int(minor.group(1))) |