# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import datetime
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import stat
import sys
import tempfile
import threading
import time
import zipfile
from dataclasses import dataclass
from genericpath import isdir
from hashlib import sha1, sha256

import images
import rangelib
import sparse_img
from blockimgdiff import BlockImageDiff

logger = logging.getLogger(__name__)


class Options(object):

  def __init__(self):
    # Set up the search path, in order to find framework/ and lib64/. At the
    # time this function runs, the user-supplied search path (`--path`) isn't
    # available yet, so the value set here is the default, which might be
    # overridden by a command-line flag later.
    exec_path = os.path.realpath(sys.argv[0])
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # logger hasn't been initialized yet at this point. Use print to output
      # warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(exec_path))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    if not os.path.exists(os.path.join(self.search_path, self.signapk_path)):
      if "ANDROID_HOST_OUT" in os.environ:
        self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx4096m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None


OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor in vbmeta.img. When adding a new entry here,
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks needs to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw',
                  'recovery', 'system', 'system_ext', 'vendor', 'vendor_boot',
                  'vendor_kernel_boot', 'vendor_dlkm', 'odm_dlkm',
                  'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# avbtool argument names
AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG = '--include_descriptors_from_image'
AVB_ARG_NAME_CHAIN_PARTITION = '--chain_partition'

# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If the file is moved, add new search paths here; don't remove
# existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']


@dataclass
class AvbChainedPartitionArg:
  """The required arguments for avbtool --chain_partition."""
  partition: str
  rollback_index_location: int
  pubkey_path: str

  def to_string(self):
    """Convert to string command arguments."""
    return '{}:{}:{}'.format(
        self.partition, self.rollback_index_location, self.pubkey_path)
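# For illustration (hypothetical values): AvbChainedPartitionArg(
#     'vbmeta_system', 1, '/path/to/key.pem').to_string() returns
# 'vbmeta_system:1:/path/to/key.pem', i.e. the PART_NAME:ROLLBACK_SLOT:KEY_PATH
# form that avbtool's --chain_partition option expects.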


class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass


def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'NOTSET',
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

  # Increase the logging level for verbose mode.
  if OPTIONS.verbose:
    config = copy.deepcopy(config)
    config['handlers']['default']['level'] = 'INFO'

  if OPTIONS.logfile:
    config = copy.deepcopy(config)
    config['handlers']['logfile'] = {
        'class': 'logging.FileHandler',
        'formatter': 'standard',
        'level': 'INFO',
        'mode': 'w',
        'filename': OPTIONS.logfile,
    }
    config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)
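# Note: InitLogging() reads OPTIONS.verbose and OPTIONS.logfile, so callers are
# expected to set those (typically from parsed command-line flags) before
# calling it. Pointing the LOGGING_CONFIG environment variable at a JSON
# logging.config.dictConfig file replaces DEFAULT_LOGGING_CONFIG entirely.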


def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find
  Returns:
    Path to the tool if it is found in the same directory as this binary;
    otherwise, tool_name is returned unchanged.
  """
  my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  tool_path = os.path.join(my_dir, tool_name)
  if os.path.exists(tool_path):
    return tool_path

  return tool_name
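# Run() below rewrites args[0] through FindHostToolPath(), so a tool bundled
# next to the releasetools binaries takes precedence over one found on $PATH.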


def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless the caller specifies any of them.
        universal_newlines will default to True, as most callers in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  if 'universal_newlines' not in kwargs:
    kwargs['universal_newlines'] = True

  if args:
    # Make a copy of args in case the client relies on the content of args
    # later.
    args = args[:]
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log anything if the caller explicitly says so.
  if verbose:
    logger.info(" Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)
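# A minimal usage sketch (illustrative command only): callers typically drain
# the pipe via communicate() and then check returncode, e.g.
#   proc = Run(["ls", OPTIONS.search_path])
#   output, _ = proc.communicate()
#   assert proc.returncode == 0
# RunAndCheckOutput() below wraps exactly this pattern and raises ExternalError
# on a non-zero exit code.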


def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively unless the caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  if verbose is None:
    verbose = OPTIONS.verbose
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  if output is None:
    output = ""
  # Don't log anything if the caller explicitly says so.
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output


def RoundUpTo4K(value):
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)
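# For reference: RoundUpTo4K(1) == 4096, RoundUpTo4K(4096) == 4096, and
# RoundUpTo4K(4097) == 8192, i.e. values are rounded up to the next multiple of
# BLOCK_SIZE.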


def CloseInheritedPipes():
  """gmake on macOS leaks file descriptors (pipes); close those fds before
  doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        # 0x1000 is S_IFIFO: only close descriptors that refer to pipes.
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass

| 349 | |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 350 | class BuildInfo(object): |
| 351 | """A class that holds the information for a given build. |
| 352 | |
| 353 | This class wraps up the property querying for a given source or target build. |
| 354 | It abstracts away the logic of handling OEM-specific properties, and caches |
| 355 | the commonly used properties such as fingerprint. |
| 356 | |
| 357 | There are two types of info dicts: a) build-time info dict, which is generated |
| 358 | at build time (i.e. included in a target_files zip); b) OEM info dict that is |
| 359 | specified at package generation time (via command line argument |
| 360 | '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not |
| 361 | having "oem_fingerprint_properties" in build-time info dict), all the queries |
| 362 | would be answered based on build-time info dict only. Otherwise if using |
| 363 | OEM-specific properties, some of them will be calculated from two info dicts. |
| 364 | |
| 365 | Users can query properties similarly as using a dict() (e.g. info['fstab']), |
Daniel Norman | d5fe862 | 2020-01-08 17:01:11 -0800 | [diff] [blame] | 366 | or to query build properties via GetBuildProp() or GetPartitionBuildProp(). |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 367 | |
| 368 | Attributes: |
| 369 | info_dict: The build-time info dict. |
| 370 | is_ab: Whether it's a build that uses A/B OTA. |
| 371 | oem_dicts: A list of OEM dicts. |
| 372 | oem_props: A list of OEM properties that should be read from OEM dicts; None |
| 373 | if the build doesn't use any OEM-specific property. |
| 374 | fingerprint: The fingerprint of the build, which would be calculated based |
| 375 | on OEM properties if applicable. |
| 376 | device: The device name, which could come from OEM dicts if applicable. |
| 377 | """ |
| 378 | |
| 379 | _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device", |
| 380 | "ro.product.manufacturer", "ro.product.model", |
| 381 | "ro.product.name"] |
Steven Laver | 8e2086e | 2020-04-27 16:26:31 -0700 | [diff] [blame] | 382 | _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [ |
| 383 | "product", "odm", "vendor", "system_ext", "system"] |
| 384 | _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [ |
| 385 | "product", "product_services", "odm", "vendor", "system"] |
| 386 | _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = [] |
Tao Bao | 1c320f8 | 2019-10-04 23:25:12 -0700 | [diff] [blame] | 387 | |
Tianjie | fdda51d | 2021-05-05 14:46:35 -0700 | [diff] [blame] | 388 | # The length of vbmeta digest to append to the fingerprint |
| 389 | _VBMETA_DIGEST_SIZE_USED = 8 |
| 390 | |
  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class, while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None to use BuildInfo in
    # sign_target_files_apks
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    return self._is_ab

  @property
  def device(self):
    return self._device

  @property
  def fingerprint(self):
    return self._fingerprint

  @property
  def is_vabc(self):
    return self.info_dict.get("virtual_ab_compression") == "true"

  @property
  def is_android_r(self):
    system_prop = self.info_dict.get("system.build.prop")
    return system_prop and system_prop.GetProp("ro.build.version.release") == "11"

  @property
  def is_release_key(self):
    system_prop = self.info_dict.get("build.prop")
    return system_prop and system_prop.GetProp("ro.build.tags") == "release-key"

  @property
  def vabc_compression_param(self):
    return self.get("virtual_ab_compression_method", "")

  @property
  def vendor_api_level(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    if not vendor_prop:
      return -1

    props = [
        "ro.board.api_level",
        "ro.board.first_api_level",
        "ro.product.first_api_level",
    ]
    for prop in props:
      value = vendor_prop.GetProp(prop)
      try:
        return int(value)
      except:
        pass
    return -1

  @property
  def is_vabc_xor(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled

  @property
  def vendor_suppressed_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition."""

    # The boot and init_boot images use ro.[product.]bootimage instead of boot;
    # this naming comes from the generic ramdisk.
    prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))

  def GetBuildProp(self, prop):
    """Returns the inquired build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the inquired ro.product.* build property"""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid
    if any([x not in default_source_order for x in source_order]):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding error.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # init code switches to version_release_or_codename (see b/158483506). After
    # API finalization, release_or_codename will be the same as release. This
    # is the best effort to support pre-S dev stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of vbmeta digest to the existing build id. The
    # logic needs to match the one in init, so that OTA can deliver correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("Vbmeta digest isn't provided when using legacy build"
                          " id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))

  def WriteMountOemScript(self, script):
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)
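# A minimal usage sketch (assuming an info dict already loaded via
# LoadInfoDict() below):
#   build_info = BuildInfo(info_dict)
#   print(build_info.fingerprint)
#   print(build_info.GetBuildProp("ro.build.version.sdk"))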


def DoesInputFileContain(input_file, fn):
  """Checks whether the input target_files.zip contains an entry `fn`."""
  if isinstance(input_file, zipfile.ZipFile):
    return fn in input_file.namelist()
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return fn in zfp.namelist()
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    return os.path.exists(path)


def ReadBytesFromInputFile(input_file, fn):
  """Reads the bytes of fn from input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return zfp.read(fn)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "rb") as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)


def ReadFromInputFile(input_file, fn):
  """Reads the str contents of fn from input zipfile or directory."""
  return ReadBytesFromInputFile(input_file, fn).decode()


def WriteBytesToInputFile(input_file, fn, data):
  """Writes the bytes |data| to fn in the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    with input_file.open(fn, "w") as entry_fp:
      return entry_fp.write(data)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      with zfp.open(fn, "w") as entry_fp:
        return entry_fp.write(data)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "wb") as f:
        return f.write(data)
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)


def WriteToInputFile(input_file, fn, str: str):
  """Writes the string content to fn in the input zipfile or directory."""
  return WriteBytesToInputFile(input_file, fn, str.encode())


def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from input zipfile or directory into a file."""
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as f:
      f.write(input_file.read(fn))
    return tmp_file
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      tmp_file = MakeTempFile(os.path.basename(fn))
      with open(tmp_file, "wb") as fp:
        fp.write(zfp.read(fn))
      return tmp_file
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file, accepted inputs are ZipFile object, path to .zip file on disk, or path to extracted directory. Actual: " + input_file)
    file = os.path.join(input_file, *fn.split("/"))
    if not os.path.exists(file):
      raise KeyError(fn)
    return file
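# All of the *InputFile helpers above accept a zipfile.ZipFile object, a path to
# a .zip file on disk, or a path to an extracted target_files directory. For
# example (illustrative):
#   if DoesInputFileContain(input_file, "META/misc_info.txt"):
#     misc_info = ReadFromInputFile(input_file, "META/misc_info.txt")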


class RamdiskFormat(object):
  LZ4 = 1
  GZ = 2


def GetRamdiskFormat(info_dict):
  if info_dict.get('lz4_ramdisks') == 'true':
    ramdisk_format = RamdiskFormat.LZ4
  else:
    ramdisk_format = RamdiskFormat.GZ
  return ramdisk_format

Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 836 | |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 837 | def LoadInfoDict(input_file, repacking=False): |
| 838 | """Loads the key/value pairs from the given input target_files. |
| 839 | |
Tianjie | a85bdf0 | 2020-07-29 11:56:19 -0700 | [diff] [blame] | 840 | It reads `META/misc_info.txt` file in the target_files input, does validation |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 841 | checks and returns the parsed key/value pairs for to the given build. It's |
| 842 | usually called early when working on input target_files files, e.g. when |
| 843 | generating OTAs, or signing builds. Note that the function may be called |
| 844 | against an old target_files file (i.e. from past dessert releases). So the |
| 845 | property parsing needs to be backward compatible. |
| 846 | |
| 847 | In a `META/misc_info.txt`, a few properties are stored as links to the files |
| 848 | in the PRODUCT_OUT directory. It works fine with the build system. However, |
| 849 | they are no longer available when (re)generating images from target_files zip. |
| 850 | When `repacking` is True, redirect these properties to the actual files in the |
| 851 | unzipped directory. |
| 852 | |
| 853 | Args: |
| 854 | input_file: The input target_files file, which could be an open |
| 855 | zipfile.ZipFile instance, or a str for the dir that contains the files |
| 856 | unzipped from a target_files file. |
| 857 | repacking: Whether it's trying repack an target_files file after loading the |
| 858 | info dict (default: False). If so, it will rewrite a few loaded |
| 859 | properties (e.g. selinux_fc, root_dir) to point to the actual files in |
| 860 | target_files file. When doing repacking, `input_file` must be a dir. |
| 861 | |
| 862 | Returns: |
| 863 | A dict that contains the parsed key/value pairs. |
| 864 | |
| 865 | Raises: |
| 866 | AssertionError: On invalid input arguments. |
| 867 | ValueError: On malformed input values. |
| 868 | """ |
| 869 | if repacking: |
| 870 | assert isinstance(input_file, str), \ |
| 871 | "input_file must be a path str when doing repacking" |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 872 | |
Doug Zongker | c925382 | 2014-02-04 12:17:58 -0800 | [diff] [blame] | 873 | def read_helper(fn): |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 874 | return ReadFromInputFile(input_file, fn) |
Tao Bao | 6cd5473 | 2017-02-27 15:12:05 -0800 | [diff] [blame] | 875 | |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 876 | try: |
Michael Runge | 6e83611 | 2014-04-15 17:40:21 -0700 | [diff] [blame] | 877 | d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n")) |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 878 | except KeyError: |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 879 | raise ValueError("Failed to find META/misc_info.txt in input target-files") |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 880 | |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 881 | if "recovery_api_version" not in d: |
| 882 | raise ValueError("Failed to find 'recovery_api_version'") |
| 883 | if "fstab_version" not in d: |
| 884 | raise ValueError("Failed to find 'fstab_version'") |
Ken Sumrall | 3b07cf1 | 2013-02-19 17:35:29 -0800 | [diff] [blame] | 885 | |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 886 | if repacking: |
Daniel Norman | 72c626f | 2019-05-13 15:58:14 -0700 | [diff] [blame] | 887 | # "selinux_fc" properties should point to the file_contexts files |
| 888 | # (file_contexts.bin) under META/. |
| 889 | for key in d: |
| 890 | if key.endswith("selinux_fc"): |
| 891 | fc_basename = os.path.basename(d[key]) |
| 892 | fc_config = os.path.join(input_file, "META", fc_basename) |
| 893 | assert os.path.exists(fc_config) |
Tao Bao | 2c15d9e | 2015-07-09 11:51:16 -0700 | [diff] [blame] | 894 | |
Daniel Norman | 72c626f | 2019-05-13 15:58:14 -0700 | [diff] [blame] | 895 | d[key] = fc_config |
Tao Bao | 2c15d9e | 2015-07-09 11:51:16 -0700 | [diff] [blame] | 896 | |
Tom Cherry | d14b895 | 2018-08-09 14:26:00 -0700 | [diff] [blame] | 897 | # Similarly we need to redirect "root_dir", and "root_fs_config". |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 898 | d["root_dir"] = os.path.join(input_file, "ROOT") |
Tom Cherry | d14b895 | 2018-08-09 14:26:00 -0700 | [diff] [blame] | 899 | d["root_fs_config"] = os.path.join( |
Tao Bao | 410ad8b | 2018-08-24 12:08:38 -0700 | [diff] [blame] | 900 | input_file, "META", "root_filesystem_config.txt") |
Tao Bao | 84e7568 | 2015-07-19 02:38:53 -0700 | [diff] [blame] | 901 | |
David Anderson | 0ec64ac | 2019-12-06 12:21:18 -0800 | [diff] [blame] | 902 | # Redirect {partition}_base_fs_file for each of the named partitions. |
Yifan Hong | cfb917a | 2020-05-07 14:58:20 -0700 | [diff] [blame] | 903 | for part_name in ["system", "vendor", "system_ext", "product", "odm", |
Ramji Jiyani | 13a4137 | 2022-01-27 07:05:08 +0000 | [diff] [blame] | 904 | "vendor_dlkm", "odm_dlkm", "system_dlkm"]: |
David Anderson | 0ec64ac | 2019-12-06 12:21:18 -0800 | [diff] [blame] | 905 | key_name = part_name + "_base_fs_file" |
| 906 | if key_name not in d: |
| 907 | continue |
| 908 | basename = os.path.basename(d[key_name]) |
| 909 | base_fs_file = os.path.join(input_file, "META", basename) |
| 910 | if os.path.exists(base_fs_file): |
| 911 | d[key_name] = base_fs_file |
Tao Bao | b079b50 | 2016-05-03 08:01:19 -0700 | [diff] [blame] | 912 | else: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 913 | logger.warning( |
David Anderson | 0ec64ac | 2019-12-06 12:21:18 -0800 | [diff] [blame] | 914 | "Failed to find %s base fs file: %s", part_name, base_fs_file) |
| 915 | del d[key_name] |
Tao Bao | f54216f | 2016-03-29 15:12:37 -0700 | [diff] [blame] | 916 | |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 917 | def makeint(key): |
| 918 | if key in d: |
| 919 | d[key] = int(d[key], 0) |
| 920 | |
| 921 | makeint("recovery_api_version") |
| 922 | makeint("blocksize") |
| 923 | makeint("system_size") |
Daniel Rosenberg | f4eabc3 | 2014-07-10 15:42:38 -0700 | [diff] [blame] | 924 | makeint("vendor_size") |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 925 | makeint("userdata_size") |
Ying Wang | 9f8e8db | 2011-11-04 11:37:01 -0700 | [diff] [blame] | 926 | makeint("cache_size") |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 927 | makeint("recovery_size") |
Ken Sumrall | 3b07cf1 | 2013-02-19 17:35:29 -0800 | [diff] [blame] | 928 | makeint("fstab_version") |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 929 | |
Steve Muckle | 903a1ca | 2020-05-07 17:32:10 -0700 | [diff] [blame] | 930 | boot_images = "boot.img" |
| 931 | if "boot_images" in d: |
| 932 | boot_images = d["boot_images"] |
| 933 | for b in boot_images.split(): |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 934 | makeint(b.replace(".img", "_size")) |
Steve Muckle | 903a1ca | 2020-05-07 17:32:10 -0700 | [diff] [blame] | 935 | |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 936 | # Load recovery fstab if applicable. |
| 937 | d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper) |
TJ Rhoades | 6f488e9 | 2022-05-01 22:16:22 -0700 | [diff] [blame] | 938 | ramdisk_format = GetRamdiskFormat(d) |
Tianjie Xu | cfa8622 | 2016-03-07 16:31:19 -0800 | [diff] [blame] | 939 | |
Tianjie Xu | 861f413 | 2018-09-12 11:49:33 -0700 | [diff] [blame] | 940 | # Tries to load the build props for all partitions with care_map, including |
| 941 | # system and vendor. |
Yifan Hong | 5057b95 | 2021-01-07 14:09:57 -0800 | [diff] [blame] | 942 | for partition in PARTITIONS_WITH_BUILD_PROP: |
Bowgo Tsai | 71a4d5c | 2019-05-17 23:21:48 +0800 | [diff] [blame] | 943 | partition_prop = "{}.build.prop".format(partition) |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 944 | d[partition_prop] = PartitionBuildProps.FromInputFile( |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 945 | input_file, partition, ramdisk_format=ramdisk_format) |
Tianjie Xu | 861f413 | 2018-09-12 11:49:33 -0700 | [diff] [blame] | 946 | d["build.prop"] = d["system.build.prop"] |
Tao Bao | 12d87fc | 2018-01-31 12:18:52 -0800 | [diff] [blame] | 947 | |
Tao Bao | 12d87fc | 2018-01-31 12:18:52 -0800 | [diff] [blame] | 948 | if d.get("avb_enable") == "true": |
Tianjie | fdda51d | 2021-05-05 14:46:35 -0700 | [diff] [blame] | 949 | # Set the vbmeta digest if it exists.
| 950 | try: |
| 951 | d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip() |
| 952 | except KeyError: |
| 953 | pass |
| 954 | |
Kelvin Zhang | 39aea44 | 2020-08-17 11:04:25 -0400 | [diff] [blame] | 955 | try: |
| 956 | d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n") |
| 957 | except KeyError: |
| 958 | logger.warning("Can't find META/ab_partitions.txt") |
Doug Zongker | 1eb74dd | 2012-08-16 16:19:00 -0700 | [diff] [blame] | 959 | return d |
| 960 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 961 | |
Daniel Norman | 4cc9df6 | 2019-07-18 10:11:07 -0700 | [diff] [blame] | 962 | def LoadListFromFile(file_path): |
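| | """Reads the given file and returns its contents as a list of lines."""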
Kiyoung Kim | ebe7c9c | 2019-06-25 17:09:55 +0900 | [diff] [blame] | 963 | with open(file_path) as f: |
Daniel Norman | 4cc9df6 | 2019-07-18 10:11:07 -0700 | [diff] [blame] | 964 | return f.read().splitlines() |
Kiyoung Kim | ebe7c9c | 2019-06-25 17:09:55 +0900 | [diff] [blame] | 965 | |
Daniel Norman | 4cc9df6 | 2019-07-18 10:11:07 -0700 | [diff] [blame] | 966 | |
| 967 | def LoadDictionaryFromFile(file_path): |
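| | """Reads a file of key=value lines (see LoadDictionaryFromLines) into a dict."""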
| 968 | lines = LoadListFromFile(file_path) |
Kiyoung Kim | ebe7c9c | 2019-06-25 17:09:55 +0900 | [diff] [blame] | 969 | return LoadDictionaryFromLines(lines) |
| 970 | |
| 971 | |
Michael Runge | 6e83611 | 2014-04-15 17:40:21 -0700 | [diff] [blame] | 972 | def LoadDictionaryFromLines(lines): |
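| | """Parses key=value lines into a dict, skipping blanks and '#' comments.
| |
| | Illustrative example: the lines ['# comment', 'recovery_api_version=3', '']
| | would produce {'recovery_api_version': '3'}; only the first '=' splits, so
| | values may themselves contain '='.
| | """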
Doug Zongker | 1eb74dd | 2012-08-16 16:19:00 -0700 | [diff] [blame] | 973 | d = {} |
Michael Runge | 6e83611 | 2014-04-15 17:40:21 -0700 | [diff] [blame] | 974 | for line in lines: |
Doug Zongker | 1eb74dd | 2012-08-16 16:19:00 -0700 | [diff] [blame] | 975 | line = line.strip() |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 976 | if not line or line.startswith("#"): |
| 977 | continue |
Ying Wang | 114b46f | 2014-04-15 11:24:00 -0700 | [diff] [blame] | 978 | if "=" in line: |
| 979 | name, value = line.split("=", 1) |
| 980 | d[name] = value |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 981 | return d |
| 982 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 983 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 984 | class PartitionBuildProps(object): |
| 985 | """The class holds the build prop of a particular partition. |
| 986 | |
| 987 | This class loads the build.prop and holds the build properties for a given |
| 988 | partition. It also partially recognizes the 'import' statement in the
| 989 | build.prop, and calculates alternative values of some specific build
| 990 | properties at runtime.
| 991 | |
| 992 | Attributes: |
| 993 | input_file: a zipped target-file or an unzipped target-file directory. |
| 994 | partition: name of the partition. |
| 995 | props_allow_override: a list of build properties to search for the |
| 996 | alternative values during runtime. |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 997 | build_props: a dict of build properties for the given partition. |
| 998 | prop_overrides: a set of props that are overridden by import. |
| 999 | placeholder_values: A dict of runtime variables' values to replace the |
| 1000 | placeholders in the build.prop file. We expect exactly one value for |
| 1001 | each of the variables. |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 1002 | ramdisk_format: If name is "boot", the format of the ramdisk inside the
| 1003 | boot image; otherwise the value is ignored.
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 1004 | lz4 is used for decompression by default; if the value is gzip, gzip is used.
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1005 | """ |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1006 | |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1007 | def __init__(self, input_file, name, placeholder_values=None): |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1008 | self.input_file = input_file |
| 1009 | self.partition = name |
| 1010 | self.props_allow_override = [props.format(name) for props in [ |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1011 | 'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']] |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1012 | self.build_props = {} |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1013 | self.prop_overrides = set() |
| 1014 | self.placeholder_values = {} |
| 1015 | if placeholder_values: |
| 1016 | self.placeholder_values = copy.deepcopy(placeholder_values) |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1017 | |
| 1018 | @staticmethod |
| 1019 | def FromDictionary(name, build_props): |
| 1020 | """Constructs an instance from a build prop dictionary.""" |
| 1021 | |
| 1022 | props = PartitionBuildProps("unknown", name) |
| 1023 | props.build_props = build_props.copy() |
| 1024 | return props |
| 1025 | |
| 1026 | @staticmethod |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 1027 | def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4): |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1028 | """Loads the build.prop file and builds the attributes.""" |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1029 | |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1030 | if name in ("boot", "init_boot"): |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 1031 | data = PartitionBuildProps._ReadBootPropFile( |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1032 | input_file, name, ramdisk_format=ramdisk_format) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1033 | else: |
| 1034 | data = PartitionBuildProps._ReadPartitionPropFile(input_file, name) |
| 1035 | |
| 1036 | props = PartitionBuildProps(input_file, name, placeholder_values) |
| 1037 | props._LoadBuildProp(data) |
| 1038 | return props |
| 1039 | |
| 1040 | @staticmethod |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1041 | def _ReadBootPropFile(input_file, partition_name, ramdisk_format): |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1042 | """ |
| 1043 | Read build.prop for boot image from input_file. |
| 1044 | Return empty string if not found. |
| 1045 | """ |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1046 | image_path = 'IMAGES/' + partition_name + '.img' |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1047 | try: |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1048 | boot_img = ExtractFromInputFile(input_file, image_path) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1049 | except KeyError: |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1050 | logger.warning('Failed to read %s', image_path) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1051 | return '' |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 1052 | prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1053 | if prop_file is None: |
| 1054 | return '' |
Kelvin Zhang | 645dcb8 | 2021-02-09 17:52:50 -0500 | [diff] [blame] | 1055 | with open(prop_file, "r") as f: |
| 1056 | return f.read() |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1057 | |
| 1058 | @staticmethod |
| 1059 | def _ReadPartitionPropFile(input_file, name): |
| 1060 | """ |
| 1061 | Read build.prop for name from input_file. |
| 1062 | Return empty string if not found. |
| 1063 | """ |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1064 | data = '' |
| 1065 | for prop_file in ['{}/etc/build.prop'.format(name.upper()), |
| 1066 | '{}/build.prop'.format(name.upper())]: |
| 1067 | try: |
| 1068 | data = ReadFromInputFile(input_file, prop_file) |
| 1069 | break |
| 1070 | except KeyError: |
| 1071 | logger.warning('Failed to read %s', prop_file) |
Kelvin Zhang | 4fc3aa0 | 2021-11-16 18:58:58 -0800 | [diff] [blame] | 1072 | if data == '': |
| 1073 | logger.warning("Failed to read build.prop for partition {}".format(name)) |
Yifan Hong | 10482a2 | 2021-01-07 14:38:41 -0800 | [diff] [blame] | 1074 | return data |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1075 | |
Yifan Hong | 125d0b6 | 2020-09-24 17:07:03 -0700 | [diff] [blame] | 1076 | @staticmethod |
| 1077 | def FromBuildPropFile(name, build_prop_file): |
| 1078 | """Constructs an instance from a build prop file.""" |
| 1079 | |
| 1080 | props = PartitionBuildProps("unknown", name) |
| 1081 | with open(build_prop_file) as f: |
| 1082 | props._LoadBuildProp(f.read()) |
| 1083 | return props |
| 1084 | |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1085 | def _LoadBuildProp(self, data): |
| 1086 | for line in data.split('\n'): |
| 1087 | line = line.strip() |
| 1088 | if not line or line.startswith("#"): |
| 1089 | continue |
| 1090 | if line.startswith("import"): |
| 1091 | overrides = self._ImportParser(line) |
| 1092 | duplicates = self.prop_overrides.intersection(overrides.keys()) |
| 1093 | if duplicates: |
| 1094 | raise ValueError('prop {} is overridden multiple times'.format( |
| 1095 | ','.join(duplicates))) |
| 1096 | self.prop_overrides = self.prop_overrides.union(overrides.keys()) |
| 1097 | self.build_props.update(overrides) |
| 1098 | elif "=" in line: |
| 1099 | name, value = line.split("=", 1) |
| 1100 | if name in self.prop_overrides: |
| 1101 | raise ValueError('prop {} is set again after being overridden by '
| 1102 | 'import statement'.format(name))
| 1103 | self.build_props[name] = value |
| 1104 | |
| 1105 | def _ImportParser(self, line): |
| 1106 | """Parses the build prop in a given import statement.""" |
| 1107 | |
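| | # For the odm partition, an accepted statement could look like (illustrative):
| | #   import /odm/etc/${ro.boot.product.hardware.sku}/build.prop
| | # The ${...} placeholder is substituted from self.placeholder_values below.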
| 1108 | tokens = line.split() |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1109 | if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3): |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1110 | raise ValueError('Unrecognized import statement {}'.format(line)) |
Hongguang Chen | b4702b7 | 2020-05-13 18:05:20 -0700 | [diff] [blame] | 1111 | |
| 1112 | if len(tokens) == 3: |
| 1113 | logger.info("Import %s from %s, skip", tokens[2], tokens[1]) |
| 1114 | return {} |
| 1115 | |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1116 | import_path = tokens[1] |
| 1117 | if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path): |
Kelvin Zhang | 42ab828 | 2022-02-17 13:07:55 -0800 | [diff] [blame] | 1118 | logger.warning('Unrecognized import path {}'.format(line))
| 1119 | return {} |
Tianjie Xu | 9afb221 | 2020-05-10 21:48:15 +0000 | [diff] [blame] | 1120 | |
| 1121 | # We only recognize a subset of import statement that the init process |
| 1122 | # supports. And we can loose the restriction based on how the dynamic |
| 1123 | # fingerprint is used in practice. The placeholder format should be |
| 1124 | # ${placeholder}, and its value should be provided by the caller through |
| 1125 | # the placeholder_values. |
| 1126 | for prop, value in self.placeholder_values.items(): |
| 1127 | prop_place_holder = '${{{}}}'.format(prop) |
| 1128 | if prop_place_holder in import_path: |
| 1129 | import_path = import_path.replace(prop_place_holder, value) |
| 1130 | if '$' in import_path: |
| 1131 | logger.info('Unresolved placeholder in import path %s', import_path)
| 1132 | return {} |
| 1133 | |
| 1134 | import_path = import_path.replace('/{}'.format(self.partition), |
| 1135 | self.partition.upper()) |
| 1136 | logger.info('Parsing build props override from %s', import_path) |
| 1137 | |
| 1138 | lines = ReadFromInputFile(self.input_file, import_path).split('\n') |
| 1139 | d = LoadDictionaryFromLines(lines) |
| 1140 | return {key: val for key, val in d.items() |
| 1141 | if key in self.props_allow_override} |
| 1142 | |
Kelvin Zhang | 5ef2519 | 2022-10-19 11:25:22 -0700 | [diff] [blame] | 1143 | def __getstate__(self): |
| 1144 | state = self.__dict__.copy() |
| 1145 | # ZipFile objects can't be pickled; store the underlying filename instead.
| 1146 | if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile): |
| 1147 | state["input_file"] = state["input_file"].filename |
| 1148 | return state |
| 1149 | |
Tianjie Xu | 0fde41e | 2020-05-09 05:24:18 +0000 | [diff] [blame] | 1150 | def GetProp(self, prop): |
| 1151 | return self.build_props.get(prop) |
| 1152 | |
| 1153 | |
Tianjie Xu | cfa8622 | 2016-03-07 16:31:19 -0800 | [diff] [blame] | 1154 | def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path, |
| 1155 | system_root_image=False): |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1156 | class Partition(object): |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1157 | def __init__(self, mount_point, fs_type, device, length, context, slotselect): |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 1158 | self.mount_point = mount_point |
| 1159 | self.fs_type = fs_type |
| 1160 | self.device = device |
| 1161 | self.length = length |
Tao Bao | 548eb76 | 2015-06-10 12:32:41 -0700 | [diff] [blame] | 1162 | self.context = context |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1163 | self.slotselect = slotselect |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1164 | |
| 1165 | try: |
Tianjie Xu | cfa8622 | 2016-03-07 16:31:19 -0800 | [diff] [blame] | 1166 | data = read_helper(recovery_fstab_path) |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1167 | except KeyError: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 1168 | logger.warning("Failed to find %s", recovery_fstab_path) |
Jeff Davidson | 033fbe2 | 2011-10-26 18:08:09 -0700 | [diff] [blame] | 1169 | data = "" |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1170 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1171 | assert fstab_version == 2 |
| 1172 | |
| 1173 | d = {} |
| 1174 | for line in data.split("\n"): |
| 1175 | line = line.strip() |
| 1176 | if not line or line.startswith("#"): |
| 1177 | continue |
| 1178 | |
| 1179 | # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags> |
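| | # e.g. an entry might look like (illustrative):
| | #   /dev/block/by-name/misc  /misc  emmc  defaults  wait,slotselect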
| 1180 | pieces = line.split() |
| 1181 | if len(pieces) != 5: |
| 1182 | raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,)) |
| 1183 | |
| 1184 | # Ignore entries that are managed by vold. |
| 1185 | options = pieces[4] |
| 1186 | if "voldmanaged=" in options: |
| 1187 | continue |
| 1188 | |
| 1189 | # It's a good line, parse it. |
| 1190 | length = 0 |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1191 | slotselect = False |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1192 | options = options.split(",") |
| 1193 | for i in options: |
| 1194 | if i.startswith("length="): |
| 1195 | length = int(i[7:]) |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1196 | elif i == "slotselect": |
| 1197 | slotselect = True |
Doug Zongker | 086cbb0 | 2011-02-17 15:54:20 -0800 | [diff] [blame] | 1198 | else: |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1199 | # Ignore all unknown options in the unified fstab. |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 1200 | continue |
Ken Sumrall | 3b07cf1 | 2013-02-19 17:35:29 -0800 | [diff] [blame] | 1201 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1202 | mount_flags = pieces[3] |
| 1203 | # Honor the SELinux context if present. |
| 1204 | context = None |
| 1205 | for i in mount_flags.split(","): |
| 1206 | if i.startswith("context="): |
| 1207 | context = i |
Doug Zongker | 086cbb0 | 2011-02-17 15:54:20 -0800 | [diff] [blame] | 1208 | |
Tao Bao | d1de6f3 | 2017-03-01 16:38:48 -0800 | [diff] [blame] | 1209 | mount_point = pieces[1] |
| 1210 | d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2], |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1211 | device=pieces[0], length=length, context=context, |
| 1212 | slotselect=slotselect) |
Ken Sumrall | 3b07cf1 | 2013-02-19 17:35:29 -0800 | [diff] [blame] | 1213 | |
Daniel Rosenberg | e6853b0 | 2015-06-05 17:59:27 -0700 | [diff] [blame] | 1214 | # / is used for the system mount point when the root directory is included in |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1215 | # system. Other areas assume system is always at "/system" so point /system |
| 1216 | # at /. |
Daniel Rosenberg | e6853b0 | 2015-06-05 17:59:27 -0700 | [diff] [blame] | 1217 | if system_root_image: |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 1218 | assert '/system' not in d and '/' in d |
Daniel Rosenberg | e6853b0 | 2015-06-05 17:59:27 -0700 | [diff] [blame] | 1219 | d["/system"] = d["/"] |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 1220 | return d |
| 1221 | |
| 1222 | |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1223 | def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper): |
| 1224 | """Finds the path to recovery fstab and loads its contents.""" |
| 1225 | # recovery fstab is only meaningful when installing an update via recovery |
| 1226 | # (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA. |
Yifan Hong | 65afc07 | 2020-04-17 10:08:10 -0700 | [diff] [blame] | 1227 | if info_dict.get('ab_update') == 'true' and \ |
| 1228 | info_dict.get("allow_non_ab") != "true": |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1229 | return None |
| 1230 | |
| 1231 | # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to |
| 1232 | # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both |
| 1233 | # cases, since it may load the info_dict from an old build (e.g. when |
| 1234 | # generating incremental OTAs from that build). |
| 1235 | system_root_image = info_dict.get('system_root_image') == 'true' |
| 1236 | if info_dict.get('no_recovery') != 'true': |
| 1237 | recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab' |
Kelvin Zhang | 2ab6986 | 2023-10-27 10:58:05 -0700 | [diff] [blame] | 1238 | if not DoesInputFileContain(input_file, recovery_fstab_path): |
| 1239 | recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab' |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1240 | return LoadRecoveryFSTab( |
| 1241 | read_helper, info_dict['fstab_version'], recovery_fstab_path, |
| 1242 | system_root_image) |
| 1243 | |
| 1244 | if info_dict.get('recovery_as_boot') == 'true': |
| 1245 | recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab' |
Kelvin Zhang | 2ab6986 | 2023-10-27 10:58:05 -0700 | [diff] [blame] | 1246 | if not DoesInputFileContain(input_file, recovery_fstab_path): |
| 1247 | recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab' |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1248 | return LoadRecoveryFSTab( |
| 1249 | read_helper, info_dict['fstab_version'], recovery_fstab_path, |
| 1250 | system_root_image) |
| 1251 | |
| 1252 | return None |
| 1253 | |
| 1254 | |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 1255 | def DumpInfoDict(d): |
| 1256 | for k, v in sorted(d.items()): |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 1257 | logger.info("%-25s = (%s) %s", k, type(v).__name__, v) |
Doug Zongker | c19a8d5 | 2010-07-01 15:30:11 -0700 | [diff] [blame] | 1258 | |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 1259 | |
Daniel Norman | 5541714 | 2019-11-25 16:04:36 -0800 | [diff] [blame] | 1260 | def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict): |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1261 | """Merges dynamic partition info variables. |
| 1262 | |
| 1263 | Args: |
| 1264 | framework_dict: The dictionary of dynamic partition info variables from the |
| 1265 | partial framework target files. |
| 1266 | vendor_dict: The dictionary of dynamic partition info variables from the |
| 1267 | partial vendor target files. |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1268 | |
| 1269 | Returns: |
| 1270 | The merged dynamic partition info dictionary. |
| 1271 | """ |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1272 | |
| 1273 | def uniq_concat(a, b): |
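| | # Whitespace-splits both inputs and returns the sorted, de-duplicated union,
| | # e.g. uniq_concat("system vendor", "vendor product") -> "product system vendor".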
jiajia tang | e5ddfcd | 2022-06-21 10:36:12 +0800 | [diff] [blame] | 1274 | combined = set(a.split()) |
| 1275 | combined.update(set(b.split())) |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1276 | combined = [item.strip() for item in combined if item.strip()] |
| 1277 | return " ".join(sorted(combined)) |
| 1278 | |
| 1279 | if (framework_dict.get("use_dynamic_partitions") != |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 1280 | "true") or (vendor_dict.get("use_dynamic_partitions") != "true"): |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1281 | raise ValueError("Both dictionaries must have use_dynamic_partitions=true") |
| 1282 | |
| 1283 | merged_dict = {"use_dynamic_partitions": "true"} |
Kelvin Zhang | 6a683ce | 2022-05-02 12:19:45 -0700 | [diff] [blame] | 1284 | # For key-value pairs that are the same, copy them to the merged dict.
| 1285 | for key in vendor_dict.keys(): |
| 1286 | if key in framework_dict and framework_dict[key] == vendor_dict[key]: |
| 1287 | merged_dict[key] = vendor_dict[key] |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1288 | |
| 1289 | merged_dict["dynamic_partition_list"] = uniq_concat( |
| 1290 | framework_dict.get("dynamic_partition_list", ""), |
| 1291 | vendor_dict.get("dynamic_partition_list", "")) |
| 1292 | |
| 1293 | # Super block devices are defined by the vendor dict. |
| 1294 | if "super_block_devices" in vendor_dict: |
| 1295 | merged_dict["super_block_devices"] = vendor_dict["super_block_devices"] |
jiajia tang | e5ddfcd | 2022-06-21 10:36:12 +0800 | [diff] [blame] | 1296 | for block_device in merged_dict["super_block_devices"].split(): |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1297 | key = "super_%s_device_size" % block_device |
| 1298 | if key not in vendor_dict: |
| 1299 | raise ValueError("Vendor dict does not contain required key %s." % key) |
| 1300 | merged_dict[key] = vendor_dict[key] |
| 1301 | |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1302 | # Partition groups and group sizes are defined by the vendor dict because |
| 1303 | # these values may vary for each board that uses a shared system image. |
| 1304 | merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"] |
jiajia tang | e5ddfcd | 2022-06-21 10:36:12 +0800 | [diff] [blame] | 1305 | for partition_group in merged_dict["super_partition_groups"].split(): |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1306 | # Set the partition group's size using the value from the vendor dict. |
Daniel Norman | 5541714 | 2019-11-25 16:04:36 -0800 | [diff] [blame] | 1307 | key = "super_%s_group_size" % partition_group |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1308 | if key not in vendor_dict: |
| 1309 | raise ValueError("Vendor dict does not contain required key %s." % key) |
| 1310 | merged_dict[key] = vendor_dict[key] |
| 1311 | |
| 1312 | # Set the partition group's partition list using a concatenation of the |
| 1313 | # framework and vendor partition lists. |
Daniel Norman | 5541714 | 2019-11-25 16:04:36 -0800 | [diff] [blame] | 1314 | key = "super_%s_partition_list" % partition_group |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1315 | merged_dict[key] = uniq_concat( |
| 1316 | framework_dict.get(key, ""), vendor_dict.get(key, "")) |
P Adarsh Reddy | 7e9b5c4 | 2019-12-20 15:07:24 +0530 | [diff] [blame] | 1317 | |
Daniel Norman | b0c7591 | 2020-09-24 14:30:21 -0700 | [diff] [blame] | 1318 | # Various other flags should be copied from the vendor dict, if defined. |
| 1319 | for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake", |
| 1320 | "super_metadata_device", "super_partition_error_limit", |
| 1321 | "super_partition_size"): |
| 1322 | if key in vendor_dict.keys(): |
| 1323 | merged_dict[key] = vendor_dict[key] |
| 1324 | |
Daniel Norman | bfc51ef | 2019-07-24 14:34:54 -0700 | [diff] [blame] | 1325 | return merged_dict |
| 1326 | |
| 1327 | |
Daniel Norman | 21c34f7 | 2020-11-11 17:25:50 -0800 | [diff] [blame] | 1328 | def PartitionMapFromTargetFiles(target_files_dir): |
| 1329 | """Builds a map from partition -> path within an extracted target files directory.""" |
| 1330 | # Keep possible_subdirs in sync with build/make/core/board_config.mk. |
| 1331 | possible_subdirs = { |
| 1332 | "system": ["SYSTEM"], |
| 1333 | "vendor": ["VENDOR", "SYSTEM/vendor"], |
| 1334 | "product": ["PRODUCT", "SYSTEM/product"], |
| 1335 | "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"], |
| 1336 | "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"], |
| 1337 | "vendor_dlkm": [ |
| 1338 | "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm" |
| 1339 | ], |
| 1340 | "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"], |
Ramji Jiyani | 13a4137 | 2022-01-27 07:05:08 +0000 | [diff] [blame] | 1341 | "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"], |
Daniel Norman | 21c34f7 | 2020-11-11 17:25:50 -0800 | [diff] [blame] | 1342 | } |
| 1343 | partition_map = {} |
| 1344 | for partition, subdirs in possible_subdirs.items(): |
| 1345 | for subdir in subdirs: |
| 1346 | if os.path.exists(os.path.join(target_files_dir, subdir)): |
| 1347 | partition_map[partition] = subdir |
| 1348 | break |
| 1349 | return partition_map |
| 1350 | |
| 1351 | |
Daniel Norman | d335156 | 2020-10-29 12:33:11 -0700 | [diff] [blame] | 1352 | def SharedUidPartitionViolations(uid_dict, partition_groups): |
| 1353 | """Checks for APK sharedUserIds that cross partition group boundaries. |
| 1354 | |
| 1355 | This uses a single or merged build's shareduid_violation_modules.json |
| 1356 | output file, as generated by find_shareduid_violation.py or |
| 1357 | core/tasks/find-shareduid-violation.mk. |
| 1358 | |
| 1359 | An error is defined as a sharedUserId that is found in a set of partitions |
| 1360 | that span more than one partition group. |
| 1361 | |
| 1362 | Args: |
| 1363 | uid_dict: A dictionary created by using the standard json module to read a |
| 1364 | complete shareduid_violation_modules.json file. |
| 1365 | partition_groups: A list of groups, where each group is a list of |
| 1366 | partitions. |
| 1367 | |
| 1368 | Returns: |
| 1369 | A list of error messages. |
| 1370 | """ |
| 1371 | errors = [] |
| 1372 | for uid, partitions in uid_dict.items(): |
| 1373 | found_in_groups = [ |
| 1374 | group for group in partition_groups |
| 1375 | if set(partitions.keys()) & set(group) |
| 1376 | ] |
| 1377 | if len(found_in_groups) > 1: |
| 1378 | errors.append( |
| 1379 | "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\"" |
| 1380 | % (uid, ",".join(sorted(partitions.keys())))) |
| 1381 | return errors |
| 1382 | |
| 1383 | |
Daniel Norman | 21c34f7 | 2020-11-11 17:25:50 -0800 | [diff] [blame] | 1384 | def RunHostInitVerifier(product_out, partition_map): |
| 1385 | """Runs host_init_verifier on the init rc files within partitions. |
| 1386 | |
| 1387 | host_init_verifier searches the etc/init path within each partition. |
| 1388 | |
| 1389 | Args: |
| 1390 | product_out: PRODUCT_OUT directory, containing partition directories. |
| 1391 | partition_map: A map of partition name -> relative path within product_out. |
| 1392 | """ |
| 1393 | allowed_partitions = ("system", "system_ext", "product", "vendor", "odm") |
| 1394 | cmd = ["host_init_verifier"] |
| 1395 | for partition, path in partition_map.items(): |
| 1396 | if partition not in allowed_partitions: |
| 1397 | raise ExternalError("Unable to call host_init_verifier for partition %s" % |
| 1398 | partition) |
| 1399 | cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)]) |
| 1400 | # Add --property-contexts if the file exists on the partition. |
| 1401 | property_contexts = "%s_property_contexts" % ( |
| 1402 | "plat" if partition == "system" else partition) |
| 1403 | property_contexts_path = os.path.join(product_out, path, "etc", "selinux", |
| 1404 | property_contexts) |
| 1405 | if os.path.exists(property_contexts_path): |
| 1406 | cmd.append("--property-contexts=%s" % property_contexts_path) |
| 1407 | # Add the passwd file if the file exists on the partition. |
| 1408 | passwd_path = os.path.join(product_out, path, "etc", "passwd") |
| 1409 | if os.path.exists(passwd_path): |
| 1410 | cmd.extend(["-p", passwd_path]) |
| 1411 | return RunAndCheckOutput(cmd) |
| 1412 | |
| 1413 | |
Kelvin Zhang | de53f7d | 2023-10-03 12:21:28 -0700 | [diff] [blame] | 1414 | def AppendAVBSigningArgs(cmd, partition, avb_salt=None): |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1415 | """Append signing arguments for avbtool.""" |
| 1416 | # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096" |
Kelvin Zhang | e634bde | 2023-04-28 23:59:43 -0700 | [diff] [blame] | 1417 | key_path = ResolveAVBSigningPathArgs( |
| 1418 | OPTIONS.info_dict.get("avb_" + partition + "_key_path")) |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1419 | algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm") |
| 1420 | if key_path and algorithm: |
| 1421 | cmd.extend(["--key", key_path, "--algorithm", algorithm]) |
Kelvin Zhang | de53f7d | 2023-10-03 12:21:28 -0700 | [diff] [blame] | 1422 | if avb_salt is None: |
| 1423 | avb_salt = OPTIONS.info_dict.get("avb_salt") |
Tao Bao | 2b6dfd6 | 2017-09-27 17:17:43 -0700 | [diff] [blame] | 1424 | # make_vbmeta_image doesn't like "--salt" (and it's not needed). |
Tao Bao | 744c4c7 | 2018-08-20 21:09:07 -0700 | [diff] [blame] | 1425 | if avb_salt and not partition.startswith("vbmeta"): |
Tao Bao | 2b6dfd6 | 2017-09-27 17:17:43 -0700 | [diff] [blame] | 1426 | cmd.extend(["--salt", avb_salt]) |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1427 | |
| 1428 | |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1429 | def ResolveAVBSigningPathArgs(split_args): |
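| | """Resolves file paths in AVB signing args against OPTIONS.search_path.
| |
| | A bare string is treated as a path; in a list, only the value following
| | '--signing_helper' is resolved. Raises ExternalError if a path is not found.
| | """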
| 1430 | |
| 1431 | def ResolveBinaryPath(path): |
| 1432 | if os.path.exists(path): |
| 1433 | return path |
Kelvin Zhang | 97a5afe | 2023-06-27 10:30:48 -0700 | [diff] [blame] | 1434 | if OPTIONS.search_path: |
| 1435 | new_path = os.path.join(OPTIONS.search_path, path) |
| 1436 | if os.path.exists(new_path): |
| 1437 | return new_path |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1438 | raise ExternalError( |
Kelvin Zhang | 43df080 | 2023-07-24 13:16:03 -0700 | [diff] [blame] | 1439 | "Failed to find {}".format(path)) |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1440 | |
| 1441 | if not split_args: |
| 1442 | return split_args |
| 1443 | |
| 1444 | if isinstance(split_args, list): |
| 1445 | for index, arg in enumerate(split_args[:-1]): |
| 1446 | if arg == '--signing_helper': |
| 1447 | signing_helper_path = split_args[index + 1] |
| 1448 | split_args[index + 1] = ResolveBinaryPath(signing_helper_path) |
| 1449 | break |
| 1450 | elif isinstance(split_args, str): |
| 1451 | split_args = ResolveBinaryPath(split_args) |
| 1452 | |
| 1453 | return split_args |
| 1454 | |
| 1455 | |
Tao Bao | 765668f | 2019-10-04 22:03:00 -0700 | [diff] [blame] | 1456 | def GetAvbPartitionArg(partition, image, info_dict=None): |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1457 | """Returns the VBMeta arguments for one partition. |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1458 | |
| 1459 | It sets up the VBMeta argument by including the partition descriptor from the |
| 1460 | given 'image', or by configuring the partition as a chained partition. |
| 1461 | |
| 1462 | Args: |
| 1463 | partition: The name of the partition (e.g. "system"). |
| 1464 | image: The path to the partition image. |
| 1465 | info_dict: A dict returned by common.LoadInfoDict(). Will use |
| 1466 | OPTIONS.info_dict if None has been given. |
| 1467 | |
| 1468 | Returns: |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1469 | A list of VBMeta arguments for one partition. |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1470 | """ |
| 1471 | if info_dict is None: |
| 1472 | info_dict = OPTIONS.info_dict |
| 1473 | |
| 1474 | # Check if chain partition is used. |
| 1475 | key_path = info_dict.get("avb_" + partition + "_key_path") |
cfig | 1aeef72 | 2019-09-20 22:45:06 +0800 | [diff] [blame] | 1476 | if not key_path: |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 1477 | return [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image] |
cfig | 1aeef72 | 2019-09-20 22:45:06 +0800 | [diff] [blame] | 1478 | |
| 1479 | # For a non-A/B device, we don't chain /recovery nor include its descriptor |
| 1480 | # into vbmeta.img. The recovery image will be configured on an independent |
| 1481 | # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION. |
| 1482 | # See details at |
| 1483 | # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery. |
Tao Bao | 3612c88 | 2019-10-14 17:49:31 -0700 | [diff] [blame] | 1484 | if info_dict.get("ab_update") != "true" and partition == "recovery": |
cfig | 1aeef72 | 2019-09-20 22:45:06 +0800 | [diff] [blame] | 1485 | return [] |
| 1486 | |
| 1487 | # Otherwise chain the partition into vbmeta. |
| 1488 | chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict) |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 1489 | return [AVB_ARG_NAME_CHAIN_PARTITION, chained_partition_arg] |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1490 | |
| 1491 | |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1492 | def GetAvbPartitionsArg(partitions, |
| 1493 | resolve_rollback_index_location_conflict=False, |
| 1494 | info_dict=None): |
| 1495 | """Returns the VBMeta arguments for all AVB partitions. |
| 1496 | |
| 1497 | It sets up the VBMeta argument by calling GetAvbPartitionArg of all |
| 1498 | partitions. |
| 1499 | |
| 1500 | Args: |
| 1501 | partitions: A dict of all AVB partitions. |
| 1502 | resolve_rollback_index_location_conflict: If true, resolve conflicting avb |
| 1503 | rollback index locations by assigning the smallest unused value. |
| 1504 | info_dict: A dict returned by common.LoadInfoDict(). |
| 1505 | |
| 1506 | Returns: |
| 1507 | A list of VBMeta arguments for all partitions. |
| 1508 | """ |
| 1509 | # An AVB partition will be linked into a vbmeta partition by either |
| 1510 | # AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG or AVB_ARG_NAME_CHAIN_PARTITION; there
| 1511 | # should be no other cases. |
| 1512 | valid_args = { |
| 1513 | AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: [], |
| 1514 | AVB_ARG_NAME_CHAIN_PARTITION: [] |
| 1515 | } |
| 1516 | |
| 1517 | for partition, path in partitions.items(): |
| 1518 | avb_partition_arg = GetAvbPartitionArg(partition, path, info_dict) |
| 1519 | if not avb_partition_arg: |
| 1520 | continue |
| 1521 | arg_name, arg_value = avb_partition_arg |
| 1522 | assert arg_name in valid_args |
| 1523 | valid_args[arg_name].append(arg_value) |
| 1524 | |
| 1525 | # Copy the arguments for non-chained AVB partitions directly without |
| 1526 | # intervention. |
| 1527 | avb_args = [] |
| 1528 | for image in valid_args[AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG]: |
| 1529 | avb_args.extend([AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]) |
| 1530 | |
| 1531 | # Handle chained AVB partitions. The rollback index location might be |
| 1532 | # adjusted if two partitions use the same value. This may happen when mixing |
| 1533 | # a shared system image with other vendor images. |
| 1534 | used_index_loc = set() |
| 1535 | for chained_partition_arg in valid_args[AVB_ARG_NAME_CHAIN_PARTITION]: |
| 1536 | if resolve_rollback_index_location_conflict: |
| 1537 | while chained_partition_arg.rollback_index_location in used_index_loc: |
| 1538 | chained_partition_arg.rollback_index_location += 1 |
| 1539 | |
| 1540 | used_index_loc.add(chained_partition_arg.rollback_index_location) |
| 1541 | avb_args.extend([AVB_ARG_NAME_CHAIN_PARTITION, |
| 1542 | chained_partition_arg.to_string()]) |
| 1543 | |
| 1544 | return avb_args |
| 1545 | |
| 1546 | |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1547 | def GetAvbChainedPartitionArg(partition, info_dict, key=None): |
| 1548 | """Constructs and returns the arg to build or verify a chained partition. |
| 1549 | |
| 1550 | Args: |
| 1551 | partition: The partition name. |
| 1552 | info_dict: The info dict to look up the key info and rollback index |
| 1553 | location. |
| 1554 | key: The key to be used for building or verifying the partition. Defaults to |
| 1555 | the key listed in info_dict. |
| 1556 | |
| 1557 | Returns: |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1558 | An AvbChainedPartitionArg object with rollback_index_location and |
| 1559 | pubkey_path that can be used to build or verify vbmeta image. |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1560 | """ |
| 1561 | if key is None: |
| 1562 | key = info_dict["avb_" + partition + "_key_path"] |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1563 | key = ResolveAVBSigningPathArgs(key) |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 1564 | pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key) |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1565 | rollback_index_location = info_dict[ |
| 1566 | "avb_" + partition + "_rollback_index_location"] |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1567 | return AvbChainedPartitionArg( |
| 1568 | partition=partition, |
| 1569 | rollback_index_location=int(rollback_index_location), |
| 1570 | pubkey_path=pubkey_path) |
Tao Bao | 02a0859 | 2018-07-22 12:40:45 -0700 | [diff] [blame] | 1571 | |
| 1572 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1573 | def _HasGkiCertificationArgs(): |
| 1574 | return ("gki_signing_key_path" in OPTIONS.info_dict and |
| 1575 | "gki_signing_algorithm" in OPTIONS.info_dict) |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1576 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1577 | |
Yi-Yo Chiang | 24da1a4 | 2022-02-22 19:51:15 +0800 | [diff] [blame] | 1578 | def _GenerateGkiCertificate(image, image_name): |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1579 | key_path = OPTIONS.info_dict.get("gki_signing_key_path") |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1580 | algorithm = OPTIONS.info_dict.get("gki_signing_algorithm") |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1581 | |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1582 | key_path = ResolveAVBSigningPathArgs(key_path) |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1583 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1584 | # Checks that key_path exists before processing the --gki_signing_* args.
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1585 | if not os.path.exists(key_path): |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 1586 | raise ExternalError( |
| 1587 | 'gki_signing_key_path: "{}" not found'.format(key_path)) |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1588 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1589 | output_certificate = tempfile.NamedTemporaryFile() |
| 1590 | cmd = [ |
| 1591 | "generate_gki_certificate", |
| 1592 | "--name", image_name, |
| 1593 | "--algorithm", algorithm, |
| 1594 | "--key", key_path, |
| 1595 | "--output", output_certificate.name, |
| 1596 | image, |
| 1597 | ] |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1598 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1599 | signature_args = OPTIONS.info_dict.get("gki_signing_signature_args", "") |
| 1600 | signature_args = signature_args.strip() |
| 1601 | if signature_args: |
| 1602 | cmd.extend(["--additional_avb_args", signature_args]) |
| 1603 | |
Yi-Yo Chiang | 24da1a4 | 2022-02-22 19:51:15 +0800 | [diff] [blame] | 1604 | args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "") |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1605 | args = args.strip() |
| 1606 | if args: |
| 1607 | cmd.extend(["--additional_avb_args", args]) |
| 1608 | |
| 1609 | RunAndCheckOutput(cmd) |
| 1610 | |
| 1611 | output_certificate.seek(os.SEEK_SET, 0) |
| 1612 | data = output_certificate.read() |
| 1613 | output_certificate.close() |
| 1614 | return data |
Bowgo Tsai | 27c39b0 | 2021-03-12 21:40:32 +0800 | [diff] [blame] | 1615 | |
| 1616 | |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1617 | def BuildVBMeta(image_path, partitions, name, needed_partitions, |
| 1618 | resolve_rollback_index_location_conflict=False): |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1619 | """Creates a VBMeta image. |
| 1620 | |
| 1621 | It generates the requested VBMeta image. The requested image could be for |
| 1622 | top-level or chained VBMeta image, which is determined based on the name. |
| 1623 | |
| 1624 | Args: |
| 1625 | image_path: The output path for the new VBMeta image. |
| 1626 | partitions: A dict that's keyed by partition names with image paths as |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1627 | values. Only valid partition names are accepted, i.e. partitions listed
| 1628 | in common.AVB_PARTITIONS and custom partitions listed in
| 1629 | OPTIONS.info_dict.get("avb_custom_images_partition_list").
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1630 | name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'. |
| 1631 | needed_partitions: Partitions whose descriptors should be included into the |
| 1632 | generated VBMeta image. |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1633 | resolve_rollback_index_location_conflict: If true, resolve conflicting avb |
| 1634 | rollback index locations by assigning the smallest unused value. |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1635 | |
| 1636 | Raises: |
| 1637 | AssertionError: On invalid input args. |
| 1638 | """ |
| 1639 | avbtool = OPTIONS.info_dict["avb_avbtool"] |
| 1640 | cmd = [avbtool, "make_vbmeta_image", "--output", image_path] |
| 1641 | AppendAVBSigningArgs(cmd, name) |
| 1642 | |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1643 | custom_partitions = OPTIONS.info_dict.get( |
| 1644 | "avb_custom_images_partition_list", "").strip().split() |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 1645 | custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get( |
| 1646 | "avb_custom_vbmeta_images_partition_list", "").strip().split()] |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1647 | |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1648 | avb_partitions = {} |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1649 | for partition, path in partitions.items(): |
| 1650 | if partition not in needed_partitions: |
| 1651 | continue |
| 1652 | assert (partition in AVB_PARTITIONS or |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1653 | partition in AVB_VBMETA_PARTITIONS or |
Kelvin Zhang | b81b4e3 | 2023-01-10 10:37:56 -0800 | [diff] [blame] | 1654 | partition in custom_avb_partitions or |
Hongguang Chen | f23364d | 2020-04-27 18:36:36 -0700 | [diff] [blame] | 1655 | partition in custom_partitions), \ |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1656 | 'Unknown partition: {}'.format(partition) |
| 1657 | assert os.path.exists(path), \ |
| 1658 | 'Failed to find {} for {}'.format(path, partition) |
Dennis Song | 4aae62e | 2023-10-02 04:31:34 +0000 | [diff] [blame] | 1659 | avb_partitions[partition] = path |
| 1660 | cmd.extend(GetAvbPartitionsArg(avb_partitions, |
| 1661 | resolve_rollback_index_location_conflict)) |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1662 | |
| 1663 | args = OPTIONS.info_dict.get("avb_{}_args".format(name)) |
| 1664 | if args and args.strip(): |
| 1665 | split_args = shlex.split(args) |
| 1666 | for index, arg in enumerate(split_args[:-1]): |
Ivan Lozano | b021b2a | 2020-07-28 09:31:06 -0400 | [diff] [blame] | 1667 | # Check that the image file exists. Some images might be defined |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1668 | # as a path relative to source tree, which may not be available at the |
| 1669 | # same location when running this script (we have the input target_files |
| 1670 | # zip only). For such cases, we additionally scan other locations (e.g. |
| 1671 | # IMAGES/, RADIO/, etc) before bailing out. |
Dennis Song | 6e5e44d | 2023-10-03 02:18:06 +0000 | [diff] [blame] | 1672 | if arg == AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: |
Tianjie Xu | eaed60c | 2020-03-12 00:33:28 -0700 | [diff] [blame] | 1673 | chained_image = split_args[index + 1] |
| 1674 | if os.path.exists(chained_image): |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1675 | continue |
| 1676 | found = False |
| 1677 | for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']: |
| 1678 | alt_path = os.path.join( |
Tianjie Xu | eaed60c | 2020-03-12 00:33:28 -0700 | [diff] [blame] | 1679 | OPTIONS.input_tmp, dir_name, os.path.basename(chained_image)) |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1680 | if os.path.exists(alt_path): |
| 1681 | split_args[index + 1] = alt_path |
| 1682 | found = True |
| 1683 | break |
Tianjie Xu | eaed60c | 2020-03-12 00:33:28 -0700 | [diff] [blame] | 1684 | assert found, 'Failed to find {}'.format(chained_image) |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1685 | |
| 1686 | split_args = ResolveAVBSigningPathArgs(split_args) |
Daniel Norman | 276f062 | 2019-07-26 14:13:51 -0700 | [diff] [blame] | 1687 | cmd.extend(split_args) |
| 1688 | |
| 1689 | RunAndCheckOutput(cmd) |
| 1690 | |
| 1691 | |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1692 | def _MakeRamdisk(sourcedir, fs_config_file=None, |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1693 | dev_node_file=None, |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1694 | ramdisk_format=RamdiskFormat.GZ): |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1695 | ramdisk_img = tempfile.NamedTemporaryFile() |
| 1696 | |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1697 | cmd = ["mkbootfs"] |
| 1698 | |
| 1699 | if fs_config_file and os.access(fs_config_file, os.F_OK): |
| 1700 | cmd.extend(["-f", fs_config_file]) |
| 1701 | |
| 1702 | if dev_node_file and os.access(dev_node_file, os.F_OK): |
| 1703 | cmd.extend(["-n", dev_node_file]) |
| 1704 | |
| 1705 | cmd.append(os.path.join(sourcedir, "RAMDISK")) |
| 1706 | |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1707 | p1 = Run(cmd, stdout=subprocess.PIPE) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1708 | if ramdisk_format == RamdiskFormat.LZ4: |
Kelvin Zhang | cff4d76 | 2020-07-29 16:37:51 -0400 | [diff] [blame] | 1709 | p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout, |
J. Avila | 98cd4cc | 2020-06-10 20:09:10 +0000 | [diff] [blame] | 1710 | stdout=ramdisk_img.file.fileno()) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1711 | elif ramdisk_format == RamdiskFormat.GZ: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 1712 | p2 = Run(["gzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno()) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1713 | else: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 1714 | raise ValueError("Only lz4 and gzip ramdisk formats are supported.")
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1715 | |
| 1716 | p2.wait() |
| 1717 | p1.wait() |
| 1718 | assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,) |
J. Avila | 98cd4cc | 2020-06-10 20:09:10 +0000 | [diff] [blame] | 1719 | assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 1720 | |
| 1721 | return ramdisk_img |
| 1722 | |
| 1723 | |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1724 | def _BuildBootableImage(image_name, sourcedir, fs_config_file, |
| 1725 | dev_node_file=None, info_dict=None, |
Tao Bao | d42e97e | 2016-11-30 12:11:57 -0800 | [diff] [blame] | 1726 | has_ramdisk=False, two_step_image=False): |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1727 | """Build a bootable image from the specified sourcedir. |
Doug Zongker | e1c31ba | 2009-06-23 17:40:35 -0700 | [diff] [blame] | 1728 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1729 | Take a kernel, cmdline, and optionally a ramdisk directory from the input (in |
Tao Bao | d42e97e | 2016-11-30 12:11:57 -0800 | [diff] [blame] | 1730 | 'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if |
| 1731 | we are building a two-step special image (i.e. building a recovery image to |
| 1732 | be loaded into /boot in two-step OTAs). |
| 1733 | |
| 1734 | Return the image data, or None if sourcedir does not appear to contain files
| 1735 | for building the requested image. |
| 1736 | """ |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1737 | |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1738 | if info_dict is None: |
| 1739 | info_dict = OPTIONS.info_dict |
| 1740 | |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1741 | # "boot" or "recovery", without extension. |
| 1742 | partition_name = os.path.basename(sourcedir).lower() |
| 1743 | |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1744 | kernel = None |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1745 | if partition_name == "recovery": |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1746 | if info_dict.get("exclude_kernel_from_recovery_image") == "true": |
| 1747 | logger.info("Excluded kernel binary from recovery image.") |
| 1748 | else: |
| 1749 | kernel = "kernel" |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1750 | elif partition_name == "init_boot": |
| 1751 | pass |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 1752 | else: |
| 1753 | kernel = image_name.replace("boot", "kernel") |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1754 | kernel = kernel.replace(".img", "") |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1755 | if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK): |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1756 | return None |
| 1757 | |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1758 | kernel_path = os.path.join(sourcedir, kernel) if kernel else None |
| 1759 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1760 | if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK): |
Doug Zongker | e1c31ba | 2009-06-23 17:40:35 -0700 | [diff] [blame] | 1761 | return None |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1762 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1763 | img = tempfile.NamedTemporaryFile() |
| 1764 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1765 | if has_ramdisk: |
TJ Rhoades | 6f488e9 | 2022-05-01 22:16:22 -0700 | [diff] [blame] | 1766 | ramdisk_format = GetRamdiskFormat(info_dict) |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1767 | ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, dev_node_file, |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 1768 | ramdisk_format=ramdisk_format) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1769 | |
Bjorn Andersson | 612e2cd | 2012-11-25 16:53:44 -0800 | [diff] [blame] | 1770 | # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set |
| 1771 | mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" |
| 1772 | |
Yifan Hong | 63c5ca1 | 2020-10-08 11:54:02 -0700 | [diff] [blame] | 1773 | cmd = [mkbootimg] |
Yi-Yo Chiang | 36054e2 | 2022-01-08 22:29:30 +0800 | [diff] [blame] | 1774 | if kernel_path is not None: |
| 1775 | cmd.extend(["--kernel", kernel_path]) |
Doug Zongker | 38a649f | 2009-06-17 09:07:09 -0700 | [diff] [blame] | 1776 | |
Benoit Fradin | a45a868 | 2014-07-14 21:00:43 +0200 | [diff] [blame] | 1777 | fn = os.path.join(sourcedir, "second") |
| 1778 | if os.access(fn, os.F_OK): |
| 1779 | cmd.append("--second") |
| 1780 | cmd.append(fn) |
| 1781 | |
Hridya Valsaraju | 9683b2f | 2019-01-22 18:08:59 -0800 | [diff] [blame] | 1782 | fn = os.path.join(sourcedir, "dtb") |
| 1783 | if os.access(fn, os.F_OK): |
| 1784 | cmd.append("--dtb") |
| 1785 | cmd.append(fn) |
| 1786 | |
Doug Zongker | 171f1cd | 2009-06-15 22:36:37 -0700 | [diff] [blame] | 1787 | fn = os.path.join(sourcedir, "cmdline") |
| 1788 | if os.access(fn, os.F_OK): |
Doug Zongker | 38a649f | 2009-06-17 09:07:09 -0700 | [diff] [blame] | 1789 | cmd.append("--cmdline") |
| 1790 | cmd.append(open(fn).read().rstrip("\n")) |
| 1791 | |
| 1792 | fn = os.path.join(sourcedir, "base") |
| 1793 | if os.access(fn, os.F_OK): |
| 1794 | cmd.append("--base") |
| 1795 | cmd.append(open(fn).read().rstrip("\n")) |
| 1796 | |
Ying Wang | 4de6b5b | 2010-08-25 14:29:34 -0700 | [diff] [blame] | 1797 | fn = os.path.join(sourcedir, "pagesize") |
| 1798 | if os.access(fn, os.F_OK): |
| 1799 | cmd.append("--pagesize") |
| 1800 | cmd.append(open(fn).read().rstrip("\n")) |
| 1801 | |
Steve Muckle | f84668e | 2020-03-16 19:13:46 -0700 | [diff] [blame] | 1802 | if partition_name == "recovery": |
| 1803 | args = info_dict.get("recovery_mkbootimg_args") |
P.Adarsh Reddy | d8e24ee | 2020-05-04 19:40:16 +0530 | [diff] [blame] | 1804 | if not args: |
| 1805 | # Fall back to "mkbootimg_args" for recovery image |
| 1806 | # in case "recovery_mkbootimg_args" is not set. |
| 1807 | args = info_dict.get("mkbootimg_args") |
Devin Moore | afdd7c7 | 2021-12-13 22:04:08 +0000 | [diff] [blame] | 1808 | elif partition_name == "init_boot": |
| 1809 | args = info_dict.get("mkbootimg_init_args") |
Steve Muckle | f84668e | 2020-03-16 19:13:46 -0700 | [diff] [blame] | 1810 | else: |
| 1811 | args = info_dict.get("mkbootimg_args") |
Doug Zongker | d513160 | 2012-08-02 14:46:42 -0700 | [diff] [blame] | 1812 | if args and args.strip(): |
Jianxun Zhang | 0984949 | 2013-04-17 15:19:19 -0700 | [diff] [blame] | 1813 | cmd.extend(shlex.split(args)) |
Doug Zongker | d513160 | 2012-08-02 14:46:42 -0700 | [diff] [blame] | 1814 | |
Yi-Yo Chiang | 24da1a4 | 2022-02-22 19:51:15 +0800 | [diff] [blame] | 1815 | args = info_dict.get("mkbootimg_version_args") |
| 1816 | if args and args.strip(): |
| 1817 | cmd.extend(shlex.split(args)) |
Sami Tolvanen | 3303d90 | 2016-03-15 16:49:30 +0000 | [diff] [blame] | 1818 | |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1819 | if has_ramdisk: |
| 1820 | cmd.extend(["--ramdisk", ramdisk_img.name]) |
| 1821 | |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1822 | img_unsigned = None |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 1823 | if info_dict.get("vboot"): |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1824 | img_unsigned = tempfile.NamedTemporaryFile() |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1825 | cmd.extend(["--output", img_unsigned.name]) |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1826 | else: |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1827 | cmd.extend(["--output", img.name]) |
Doug Zongker | 38a649f | 2009-06-17 09:07:09 -0700 | [diff] [blame] | 1828 | |
Chen, ZhiminX | 752439b | 2018-09-23 22:10:47 +0800 | [diff] [blame] | 1829 | if partition_name == "recovery": |
| 1830 | if info_dict.get("include_recovery_dtbo") == "true": |
| 1831 | fn = os.path.join(sourcedir, "recovery_dtbo") |
| 1832 | cmd.extend(["--recovery_dtbo", fn]) |
| 1833 | if info_dict.get("include_recovery_acpio") == "true": |
| 1834 | fn = os.path.join(sourcedir, "recovery_acpio") |
| 1835 | cmd.extend(["--recovery_acpio", fn]) |
Hridya Valsaraju | e74a38b | 2018-03-21 12:15:11 -0700 | [diff] [blame] | 1836 | |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 1837 | RunAndCheckOutput(cmd) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1838 | |
Yi-Yo Chiang | 24da1a4 | 2022-02-22 19:51:15 +0800 | [diff] [blame] | 1839 | if _HasGkiCertificationArgs(): |
| 1840 | if not os.path.exists(img.name): |
| 1841 | raise ValueError("Cannot find GKI boot.img") |
| 1842 | if kernel_path is None or not os.path.exists(kernel_path): |
| 1843 | raise ValueError("Cannot find GKI kernel.img") |
| 1844 | |
| 1845 | # Certify GKI images. |
| 1846 | boot_signature_bytes = b'' |
| 1847 | boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot") |
| 1848 | boot_signature_bytes += _GenerateGkiCertificate( |
| 1849 | kernel_path, "generic_kernel") |
| 1850 | |
| 1851 | BOOT_SIGNATURE_SIZE = 16 * 1024 |
| 1852 | if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE: |
| 1853 | raise ValueError( |
| 1854 | f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}") |
| 1855 | boot_signature_bytes += ( |
| 1856 | b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes))) |
| 1857 | assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE |
| 1858 | |
| 1859 | with open(img.name, 'ab') as f: |
| 1860 | f.write(boot_signature_bytes) |
| 1861 | |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1862 | # Sign the image if vboot is non-empty. |
hungweichen | 22e3b01 | 2022-08-19 06:35:43 +0000 | [diff] [blame] | 1863 | if info_dict.get("vboot"): |
Tao Bao | bf70c31 | 2017-07-11 17:27:55 -0700 | [diff] [blame] | 1864 | path = "/" + partition_name |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1865 | img_keyblock = tempfile.NamedTemporaryFile() |
Tao Bao | 4f104d1 | 2017-02-17 23:21:31 -0800 | [diff] [blame] | 1866 | # We have switched from the prebuilt futility binary to the tool
| 1867 | # (futility-host) built from source. Override the setting in the old
| 1868 | # TF.zip. |
| 1869 | futility = info_dict["futility"] |
| 1870 | if futility.startswith("prebuilts/"): |
| 1871 | futility = "futility-host" |
| 1872 | cmd = [info_dict["vboot_signer_cmd"], futility, |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1873 | img_unsigned.name, info_dict["vboot_key"] + ".vbpubk", |
Furquan Shaikh | 852b8de | 2015-08-10 11:43:45 -0700 | [diff] [blame] | 1874 | info_dict["vboot_key"] + ".vbprivk", |
| 1875 | info_dict["vboot_subkey"] + ".vbprivk", |
| 1876 | img_keyblock.name, |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1877 | img.name] |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 1878 | RunAndCheckOutput(cmd) |
Tao Bao | d95e9fd | 2015-03-29 23:07:41 -0700 | [diff] [blame] | 1879 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 1880 | # Clean up the temp files. |
| 1881 | img_unsigned.close() |
| 1882 | img_keyblock.close() |
| 1883 | |
David Zeuthen | 8fecb28 | 2017-12-01 16:24:01 -0500 | [diff] [blame] | 1884 | # AVB: if enabled, calculate and add hash to boot.img or recovery.img. |
Bowgo Tsai | 3e599ea | 2017-05-26 18:30:04 +0800 | [diff] [blame] | 1885 | if info_dict.get("avb_enable") == "true": |
Tao Bao | f88e0ce | 2019-03-18 14:01:38 -0700 | [diff] [blame] | 1886 | avbtool = info_dict["avb_avbtool"] |
Steve Muckle | 903a1ca | 2020-05-07 17:32:10 -0700 | [diff] [blame] | 1887 | if partition_name == "recovery": |
| 1888 | part_size = info_dict["recovery_size"] |
| 1889 | else: |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 1890 | part_size = info_dict[image_name.replace(".img", "_size")] |
David Zeuthen | 2ce63ed | 2016-09-15 13:43:54 -0400 | [diff] [blame] | 1891 | cmd = [avbtool, "add_hash_footer", "--image", img.name, |
Tao Bao | bf70c31 | 2017-07-11 17:27:55 -0700 | [diff] [blame] | 1892 | "--partition_size", str(part_size), "--partition_name", |
| 1893 | partition_name] |
Kelvin Zhang | de53f7d | 2023-10-03 12:21:28 -0700 | [diff] [blame] | 1894 | salt = None |
| 1895 | if kernel_path is not None: |
| 1896 | with open(kernel_path, "rb") as fp: |
| 1897 | salt = sha256(fp.read()).hexdigest() |
| 1898 | AppendAVBSigningArgs(cmd, partition_name, salt) |
David Zeuthen | 8fecb28 | 2017-12-01 16:24:01 -0500 | [diff] [blame] | 1899 | args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args") |
David Zeuthen | 2ce63ed | 2016-09-15 13:43:54 -0400 | [diff] [blame] | 1900 | if args and args.strip(): |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1901 | split_args = ResolveAVBSigningPathArgs(shlex.split(args)) |
| 1902 | cmd.extend(split_args) |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 1903 | RunAndCheckOutput(cmd) |
David Zeuthen | d995f4b | 2016-01-29 16:59:17 -0500 | [diff] [blame] | 1904 | |
| 1905 | img.seek(0, os.SEEK_SET)
| 1906 | data = img.read() |
| 1907 | |
| 1908 | if has_ramdisk: |
| 1909 | ramdisk_img.close() |
| 1910 | img.close() |
| 1911 | |
| 1912 | return data |
| 1913 | |
| 1914 | |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1915 | def _SignBootableImage(image_path, prebuilt_name, partition_name, |
| 1916 | info_dict=None): |
| 1917 | """Performs AVB signing for a prebuilt boot.img. |
| 1918 | |
| 1919 | Args: |
| 1920 | image_path: The full path of the image, e.g., /path/to/boot.img. |
| 1921 | prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img, |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1922 | boot-5.10.img, recovery.img or init_boot.img. |
| 1923 | partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'. |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1924 | info_dict: The information dict read from misc_info.txt. |
| 1925 | """ |
| 1926 | if info_dict is None: |
| 1927 | info_dict = OPTIONS.info_dict |
| 1928 | |
| 1929 | # AVB: if enabled, calculate and add hash to boot.img or recovery.img. |
| 1930 | if info_dict.get("avb_enable") == "true": |
| 1931 | avbtool = info_dict["avb_avbtool"] |
| 1932 | if partition_name == "recovery": |
| 1933 | part_size = info_dict["recovery_size"] |
| 1934 | else: |
| 1935 | part_size = info_dict[prebuilt_name.replace(".img", "_size")] |
| 1936 | |
| 1937 | cmd = [avbtool, "add_hash_footer", "--image", image_path, |
| 1938 | "--partition_size", str(part_size), "--partition_name", |
| 1939 | partition_name] |
Kelvin Zhang | 160762a | 2023-10-17 12:27:56 -0700 | [diff] [blame] | 1940 | # Use sha256 of the kernel as salt for reproducible builds |
| 1941 | with tempfile.TemporaryDirectory() as tmpdir: |
| 1942 | RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir]) |
| 1943 | for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]: |
| 1944 | path = os.path.join(tmpdir, filename) |
| 1945 | if os.path.exists(path) and os.path.getsize(path): |
Kelvin Zhang | 9f9ac4e | 2023-11-01 10:12:03 -0700 | [diff] [blame] | 1946 | print("Using {} as salt for avb footer of {}".format( |
| 1947 | filename, partition_name)) |
Kelvin Zhang | 160762a | 2023-10-17 12:27:56 -0700 | [diff] [blame] | 1948 | with open(path, "rb") as fp: |
| 1949 | salt = sha256(fp.read()).hexdigest() |
Kelvin Zhang | 9f9ac4e | 2023-11-01 10:12:03 -0700 | [diff] [blame] | 1950 | break |
Kelvin Zhang | 160762a | 2023-10-17 12:27:56 -0700 | [diff] [blame] | 1951 | AppendAVBSigningArgs(cmd, partition_name, salt) |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1952 | args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args") |
| 1953 | if args and args.strip(): |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 1954 | split_args = ResolveAVBSigningPathArgs(shlex.split(args)) |
| 1955 | cmd.extend(split_args) |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 1956 | RunAndCheckOutput(cmd) |
| 1957 | |
| 1958 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1959 | def HasRamdisk(partition_name, info_dict=None): |
| 1960 | """Returns True/False indicating whether a bootable image should have a ramdisk.
| 1961 | |
| 1962 | Args: |
| 1963 | partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'. |
| 1964 | info_dict: The information dict read from misc_info.txt. |
| 1965 | """ |
| 1966 | if info_dict is None: |
| 1967 | info_dict = OPTIONS.info_dict |
| 1968 | |
| 1969 | if partition_name != "boot": |
| 1970 | return True # init_boot.img or recovery.img has a ramdisk. |
| 1971 | |
| 1972 | if info_dict.get("recovery_as_boot") == "true": |
| 1973 | return True # the recovery-as-boot boot.img has a RECOVERY ramdisk. |
| 1974 | |
Bowgo Tsai | 85578e0 | 2022-04-19 10:50:59 +0800 | [diff] [blame] | 1975 | if info_dict.get("gki_boot_image_without_ramdisk") == "true": |
| 1976 | return False # A GKI boot.img has no ramdisk since Android-13. |
| 1977 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 1978 | if info_dict.get("system_root_image") == "true": |
| 1979 | # The ramdisk content is merged into the system.img, so there is NO |
| 1980 | # ramdisk in the boot.img or boot-<kernel version>.img. |
| 1981 | return False |
| 1982 | |
| 1983 | if info_dict.get("init_boot") == "true": |
| 1984 | # The ramdisk is moved to the init_boot.img, so there is NO |
| 1985 | # ramdisk in the boot.img or boot-<kernel version>.img. |
| 1986 | return False |
| 1987 | |
| 1988 | return True |
| 1989 | |
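# Illustrative sketch (not part of the original module): how HasRamdisk()
# resolves a few hypothetical misc_info.txt settings. The dicts below are
# made-up examples, not real build output.
#
#   HasRamdisk("recovery", {})                                     -> True
#   HasRamdisk("boot", {"recovery_as_boot": "true"})               -> True
#   HasRamdisk("boot", {"gki_boot_image_without_ramdisk": "true"}) -> False
#   HasRamdisk("boot", {"init_boot": "true"})                      -> False
#   HasRamdisk("boot", {})                                         -> True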
| 1990 | |
Doug Zongker | d513160 | 2012-08-02 14:46:42 -0700 | [diff] [blame] | 1991 | def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir, |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 1992 | info_dict=None, two_step_image=False, |
| 1993 | dev_nodes=False): |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 1994 | """Return a File object with the desired bootable image. |
| 1995 | |
| 1996 | Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name', |
| 1997 | otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from |
| 1998 | the source files in 'unpack_dir'/'tree_subdir'.""" |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 1999 | |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 2000 | if info_dict is None: |
| 2001 | info_dict = OPTIONS.info_dict |
| 2002 | |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2003 | prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name) |
| 2004 | if os.path.exists(prebuilt_path): |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 2005 | logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name) |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2006 | return File.FromLocalFile(name, prebuilt_path) |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 2007 | |
| 2008 | prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name) |
| 2009 | if os.path.exists(prebuilt_path): |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 2010 | logger.info("using prebuilt %s from IMAGES...", prebuilt_name) |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 2011 | return File.FromLocalFile(name, prebuilt_path) |
| 2012 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 2013 | partition_name = tree_subdir.lower() |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 2014 | prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name) |
| 2015 | if os.path.exists(prebuilt_path): |
| 2016 | logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name) |
| 2017 | signed_img = MakeTempFile() |
| 2018 | shutil.copy(prebuilt_path, signed_img) |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 2019 | _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict) |
| 2020 | return File.FromLocalFile(name, signed_img) |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 2021 | |
Bowgo Tsai | cf9ead8 | 2021-05-20 00:14:42 +0800 | [diff] [blame] | 2022 | logger.info("building image from target_files %s...", tree_subdir) |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 2023 | |
Bowgo Tsai | 88fc2bd | 2022-01-05 20:19:25 +0800 | [diff] [blame] | 2024 | has_ramdisk = HasRamdisk(partition_name, info_dict) |
Tao Bao | 7a5bf8a | 2015-07-21 18:01:20 -0700 | [diff] [blame] | 2025 | |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 2026 | fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt" |
Steve Muckle | 9793cf6 | 2020-04-08 18:27:00 -0700 | [diff] [blame] | 2027 | data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir), |
David Zeuthen | 2ce63ed | 2016-09-15 13:43:54 -0400 | [diff] [blame] | 2028 | os.path.join(unpack_dir, fs_config), |
Vincent Donnefort | 6e861e9 | 2023-02-17 10:12:57 +0000 | [diff] [blame] | 2029 | os.path.join(unpack_dir, 'META/ramdisk_node_list') |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 2030 | if dev_nodes else None, |
Tao Bao | d42e97e | 2016-11-30 12:11:57 -0800 | [diff] [blame] | 2031 | info_dict, has_ramdisk, two_step_image) |
Doug Zongker | 6f1d031 | 2014-08-22 08:07:12 -0700 | [diff] [blame] | 2032 | if data: |
| 2033 | return File(name, data) |
| 2034 | return None |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2035 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2036 | |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2037 | def _BuildVendorBootImage(sourcedir, partition_name, info_dict=None): |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2038 | """Build a vendor boot image from the specified sourcedir. |
| 2039 | |
| 2040 | Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and |
| 2041 | turn them into a vendor boot image. |
| 2042 | |
| 2043 | Return the image data, or None if sourcedir does not appear to contain files
| 2044 | for building the requested image. |
| 2045 | """ |
| 2046 | |
| 2047 | if info_dict is None: |
| 2048 | info_dict = OPTIONS.info_dict |
| 2049 | |
| 2050 | img = tempfile.NamedTemporaryFile() |
| 2051 | |
TJ Rhoades | 6f488e9 | 2022-05-01 22:16:22 -0700 | [diff] [blame] | 2052 | ramdisk_format = GetRamdiskFormat(info_dict) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 2053 | ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2054 | |
| 2055 | # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set |
| 2056 | mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg" |
| 2057 | |
| 2058 | cmd = [mkbootimg] |
| 2059 | |
| 2060 | fn = os.path.join(sourcedir, "dtb") |
| 2061 | if os.access(fn, os.F_OK): |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2062 | has_vendor_kernel_boot = (info_dict.get( |
| 2063 | "vendor_kernel_boot", "").lower() == "true") |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2064 | |
| 2065 | # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot. |
| 2066 | # Otherwise pack dtb into vendor_boot. |
| 2067 | if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot": |
| 2068 | cmd.append("--dtb") |
| 2069 | cmd.append(fn) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2070 | |
| 2071 | fn = os.path.join(sourcedir, "vendor_cmdline") |
| 2072 | if os.access(fn, os.F_OK): |
| 2073 | cmd.append("--vendor_cmdline") |
| 2074 | cmd.append(open(fn).read().rstrip("\n")) |
| 2075 | |
| 2076 | fn = os.path.join(sourcedir, "base") |
| 2077 | if os.access(fn, os.F_OK): |
| 2078 | cmd.append("--base") |
| 2079 | cmd.append(open(fn).read().rstrip("\n")) |
| 2080 | |
| 2081 | fn = os.path.join(sourcedir, "pagesize") |
| 2082 | if os.access(fn, os.F_OK): |
| 2083 | cmd.append("--pagesize") |
| 2084 | cmd.append(open(fn).read().rstrip("\n")) |
| 2085 | |
| 2086 | args = info_dict.get("mkbootimg_args") |
| 2087 | if args and args.strip(): |
| 2088 | cmd.extend(shlex.split(args)) |
| 2089 | |
| 2090 | args = info_dict.get("mkbootimg_version_args") |
| 2091 | if args and args.strip(): |
| 2092 | cmd.extend(shlex.split(args)) |
| 2093 | |
| 2094 | cmd.extend(["--vendor_ramdisk", ramdisk_img.name]) |
| 2095 | cmd.extend(["--vendor_boot", img.name]) |
| 2096 | |
Devin Moore | 5050901 | 2021-01-13 10:45:04 -0800 | [diff] [blame] | 2097 | fn = os.path.join(sourcedir, "vendor_bootconfig") |
| 2098 | if os.access(fn, os.F_OK): |
| 2099 | cmd.append("--vendor_bootconfig") |
| 2100 | cmd.append(fn) |
| 2101 | |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2102 | ramdisk_fragment_imgs = [] |
| 2103 | fn = os.path.join(sourcedir, "vendor_ramdisk_fragments") |
| 2104 | if os.access(fn, os.F_OK): |
| 2105 | ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n")) |
| 2106 | for ramdisk_fragment in ramdisk_fragments: |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 2107 | fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", |
| 2108 | ramdisk_fragment, "mkbootimg_args") |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2109 | cmd.extend(shlex.split(open(fn).read().rstrip("\n"))) |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 2110 | fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", |
| 2111 | ramdisk_fragment, "prebuilt_ramdisk") |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2112 | # Use prebuilt image if found, else create ramdisk from supplied files. |
| 2113 | if os.access(fn, os.F_OK): |
| 2114 | ramdisk_fragment_pathname = fn |
| 2115 | else: |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 2116 | ramdisk_fragment_root = os.path.join( |
| 2117 | sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment) |
jiajia tang | 836f76b | 2021-04-02 14:48:26 +0800 | [diff] [blame] | 2118 | ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, |
| 2119 | ramdisk_format=ramdisk_format) |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2120 | ramdisk_fragment_imgs.append(ramdisk_fragment_img) |
| 2121 | ramdisk_fragment_pathname = ramdisk_fragment_img.name |
| 2122 | cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname]) |
| 2123 | |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2124 | RunAndCheckOutput(cmd) |
| 2125 | |
| 2126 | # AVB: if enabled, calculate and add hash. |
| 2127 | if info_dict.get("avb_enable") == "true": |
| 2128 | avbtool = info_dict["avb_avbtool"] |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2129 | part_size = info_dict[f'{partition_name}_size'] |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2130 | cmd = [avbtool, "add_hash_footer", "--image", img.name, |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2131 | "--partition_size", str(part_size), "--partition_name", partition_name] |
| 2132 | AppendAVBSigningArgs(cmd, partition_name) |
| 2133 | args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args') |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2134 | if args and args.strip(): |
zhangyongpeng | 7075697 | 2023-04-12 15:31:33 +0800 | [diff] [blame] | 2135 | split_args = ResolveAVBSigningPathArgs(shlex.split(args)) |
| 2136 | cmd.extend(split_args) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2137 | RunAndCheckOutput(cmd) |
| 2138 | |
| 2139 | img.seek(0, os.SEEK_SET)
| 2140 | data = img.read() |
| 2141 | |
Yo Chiang | d21e7dc | 2020-12-10 18:42:47 +0800 | [diff] [blame] | 2142 | for f in ramdisk_fragment_imgs: |
| 2143 | f.close() |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2144 | ramdisk_img.close() |
| 2145 | img.close() |
| 2146 | |
| 2147 | return data |
| 2148 | |
| 2149 | |
| 2150 | def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir, |
| 2151 | info_dict=None): |
| 2152 | """Return a File object with the desired vendor boot image. |
| 2153 | |
| 2154 | Look for it under 'unpack_dir'/IMAGES, otherwise construct it from |
| 2155 | the source files in 'unpack_dir'/'tree_subdir'.""" |
| 2156 | |
| 2157 | prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name) |
| 2158 | if os.path.exists(prebuilt_path): |
| 2159 | logger.info("using prebuilt %s from IMAGES...", prebuilt_name) |
| 2160 | return File.FromLocalFile(name, prebuilt_path) |
| 2161 | |
| 2162 | logger.info("building image from target_files %s...", tree_subdir) |
| 2163 | |
| 2164 | if info_dict is None: |
| 2165 | info_dict = OPTIONS.info_dict |
| 2166 | |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2167 | data = _BuildVendorBootImage( |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2168 | os.path.join(unpack_dir, tree_subdir), "vendor_boot", info_dict) |
| 2169 | if data: |
| 2170 | return File(name, data) |
| 2171 | return None |
| 2172 | |
| 2173 | |
| 2174 | def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir, |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2175 | info_dict=None): |
Lucas Wei | 0323025 | 2022-04-18 16:00:40 +0800 | [diff] [blame] | 2176 | """Return a File object with the desired vendor kernel boot image. |
| 2177 | |
| 2178 | Look for it under 'unpack_dir'/IMAGES, otherwise construct it from |
| 2179 | the source files in 'unpack_dir'/'tree_subdir'.""" |
| 2180 | |
| 2181 | prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name) |
| 2182 | if os.path.exists(prebuilt_path): |
| 2183 | logger.info("using prebuilt %s from IMAGES...", prebuilt_name) |
| 2184 | return File.FromLocalFile(name, prebuilt_path) |
| 2185 | |
| 2186 | logger.info("building image from target_files %s...", tree_subdir) |
| 2187 | |
| 2188 | if info_dict is None: |
| 2189 | info_dict = OPTIONS.info_dict |
| 2190 | |
| 2191 | data = _BuildVendorBootImage( |
| 2192 | os.path.join(unpack_dir, tree_subdir), "vendor_kernel_boot", info_dict) |
Steve Muckle | e1b1086 | 2019-07-10 10:49:37 -0700 | [diff] [blame] | 2193 | if data: |
| 2194 | return File(name, data) |
| 2195 | return None |
| 2196 | |
| 2197 | |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2198 | def Gunzip(in_filename, out_filename): |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 2199 | """Gunzips the given gzip compressed file to a given output file.""" |
| 2200 | with gzip.open(in_filename, "rb") as in_file, \ |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2201 | open(out_filename, "wb") as out_file: |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2202 | shutil.copyfileobj(in_file, out_file) |
| 2203 | |
| 2204 | |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2205 | def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str): |
| 2206 | # According to https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838 |
| 2207 | # the higher bits of |external_attr| hold the Unix file permissions and file type
| 2208 | unix_filetype = info.external_attr >> 16 |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2209 | file_perm = unix_filetype & 0o777 |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2210 | |
| 2211 | def CheckMask(a, mask): |
| 2212 | return (a & mask) == mask |
| 2213 | |
| 2214 | def IsSymlink(a): |
| 2215 | return CheckMask(a, stat.S_IFLNK) |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2216 | |
| 2217 | def IsDir(a): |
| 2218 | return CheckMask(a, stat.S_IFDIR) |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2219 | # python3.11's zipfile implementation doesn't handle symlinks correctly
| 2220 | if not IsSymlink(unix_filetype): |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2221 | target = input_zip.extract(info, dirname) |
| 2222 | # We want to ensure that the file is at least read/writable by owner and readable by all users |
| 2223 | if IsDir(unix_filetype): |
| 2224 | os.chmod(target, file_perm | 0o755) |
| 2225 | else: |
| 2226 | os.chmod(target, file_perm | 0o644) |
| 2227 | return target |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2228 | if dirname is None: |
| 2229 | dirname = os.getcwd() |
| 2230 | target = os.path.join(dirname, info.filename) |
| 2231 | os.makedirs(os.path.dirname(target), exist_ok=True) |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2232 | if os.path.exists(target): |
| 2233 | os.unlink(target) |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2234 | os.symlink(input_zip.read(info).decode(), target) |
Kelvin Zhang | 4cb28f6 | 2023-07-10 12:30:53 -0700 | [diff] [blame] | 2235 | return target |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2236 | |
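# Illustrative sketch (not part of the original module): the upper 16 bits of
# a ZipInfo's external_attr carry the Unix st_mode, which is what the symlink
# and directory checks in UnzipSingleFile() rely on. _example_describe_entry
# is a hypothetical helper, shown only to make the bit layout concrete.
def _example_describe_entry(info: zipfile.ZipInfo):
  """Returns ('symlink' | 'dir' | 'file', permission bits) for a ZipInfo."""
  mode = info.external_attr >> 16
  perm = mode & 0o777
  if (mode & stat.S_IFLNK) == stat.S_IFLNK:
    kind = "symlink"
  elif (mode & stat.S_IFDIR) == stat.S_IFDIR:
    kind = "dir"
  else:
    kind = "file"
  return kind, perm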
| 2237 | |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2238 | def UnzipToDir(filename, dirname, patterns=None): |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2239 | """Unzips the archive to the given directory. |
| 2240 | |
| 2241 | Args: |
| 2242 | filename: The name of the zip file to unzip. |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2243 | dirname: Where the unzipped files will land.
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2244 | patterns: Files to unzip from the archive. If omitted, will unzip the entire |
| 2245 | archive. Non-matching patterns will be filtered out. If there's no match
| 2246 | after the filtering, no file will be unzipped. |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2247 | """ |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2248 | with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip: |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2249 | # Filter out non-matching patterns. unzip will complain otherwise. |
Kelvin Zhang | 38d0c37 | 2023-06-14 12:53:29 -0700 | [diff] [blame] | 2250 | entries = input_zip.infolist() |
| 2251 | # b/283033491 |
| 2252 | # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header |
| 2253 | # In zip64 mode, central directory record's header_offset field might be |
| 2254 | # set to 0xFFFFFFFF if header offset is > 2^32. In this case, the extra |
| 2255 | # fields will contain an 8 byte little endian integer at offset 20 |
| 2256 | # to indicate the actual local header offset. |
| 2257 | # As of python3.11, python does not handle zip64 central directories |
| 2258 | # correctly, so we will manually do the parsing here. |
Kelvin Zhang | 1e77424 | 2023-06-17 09:18:15 -0700 | [diff] [blame] | 2259 | |
| 2260 | # A ZIP64 central directory extra field starts with two required fields: a
| 2261 | # 2-byte header ID and a 2-byte size field, 4 bytes in total. They are
| 2262 | # followed by up to three 8-byte fields and then a 4-byte disk number field.
| 2263 | # The trailing disk number field is not required to be present, but if it is
| 2264 | # present, the total size of the extra field will be divisible by 8 (because
| 2265 | # 2+2+4+8*n is always a multiple of 8). The optional fields, when they
| 2266 | # appear, must appear in the order defined by the zip64 spec. Since the file
| 2267 | # header offset is the second-to-last field, it occupies either the last 8
| 2268 | # bytes or bytes [-12:-4] of the extra field, depending on whether the disk
| 2269 | # number is present. (A small parsing sketch follows this function.)
Kelvin Zhang | 38d0c37 | 2023-06-14 12:53:29 -0700 | [diff] [blame] | 2270 | for entry in entries: |
Kelvin Zhang | 1e77424 | 2023-06-17 09:18:15 -0700 | [diff] [blame] | 2271 | if entry.header_offset == 0xFFFFFFFF: |
| 2272 | if len(entry.extra) % 8 == 0: |
| 2273 | entry.header_offset = int.from_bytes(entry.extra[-12:-4], "little") |
| 2274 | else: |
| 2275 | entry.header_offset = int.from_bytes(entry.extra[-8:], "little") |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2276 | if patterns is not None: |
Kelvin Zhang | 38d0c37 | 2023-06-14 12:53:29 -0700 | [diff] [blame] | 2277 | filtered = [info for info in entries if any( |
| 2278 | [fnmatch.fnmatch(info.filename, p) for p in patterns])] |
Tao Bao | 0ff15de | 2019-03-20 11:26:06 -0700 | [diff] [blame] | 2279 | |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2280 | # There isn't any matching files. Don't unzip anything. |
| 2281 | if not filtered: |
| 2282 | return |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2283 | for info in filtered: |
| 2284 | UnzipSingleFile(input_zip, info, dirname) |
Kelvin Zhang | 7c9205b | 2023-06-05 09:58:16 -0700 | [diff] [blame] | 2285 | else: |
Kelvin Zhang | e473ce9 | 2023-06-21 13:06:59 -0700 | [diff] [blame] | 2286 | for info in entries: |
| 2287 | UnzipSingleFile(input_zip, info, dirname) |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2288 | |
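# Illustrative sketch (not part of the original module) of the zip64 fix-up in
# UnzipToDir() above: recover the 64-bit local header offset from the end of a
# central directory entry's extra field.
def _example_parse_zip64_header_offset(extra: bytes) -> int:
  """Returns the real header offset encoded in a zip64 extra field."""
  if len(extra) % 8 == 0:
    # A trailing 4-byte disk number is present; skip it.
    return int.from_bytes(extra[-12:-4], "little")
  return int.from_bytes(extra[-8:], "little")

# For a fabricated 12-byte extra field (header ID 0x0001, size 8, then an
# 8-byte offset of 2**32), len(extra) % 8 != 0, so the last 8 bytes are used:
#
#   _example_parse_zip64_header_offset(
#       b"\x01\x00\x08\x00" + (2**32).to_bytes(8, "little"))  # -> 4294967296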
| 2289 | |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2290 | def UnzipTemp(filename, patterns=None): |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2291 | """Unzips the given archive into a temporary directory and returns the name. |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2292 | |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2293 | Args: |
| 2294 | filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into |
| 2295 | a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES. |
| 2296 | |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2297 | patterns: Files to unzip from the archive. If omitted, will unzip the entire |
Bill Peckham | 8ff3fbd | 2019-02-22 10:57:43 -0800 | [diff] [blame] | 2298 | archive.
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2299 | |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2300 | Returns: |
Tao Bao | dba59ee | 2018-01-09 13:21:02 -0800 | [diff] [blame] | 2301 | The name of the temporary directory. |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2302 | """ |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2303 | |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2304 | tmp = MakeTempDir(prefix="targetfiles-") |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2305 | m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE) |
| 2306 | if m: |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2307 | UnzipToDir(m.group(1), tmp, patterns) |
| 2308 | UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns) |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2309 | filename = m.group(1) |
| 2310 | else: |
Daniel Norman | 78554ea | 2021-09-14 10:29:38 -0700 | [diff] [blame] | 2311 | UnzipToDir(filename, tmp, patterns) |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 2312 | |
Tao Bao | dba59ee | 2018-01-09 13:21:02 -0800 | [diff] [blame] | 2313 | return tmp |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2314 | |
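# Example usage (hypothetical paths): unzip only the IMAGES/ and META/ entries
# of a target-files archive into a temp dir. With the "foo.zip+bar.zip" form,
# the second archive lands under <tmp>/BOOTABLE_IMAGES as described above.
#
#   tmp = UnzipTemp("target_files.zip", ["IMAGES/*", "META/*"])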
| 2315 | |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2316 | def GetUserImage(which, tmpdir, input_zip, |
| 2317 | info_dict=None, |
| 2318 | allow_shared_blocks=None, |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2319 | reset_file_map=False): |
| 2320 | """Returns an Image object suitable for passing to BlockImageDiff. |
| 2321 | |
| 2322 | This function loads the specified image from the given path. If the specified |
| 2323 | image is sparse, it also performs additional processing for OTA purpose. For |
| 2324 | example, it always adds block 0 to clobbered blocks list. It also detects |
| 2325 | files that cannot be reconstructed from the block list, for which we should
| 2326 | avoid applying imgdiff. |
| 2327 | |
| 2328 | Args: |
| 2329 | which: The partition name. |
| 2330 | tmpdir: The directory that contains the prebuilt image and block map file. |
| 2331 | input_zip: The target-files ZIP archive. |
| 2332 | info_dict: The dict to be looked up for relevant info. |
| 2333 | allow_shared_blocks: If image is sparse, whether having shared blocks is |
| 2334 | allowed. If None, it is looked up from info_dict.
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2335 | reset_file_map: If true and image is sparse, reset file map before returning |
| 2336 | the image. |
| 2337 | Returns: |
| 2338 | An Image object. If it is a sparse image and reset_file_map is False, the
| 2339 | image will have file_map info loaded. |
| 2340 | """ |
Tao Bao | c1a1ec3 | 2019-06-18 16:29:37 -0700 | [diff] [blame] | 2341 | if info_dict is None: |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2342 | info_dict = LoadInfoDict(input_zip) |
| 2343 | |
Kelvin Zhang | 0452128 | 2023-03-02 09:42:52 -0800 | [diff] [blame] | 2344 | is_sparse = IsSparseImage(os.path.join(tmpdir, "IMAGES", which + ".img")) |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2345 | |
| 2346 | # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain |
| 2347 | # shared blocks (i.e. some blocks will show up in multiple files' block |
| 2348 | # list). We can only allocate such shared blocks to the first "owner", and |
| 2349 | # disable imgdiff for all later occurrences. |
| 2350 | if allow_shared_blocks is None: |
| 2351 | allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true" |
| 2352 | |
| 2353 | if is_sparse: |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2354 | img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks) |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2355 | if reset_file_map: |
| 2356 | img.ResetFileMap() |
| 2357 | return img |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2358 | return GetNonSparseImage(which, tmpdir) |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2359 | |
| 2360 | |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2361 | def GetNonSparseImage(which, tmpdir): |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2362 | """Returns an Image object suitable for passing to BlockImageDiff.
| 2363 | |
| 2364 | This function loads the specified non-sparse image from the given path. |
| 2365 | |
| 2366 | Args: |
| 2367 | which: The partition name. |
| 2368 | tmpdir: The directory that contains the prebuilt image and block map file. |
| 2369 | Returns: |
| 2370 | An Image object.
| 2371 | """ |
| 2372 | path = os.path.join(tmpdir, "IMAGES", which + ".img") |
| 2373 | mappath = os.path.join(tmpdir, "IMAGES", which + ".map") |
| 2374 | |
| 2375 | # The image and map files must have been created prior to calling |
| 2376 | # ota_from_target_files.py (since LMP). |
| 2377 | assert os.path.exists(path) and os.path.exists(mappath) |
| 2378 | |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2379 | return images.FileImage(path) |
Tianjie Xu | 41976c7 | 2019-07-03 13:57:01 -0700 | [diff] [blame] | 2380 | |
Yifan Hong | 8a66a71 | 2019-04-04 15:37:57 -0700 | [diff] [blame] | 2381 | |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2382 | def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks): |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2383 | """Returns a SparseImage object suitable for passing to BlockImageDiff. |
| 2384 | |
| 2385 | This function loads the specified sparse image from the given path, and |
| 2386 | performs additional processing for OTA purpose. For example, it always adds |
| 2387 | block 0 to clobbered blocks list. It also detects files that cannot be |
| 2388 | reconstructed from the block list, for which we should avoid applying imgdiff.
| 2389 | |
| 2390 | Args: |
Tao Bao | b2de7d9 | 2019-04-10 10:01:47 -0700 | [diff] [blame] | 2391 | which: The partition name, e.g. "system", "vendor". |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2392 | tmpdir: The directory that contains the prebuilt image and block map file. |
| 2393 | input_zip: The target-files ZIP archive. |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2394 | allow_shared_blocks: Whether having shared blocks is allowed. |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2395 | Returns: |
| 2396 | A SparseImage object, with file_map info loaded. |
| 2397 | """ |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2398 | path = os.path.join(tmpdir, "IMAGES", which + ".img") |
| 2399 | mappath = os.path.join(tmpdir, "IMAGES", which + ".map") |
| 2400 | |
| 2401 | # The image and map files must have been created prior to calling |
| 2402 | # ota_from_target_files.py (since LMP). |
| 2403 | assert os.path.exists(path) and os.path.exists(mappath) |
| 2404 | |
| 2405 | # In ext4 filesystems, block 0 might be changed even when mounted R/O. We add
| 2406 | # it to clobbered_blocks so that it will be written to the target |
| 2407 | # unconditionally. Note that they are still part of care_map. (Bug: 20939131) |
| 2408 | clobbered_blocks = "0" |
| 2409 | |
Tianjie Xu | 67c7cbb | 2018-08-30 00:32:07 -0700 | [diff] [blame] | 2410 | image = sparse_img.SparseImage( |
hungweichen | cc9c05d | 2022-08-23 05:45:42 +0000 | [diff] [blame] | 2411 | path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks) |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2412 | |
| 2413 | # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
| 2414 | # if they contain all zeros. We can't reconstruct such a file from its block |
| 2415 | # list. Tag such entries accordingly. (Bug: 65213616) |
| 2416 | for entry in image.file_map: |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2417 | # Skip artificial names, such as "__ZERO", "__NONZERO-1". |
Tao Bao | d3554e6 | 2018-07-10 15:31:22 -0700 | [diff] [blame] | 2418 | if not entry.startswith('/'): |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2419 | continue |
| 2420 | |
Tom Cherry | d14b895 | 2018-08-09 14:26:00 -0700 | [diff] [blame] | 2421 | # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the |
| 2422 | # filename listed in system.map may contain an additional leading slash |
| 2423 | # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent |
| 2424 | # results. |
wangshumin | 71af07a | 2021-02-24 11:08:47 +0800 | [diff] [blame] | 2425 | # Also handle another special case, where files not under /system
Tom Cherry | d14b895 | 2018-08-09 14:26:00 -0700 | [diff] [blame] | 2426 | # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip. |
wangshumin | 71af07a | 2021-02-24 11:08:47 +0800 | [diff] [blame] | 2427 | arcname = entry.lstrip('/') |
| 2428 | if which == 'system' and not arcname.startswith('system'): |
Tao Bao | d3554e6 | 2018-07-10 15:31:22 -0700 | [diff] [blame] | 2429 | arcname = 'ROOT/' + arcname |
wangshumin | 71af07a | 2021-02-24 11:08:47 +0800 | [diff] [blame] | 2430 | else: |
| 2431 | arcname = arcname.replace(which, which.upper(), 1) |
Tao Bao | d3554e6 | 2018-07-10 15:31:22 -0700 | [diff] [blame] | 2432 | |
| 2433 | assert arcname in input_zip.namelist(), \ |
| 2434 | "Failed to find the ZIP entry for {}".format(entry) |
| 2435 | |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2436 | info = input_zip.getinfo(arcname) |
| 2437 | ranges = image.file_map[entry] |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2438 | |
| 2439 | # If a RangeSet has been tagged as using shared blocks while loading the |
Tao Bao | 2a20f34 | 2018-12-03 15:08:23 -0800 | [diff] [blame] | 2440 | # image, check the original block list to determine its completeness. Note |
| 2441 | # that the 'incomplete' flag would be tagged to the original RangeSet only. |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2442 | if ranges.extra.get('uses_shared_blocks'): |
Tao Bao | 2a20f34 | 2018-12-03 15:08:23 -0800 | [diff] [blame] | 2443 | ranges = ranges.extra['uses_shared_blocks'] |
Tao Bao | e709b09 | 2018-02-07 12:40:00 -0800 | [diff] [blame] | 2444 | |
Tao Bao | c765cca | 2018-01-31 17:32:40 -0800 | [diff] [blame] | 2445 | if RoundUpTo4K(info.file_size) > ranges.size() * 4096: |
| 2446 | ranges.extra['incomplete'] = True |
| 2447 | |
| 2448 | return image |
| 2449 | |
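# Worked example (hypothetical numbers) for the 'incomplete' tagging above: a
# 5,000-byte file rounds up to 8,192 bytes (two 4K blocks), so if its block
# map lists only one 4,096-byte block (e.g. because mke2fs skipped an all-zero
# block), 8192 > 1 * 4096 and the entry gets ranges.extra['incomplete'] = True.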
| 2450 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2451 | def GetKeyPasswords(keylist): |
| 2452 | """Given a list of keys, prompt the user to enter passwords for |
| 2453 | those that require them. Return a {key: password} dict; the password
| 2454 | will be None if the key has no password.""" |
| 2455 | |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2456 | no_passwords = [] |
| 2457 | need_passwords = [] |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2458 | key_passwords = {} |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2459 | devnull = open("/dev/null", "w+b") |
Cole Faust | b820bcd | 2021-10-28 13:59:48 -0700 | [diff] [blame] | 2460 | |
| 2461 | # sorted() can't compare strings to None, so convert Nones to strings |
| 2462 | for k in sorted(keylist, key=lambda x: x if x is not None else ""): |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2463 | # We don't need a password for things that aren't really keys. |
Jooyung Han | 8caba5e | 2021-10-27 03:58:09 +0900 | [diff] [blame] | 2464 | if k in SPECIAL_CERT_STRINGS or k is None: |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2465 | no_passwords.append(k) |
Doug Zongker | 43874f8 | 2009-04-14 14:05:15 -0700 | [diff] [blame] | 2466 | continue |
| 2467 | |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2468 | p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix, |
Doug Zongker | 602a84e | 2009-06-18 08:35:12 -0700 | [diff] [blame] | 2469 | "-inform", "DER", "-nocrypt"], |
| 2470 | stdin=devnull.fileno(), |
| 2471 | stdout=devnull.fileno(), |
| 2472 | stderr=subprocess.STDOUT) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2473 | p.communicate() |
| 2474 | if p.returncode == 0: |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2475 | # Definitely an unencrypted key. |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2476 | no_passwords.append(k) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2477 | else: |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2478 | p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix, |
| 2479 | "-inform", "DER", "-passin", "pass:"], |
| 2480 | stdin=devnull.fileno(), |
| 2481 | stdout=devnull.fileno(), |
| 2482 | stderr=subprocess.PIPE) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2483 | _, stderr = p.communicate() |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2484 | if p.returncode == 0: |
| 2485 | # Encrypted key with empty string as password. |
| 2486 | key_passwords[k] = '' |
| 2487 | elif stderr.startswith('Error decrypting key'): |
| 2488 | # Definitely encrypted key. |
| 2489 | # It would have said "Error reading key" if it didn't parse correctly. |
| 2490 | need_passwords.append(k) |
| 2491 | else: |
| 2492 | # Potentially, a type of key that openssl doesn't understand. |
| 2493 | # We'll let the routines in signapk.jar handle it. |
| 2494 | no_passwords.append(k) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2495 | devnull.close() |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2496 | |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2497 | key_passwords.update(PasswordManager().GetPasswords(need_passwords)) |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 2498 | key_passwords.update(dict.fromkeys(no_passwords)) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2499 | return key_passwords |
| 2500 | |
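# Example usage (hypothetical key path): unencrypted keys, as well as entries
# that are not really keys, end up mapped to None, so callers can index the
# returned dict unconditionally.
#
#   passwords = GetKeyPasswords(["device/sample/security/releasekey", None])
#   passwords["device/sample/security/releasekey"]  # None if key is unencrypted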
| 2501 | |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2502 | def GetMinSdkVersion(apk_name): |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2503 | """Gets the minSdkVersion declared in the APK. |
| 2504 | |
Martin Stjernholm | 58472e8 | 2022-01-07 22:08:47 +0000 | [diff] [blame] | 2505 | It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the given |
| 2506 | APK file. This can be either a decimal number (API Level) or a codename.
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2507 | |
| 2508 | Args: |
| 2509 | apk_name: The APK filename. |
| 2510 | |
| 2511 | Returns: |
| 2512 | The parsed SDK version string. |
| 2513 | |
| 2514 | Raises: |
| 2515 | ExternalError: On failing to obtain the min SDK version. |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2516 | """ |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2517 | proc = Run( |
Martin Stjernholm | 58472e8 | 2022-01-07 22:08:47 +0000 | [diff] [blame] | 2518 | [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE, |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2519 | stderr=subprocess.PIPE) |
| 2520 | stdoutdata, stderrdata = proc.communicate() |
| 2521 | if proc.returncode != 0: |
| 2522 | raise ExternalError( |
Kelvin Zhang | 21118bb | 2022-02-12 09:40:35 -0800 | [diff] [blame] | 2523 | "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format( |
| 2524 | apk_name, proc.returncode, stdoutdata, stderrdata)) |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2525 | |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2526 | for line in stdoutdata.split("\n"): |
James Wu | c5e321a | 2023-08-01 17:45:35 +0000 | [diff] [blame] | 2527 | # Due to ag/24161708, looking for lines such as minSdkVersion:'23',minSdkVersion:'M' |
| 2528 | # or sdkVersion:'23', sdkVersion:'M'. |
| 2529 | m = re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'', line) |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2530 | if m: |
| 2531 | return m.group(1) |
changho.shin | 0f12536 | 2019-07-08 10:59:00 +0900 | [diff] [blame] | 2532 | raise ExternalError("No minSdkVersion returned by aapt2") |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2533 | |
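# Illustrative sketch (not part of the original module): the regex above pulls
# the version out of aapt2 badging lines such as these fabricated ones.
#
#   re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'',
#            "sdkVersion:'23'").group(1)                 # -> '23'
#   re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'',
#            "minSdkVersion:'UpsideDownCake'").group(1)  # -> 'UpsideDownCake'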
| 2534 | |
| 2535 | def GetMinSdkVersionInt(apk_name, codename_to_api_level_map): |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2536 | """Returns the minSdkVersion declared in the APK as a number (API Level). |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2537 | |
Tao Bao | f47bf0f | 2018-03-21 23:28:51 -0700 | [diff] [blame] | 2538 | If minSdkVersion is set to a codename, it is translated to a number using the |
| 2539 | provided map. |
| 2540 | |
| 2541 | Args: |
| 2542 | apk_name: The APK filename. |
| 2543 | |
| 2544 | Returns: |
| 2545 | The parsed SDK version number. |
| 2546 | |
| 2547 | Raises: |
| 2548 | ExternalError: On failing to get the min SDK version number. |
| 2549 | """ |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2550 | version = GetMinSdkVersion(apk_name) |
| 2551 | try: |
| 2552 | return int(version) |
| 2553 | except ValueError: |
Paul Duffin | a03f126 | 2023-02-01 12:12:51 +0000 | [diff] [blame] | 2554 | # Not a decimal number. |
| 2555 | # |
| 2556 | # It could be either a straight codename, e.g. |
| 2557 | # UpsideDownCake |
| 2558 | # |
| 2559 | # Or a codename with API fingerprint SHA, e.g. |
| 2560 | # UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e |
| 2561 | # |
| 2562 | # Extract the codename and try to map it to a version number.
| 2563 | split = version.split(".") |
| 2564 | codename = split[0] |
| 2565 | if codename in codename_to_api_level_map: |
| 2566 | return codename_to_api_level_map[codename] |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2567 | raise ExternalError( |
Paul Duffin | a03f126 | 2023-02-01 12:12:51 +0000 | [diff] [blame] | 2568 | "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format( |
| 2569 | codename, version, codename_to_api_level_map)) |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2570 | |
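# Worked example (hypothetical map): with
# codename_to_api_level_map = {"UpsideDownCake": 34}, GetMinSdkVersionInt()
# resolves a minSdkVersion of
#
#   "30"                                               -> 30
#   "UpsideDownCake"                                   -> 34
#   "UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e"  -> 34
#
# and raises ExternalError for a codename missing from the map.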
| 2571 | |
| 2572 | def SignFile(input_name, output_name, key, password, min_api_level=None, |
Tao Bao | ffc9a30 | 2019-03-22 23:16:58 -0700 | [diff] [blame] | 2573 | codename_to_api_level_map=None, whole_file=False, |
| 2574 | extra_signapk_args=None): |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2575 | """Sign the input_name zip/jar/apk, producing output_name. Use the |
| 2576 | given key and password (the latter may be None if the key does not |
| 2577 | have a password).
| 2578 | |
Doug Zongker | 951495f | 2009-08-14 12:44:19 -0700 | [diff] [blame] | 2579 | If whole_file is true, use the "-w" option to SignApk to embed a |
| 2580 | signature that covers the whole file in the archive comment of the |
| 2581 | zip file. |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2582 | |
| 2583 | min_api_level is the API Level (int) of the oldest platform this file may end |
| 2584 | up on. If not specified for an APK, the API Level is obtained by interpreting |
| 2585 | the minSdkVersion attribute of the APK's AndroidManifest.xml. |
| 2586 | |
| 2587 | codename_to_api_level_map is needed to translate the codename which may be |
| 2588 | encountered as the APK's minSdkVersion. |
Tao Bao | ffc9a30 | 2019-03-22 23:16:58 -0700 | [diff] [blame] | 2589 | |
| 2590 | Caller may optionally specify extra args to be passed to SignApk, which |
| 2591 | defaults to OPTIONS.extra_signapk_args if omitted. |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2592 | """ |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 2593 | if codename_to_api_level_map is None: |
| 2594 | codename_to_api_level_map = {} |
Tao Bao | ffc9a30 | 2019-03-22 23:16:58 -0700 | [diff] [blame] | 2595 | if extra_signapk_args is None: |
| 2596 | extra_signapk_args = OPTIONS.extra_signapk_args |
Doug Zongker | 951495f | 2009-08-14 12:44:19 -0700 | [diff] [blame] | 2597 | |
Alex Klyubin | 9667b18 | 2015-12-10 13:38:50 -0800 | [diff] [blame] | 2598 | java_library_path = os.path.join( |
| 2599 | OPTIONS.search_path, OPTIONS.signapk_shared_library_path) |
| 2600 | |
Tao Bao | e95540e | 2016-11-08 12:08:53 -0800 | [diff] [blame] | 2601 | cmd = ([OPTIONS.java_path] + OPTIONS.java_args + |
| 2602 | ["-Djava.library.path=" + java_library_path, |
| 2603 | "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] + |
Tao Bao | ffc9a30 | 2019-03-22 23:16:58 -0700 | [diff] [blame] | 2604 | extra_signapk_args) |
Doug Zongker | 951495f | 2009-08-14 12:44:19 -0700 | [diff] [blame] | 2605 | if whole_file: |
| 2606 | cmd.append("-w") |
Alex Klyubin | 2cfd1d1 | 2016-01-13 10:32:47 -0800 | [diff] [blame] | 2607 | |
| 2608 | min_sdk_version = min_api_level |
| 2609 | if min_sdk_version is None: |
| 2610 | if not whole_file: |
| 2611 | min_sdk_version = GetMinSdkVersionInt( |
| 2612 | input_name, codename_to_api_level_map) |
| 2613 | if min_sdk_version is not None: |
| 2614 | cmd.extend(["--min-sdk-version", str(min_sdk_version)]) |
| 2615 | |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2616 | cmd.extend([key + OPTIONS.public_key_suffix, |
| 2617 | key + OPTIONS.private_key_suffix, |
Alex Klyubin | eb756d7 | 2015-12-04 09:21:08 -0800 | [diff] [blame] | 2618 | input_name, output_name]) |
Doug Zongker | 951495f | 2009-08-14 12:44:19 -0700 | [diff] [blame] | 2619 | |
Tao Bao | 73dd4f4 | 2018-10-04 16:25:33 -0700 | [diff] [blame] | 2620 | proc = Run(cmd, stdin=subprocess.PIPE) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2621 | if password is not None: |
| 2622 | password += "\n" |
Tao Bao | 73dd4f4 | 2018-10-04 16:25:33 -0700 | [diff] [blame] | 2623 | stdoutdata, _ = proc.communicate(password) |
| 2624 | if proc.returncode != 0: |
Tao Bao | 8092198 | 2018-03-21 21:02:19 -0700 | [diff] [blame] | 2625 | raise ExternalError( |
Kelvin Zhang | 197772f | 2022-04-26 15:15:11 -0700 | [diff] [blame] | 2626 | "Failed to run {}: return code {}:\n{}".format(cmd, |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2627 | proc.returncode, stdoutdata)) |
| 2628 | |
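# Illustrative sketch (hypothetical paths and key names): assuming the default
# ".x509.pem"/".pk8" key suffixes, a call such as
#
#   SignFile("unsigned.apk", "signed.apk",
#            "build/target/product/security/testkey", password=None)
#
# ends up running roughly
#
#   java <java_args> -Djava.library.path=<search_path>/<lib dir> \
#       -jar <search_path>/<signapk.jar> <extra signapk args> \
#       --min-sdk-version <N> \
#       build/target/product/security/testkey.x509.pem \
#       build/target/product/security/testkey.pk8 unsigned.apk signed.apk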
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2629 | |
Doug Zongker | 3797473 | 2010-09-16 17:44:38 -0700 | [diff] [blame] | 2630 | def CheckSize(data, target, info_dict): |
Tao Bao | 9dd909e | 2017-11-14 11:27:32 -0800 | [diff] [blame] | 2631 | """Checks the data string passed against the max size limit. |
Doug Zongker | c77a9ad | 2010-09-16 11:28:43 -0700 | [diff] [blame] | 2632 | |
Tao Bao | 9dd909e | 2017-11-14 11:27:32 -0800 | [diff] [blame] | 2633 | For non-AVB images, raise exception if the data is too big. Print a warning |
| 2634 | if the data is nearing the maximum size. |
| 2635 | |
| 2636 | For AVB images, the actual image size should be identical to the limit. |
| 2637 | |
| 2638 | Args: |
| 2639 | data: A string that contains all the data for the partition. |
| 2640 | target: The partition name. The ".img" suffix is optional. |
| 2641 | info_dict: The dict to be looked up for relevant info. |
| 2642 | """ |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2643 | if target.endswith(".img"): |
| 2644 | target = target[:-4] |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 2645 | mount_point = "/" + target |
| 2646 | |
Ying Wang | f8824af | 2014-06-03 14:07:27 -0700 | [diff] [blame] | 2647 | fs_type = None |
| 2648 | limit = None |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 2649 | if info_dict["fstab"]: |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2650 | if mount_point == "/userdata": |
| 2651 | mount_point = "/data" |
Doug Zongker | 9ce0fb6 | 2010-09-20 18:04:41 -0700 | [diff] [blame] | 2652 | p = info_dict["fstab"][mount_point] |
| 2653 | fs_type = p.fs_type |
Andrew Boie | 0f9aec8 | 2012-02-14 09:32:52 -0800 | [diff] [blame] | 2654 | device = p.device |
| 2655 | if "/" in device: |
| 2656 | device = device[device.rfind("/")+1:] |
Kelvin Zhang | 8c9166a | 2023-10-31 13:42:15 -0700 | [diff] [blame] | 2657 | limit = info_dict.get(device + "_size", 0) |
| 2658 | if isinstance(limit, str): |
| 2659 | limit = int(limit, 0) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2660 | if not fs_type or not limit: |
| 2661 | return |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2662 | |
Andrew Boie | 0f9aec8 | 2012-02-14 09:32:52 -0800 | [diff] [blame] | 2663 | size = len(data) |
Tao Bao | 9dd909e | 2017-11-14 11:27:32 -0800 | [diff] [blame] | 2664 | # target could be 'userdata' or 'cache'. They should follow the non-AVB image |
| 2665 | # path. |
| 2666 | if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS: |
| 2667 | if size != limit: |
| 2668 | raise ExternalError( |
| 2669 | "Mismatching image size for %s: expected %d actual %d" % ( |
| 2670 | target, limit, size)) |
| 2671 | else: |
| 2672 | pct = float(size) * 100.0 / limit |
| 2673 | msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit) |
| 2674 | if pct >= 99.0: |
| 2675 | raise ExternalError(msg) |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2676 | |
| 2677 | if pct >= 95.0: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 2678 | logger.warning("\n WARNING: %s\n", msg) |
| 2679 | else: |
| 2680 | logger.info(" %s", msg) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2681 | |
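# Illustrative usage sketch for CheckSize() above (not part of the original module);
# the image path and the use of OPTIONS.info_dict are assumptions for the example.
#
#   with open("system.img", "rb") as f:                     # hypothetical image file
#     CheckSize(f.read(), "system.img", OPTIONS.info_dict)
#
# When info_dict has avb_enable == "true" and the partition is in AVB_PARTITIONS,
# the image size must match the <device>_size limit exactly; otherwise an exception
# is raised at >= 99% of the limit and a warning is logged at >= 95%.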
| 2682 | |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2683 | def ReadApkCerts(tf_zip): |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2684 | """Parses the APK certs info from a given target-files zip. |
| 2685 | |
| 2686 | Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a |
| 2687 | tuple with the following elements: (1) a dictionary that maps packages to |
| 2688 | certs (based on the "certificate" and "private_key" attributes in the file);
| 2689 | (2) a string representing the extension of compressed APKs in the target files |
| 2690 | (e.g. ".gz", ".bro").
| 2691 | |
| 2692 | Args: |
| 2693 | tf_zip: The input target_files ZipFile (already open). |
| 2694 | |
| 2695 | Returns: |
| 2696 | (certmap, ext): certmap is a dictionary that maps packages to certs; ext is |
| 2697 | the extension string of compressed APKs (e.g. ".gz"), or None if there are
| 2698 | no compressed APKs. |
| 2699 | """ |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2700 | certmap = {} |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2701 | compressed_extension = None |
| 2702 | |
Tao Bao | 0f99033 | 2017-09-08 19:02:54 -0700 | [diff] [blame] | 2703 | # META/apkcerts.txt contains the info for _all_ the packages known at build |
| 2704 | # time. Filter out the ones that are not installed. |
| 2705 | installed_files = set() |
| 2706 | for name in tf_zip.namelist(): |
| 2707 | basename = os.path.basename(name) |
| 2708 | if basename: |
| 2709 | installed_files.add(basename) |
| 2710 | |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 2711 | for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'): |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2712 | line = line.strip() |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2713 | if not line: |
| 2714 | continue |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2715 | m = re.match( |
| 2716 | r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+' |
Bill Peckham | 5c7b034 | 2020-04-03 15:36:23 -0700 | [diff] [blame] | 2717 | r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?' |
| 2718 | r'(\s+partition="(?P<PARTITION>.*?)")?$', |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2719 | line) |
| 2720 | if not m: |
| 2721 | continue |
Narayan Kamath | a07bf04 | 2017-08-14 14:49:21 +0100 | [diff] [blame] | 2722 | |
Tao Bao | 818ddf5 | 2018-01-05 11:17:34 -0800 | [diff] [blame] | 2723 | matches = m.groupdict() |
| 2724 | cert = matches["CERT"] |
| 2725 | privkey = matches["PRIVKEY"] |
| 2726 | name = matches["NAME"] |
| 2727 | this_compressed_extension = matches["COMPRESSED"] |
| 2728 | |
| 2729 | public_key_suffix_len = len(OPTIONS.public_key_suffix) |
| 2730 | private_key_suffix_len = len(OPTIONS.private_key_suffix) |
| 2731 | if cert in SPECIAL_CERT_STRINGS and not privkey: |
| 2732 | certmap[name] = cert |
| 2733 | elif (cert.endswith(OPTIONS.public_key_suffix) and |
| 2734 | privkey.endswith(OPTIONS.private_key_suffix) and |
| 2735 | cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]): |
| 2736 | certmap[name] = cert[:-public_key_suffix_len] |
| 2737 | else: |
| 2738 | raise ValueError("Failed to parse line from apkcerts.txt:\n" + line) |
| 2739 | |
| 2740 | if not this_compressed_extension: |
| 2741 | continue |
| 2742 | |
| 2743 | # Only count the installed files. |
| 2744 | filename = name + '.' + this_compressed_extension |
| 2745 | if filename not in installed_files: |
| 2746 | continue |
| 2747 | |
| 2748 | # Make sure that all the values in the compression map have the same |
| 2749 | # extension. We don't support multiple compression methods in the same |
| 2750 | # system image. |
| 2751 | if compressed_extension: |
| 2752 | if this_compressed_extension != compressed_extension: |
| 2753 | raise ValueError( |
| 2754 | "Multiple compressed extensions: {} vs {}".format( |
| 2755 | compressed_extension, this_compressed_extension)) |
| 2756 | else: |
| 2757 | compressed_extension = this_compressed_extension |
| 2758 | |
| 2759 | return (certmap, |
| 2760 | ("." + compressed_extension) if compressed_extension else None) |
Doug Zongker | f6a53aa | 2009-12-15 15:06:55 -0800 | [diff] [blame] | 2761 | |
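# Illustrative sketch for ReadApkCerts() above (not part of the original module).
# The package name, key paths and compression value below are made up; a matching
# META/apkcerts.txt line looks like:
#
#   name="Settings.apk" certificate="certs/platform.x509.pem" private_key="certs/platform.pk8" compressed="gz"
#
#   with zipfile.ZipFile("target_files.zip") as tf_zip:     # hypothetical input
#     certmap, ext = ReadApkCerts(tf_zip)
#   # certmap maps "Settings.apk" to the cert path with OPTIONS.public_key_suffix
#   # stripped; ext is ".gz" here, or None if no installed APK is compressed.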
| 2762 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2763 | COMMON_DOCSTRING = """ |
Tao Bao | 30df8b4 | 2018-04-23 15:32:53 -0700 | [diff] [blame] | 2764 | Global options |
| 2765 | |
| 2766 | -p (--path) <dir> |
| 2767 | Prepend <dir>/bin to the list of places to search for binaries run by this |
| 2768 | script, and expect to find jars in <dir>/framework. |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2769 | |
Doug Zongker | 05d3dea | 2009-06-22 11:32:31 -0700 | [diff] [blame] | 2770 | -s (--device_specific) <file> |
Tao Bao | 30df8b4 | 2018-04-23 15:32:53 -0700 | [diff] [blame] | 2771 | Path to the Python module containing device-specific releasetools code. |
Doug Zongker | 05d3dea | 2009-06-22 11:32:31 -0700 | [diff] [blame] | 2772 | |
Tao Bao | 30df8b4 | 2018-04-23 15:32:53 -0700 | [diff] [blame] | 2773 | -x (--extra) <key=value> |
| 2774 | Add a key/value pair to the 'extras' dict, which device-specific extension |
| 2775 | code may look at. |
Doug Zongker | 8bec09e | 2009-11-30 15:37:14 -0800 | [diff] [blame] | 2776 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2777 | -v (--verbose) |
| 2778 | Show command lines being executed. |
| 2779 | |
| 2780 | -h (--help) |
| 2781 | Display this usage message and exit. |
Yifan Hong | 3091093 | 2019-10-25 20:36:55 -0700 | [diff] [blame] | 2782 | |
| 2783 | --logfile <file> |
| 2784 | Write verbose logs to the specified file (regardless of the --verbose option).
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2785 | """ |
| 2786 | |
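# Illustrative sketch (not part of the original module): how the global options above
# might be passed to a releasetools script. The script name, paths and key/value pair
# are assumptions for the example.
#
#   some_releasetools_script -v \
#       -p out/host/linux-x86 \
#       -s device/vendorname/devicename/releasetools.py \
#       -x custom_flag=1 \
#       --logfile /tmp/releasetools.log \
#       <input> <output>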
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2787 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2788 | def Usage(docstring): |
Tao Bao | 89fbb0f | 2017-01-10 10:47:58 -0800 | [diff] [blame] | 2789 | print(docstring.rstrip("\n")) |
| 2790 | print(COMMON_DOCSTRING) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2791 | |
| 2792 | |
| 2793 | def ParseOptions(argv, |
| 2794 | docstring, |
| 2795 | extra_opts="", extra_long_opts=(), |
| 2796 | extra_option_handler=None): |
| 2797 | """Parse the options in argv and return any arguments that aren't |
| 2798 | flags. docstring is the calling module's docstring, to be displayed |
| 2799 | for errors and -h. extra_opts and extra_long_opts are for flags |
| 2800 | defined by the caller, which are processed by passing them to |
| 2801 | extra_option_handler.""" |
| 2802 | |
| 2803 | try: |
| 2804 | opts, args = getopt.getopt( |
Doug Zongker | 8bec09e | 2009-11-30 15:37:14 -0800 | [diff] [blame] | 2805 | argv, "hvp:s:x:" + extra_opts, |
Alex Klyubin | 9667b18 | 2015-12-10 13:38:50 -0800 | [diff] [blame] | 2806 | ["help", "verbose", "path=", "signapk_path=", |
Thiébaud Weksteen | 62865ca | 2023-10-18 11:08:47 +1100 | [diff] [blame] | 2807 | "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=", |
Tianjie Xu | 88a759d | 2020-01-23 10:47:54 -0800 | [diff] [blame] | 2808 | "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=", |
Baligh Uddin | 601ddea | 2015-06-09 15:48:14 -0700 | [diff] [blame] | 2809 | "private_key_suffix=", "boot_signer_path=", "boot_signer_args=", |
| 2810 | "verity_signer_path=", "verity_signer_args=", "device_specific=", |
Jan Monsch | e147d48 | 2021-06-23 12:30:35 +0200 | [diff] [blame] | 2811 | "extra=", "logfile="] + list(extra_long_opts)) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2812 | except getopt.GetoptError as err: |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2813 | Usage(docstring) |
Tao Bao | 89fbb0f | 2017-01-10 10:47:58 -0800 | [diff] [blame] | 2814 | print("**", str(err), "**") |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2815 | sys.exit(2) |
| 2816 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2817 | for o, a in opts: |
| 2818 | if o in ("-h", "--help"): |
| 2819 | Usage(docstring) |
| 2820 | sys.exit() |
| 2821 | elif o in ("-v", "--verbose"): |
| 2822 | OPTIONS.verbose = True |
| 2823 | elif o in ("-p", "--path"): |
Doug Zongker | 602a84e | 2009-06-18 08:35:12 -0700 | [diff] [blame] | 2824 | OPTIONS.search_path = a |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2825 | elif o in ("--signapk_path",): |
| 2826 | OPTIONS.signapk_path = a |
Alex Klyubin | 9667b18 | 2015-12-10 13:38:50 -0800 | [diff] [blame] | 2827 | elif o in ("--signapk_shared_library_path",): |
| 2828 | OPTIONS.signapk_shared_library_path = a |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2829 | elif o in ("--extra_signapk_args",): |
| 2830 | OPTIONS.extra_signapk_args = shlex.split(a) |
Martin Stjernholm | 58472e8 | 2022-01-07 22:08:47 +0000 | [diff] [blame] | 2831 | elif o in ("--aapt2_path",): |
| 2832 | OPTIONS.aapt2_path = a |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2833 | elif o in ("--java_path",): |
| 2834 | OPTIONS.java_path = a |
Baligh Uddin | 339ee49 | 2014-09-05 11:18:07 -0700 | [diff] [blame] | 2835 | elif o in ("--java_args",): |
Tao Bao | e95540e | 2016-11-08 12:08:53 -0800 | [diff] [blame] | 2836 | OPTIONS.java_args = shlex.split(a) |
Tianjie Xu | 88a759d | 2020-01-23 10:47:54 -0800 | [diff] [blame] | 2837 | elif o in ("--android_jar_path",): |
| 2838 | OPTIONS.android_jar_path = a |
T.R. Fullhart | 37e1052 | 2013-03-18 10:31:26 -0700 | [diff] [blame] | 2839 | elif o in ("--public_key_suffix",): |
| 2840 | OPTIONS.public_key_suffix = a |
| 2841 | elif o in ("--private_key_suffix",): |
| 2842 | OPTIONS.private_key_suffix = a |
Baligh Uddin | e204868 | 2014-11-20 09:52:05 -0800 | [diff] [blame] | 2843 | elif o in ("--boot_signer_path",): |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2844 | raise ValueError( |
| 2845 | "--boot_signer_path is no longer supported, please switch to AVB") |
Baligh Uddin | 601ddea | 2015-06-09 15:48:14 -0700 | [diff] [blame] | 2846 | elif o in ("--boot_signer_args",): |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2847 | raise ValueError( |
| 2848 | "--boot_signer_args is no longer supported, please switch to AVB") |
Baligh Uddin | 601ddea | 2015-06-09 15:48:14 -0700 | [diff] [blame] | 2849 | elif o in ("--verity_signer_path",): |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2850 | raise ValueError( |
| 2851 | "--verity_signer_path is no longer supported, please switch to AVB") |
Baligh Uddin | 601ddea | 2015-06-09 15:48:14 -0700 | [diff] [blame] | 2852 | elif o in ("--verity_signer_args",): |
Kelvin Zhang | f294c87 | 2022-10-06 14:21:36 -0700 | [diff] [blame] | 2853 | raise ValueError( |
| 2854 | "--verity_signer_args is no longer supported, please switch to AVB") |
Doug Zongker | 05d3dea | 2009-06-22 11:32:31 -0700 | [diff] [blame] | 2855 | elif o in ("-s", "--device_specific"): |
| 2856 | OPTIONS.device_specific = a |
Doug Zongker | 5ecba70 | 2009-12-03 16:36:20 -0800 | [diff] [blame] | 2857 | elif o in ("-x", "--extra"): |
Doug Zongker | 8bec09e | 2009-11-30 15:37:14 -0800 | [diff] [blame] | 2858 | key, value = a.split("=", 1) |
| 2859 | OPTIONS.extras[key] = value |
Yifan Hong | 3091093 | 2019-10-25 20:36:55 -0700 | [diff] [blame] | 2860 | elif o in ("--logfile",): |
| 2861 | OPTIONS.logfile = a |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2862 | else: |
| 2863 | if extra_option_handler is None or not extra_option_handler(o, a): |
| 2864 | assert False, "unknown option \"%s\"" % (o,) |
| 2865 | |
Doug Zongker | 8544877 | 2014-09-09 14:59:20 -0700 | [diff] [blame] | 2866 | if OPTIONS.search_path: |
| 2867 | os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") + |
| 2868 | os.pathsep + os.environ["PATH"]) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2869 | |
| 2870 | return args |
| 2871 | |
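# Illustrative sketch (not part of the original module): how a calling script might
# hook its own flags into ParseOptions() above. The flag names and OPTIONS attributes
# are made up for the example.
#
#   def option_handler(o, a):
#     if o == "--wipe_user_data":            # hypothetical boolean flag
#       OPTIONS.wipe_user_data = True
#     elif o == "--output_metadata_path":    # hypothetical flag taking a value
#       OPTIONS.output_metadata_path = a
#     else:
#       return False                         # unknown option; ParseOptions will assert
#     return True
#
#   args = ParseOptions(sys.argv[1:], __doc__,
#                       extra_long_opts=["wipe_user_data", "output_metadata_path="],
#                       extra_option_handler=option_handler)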
| 2872 | |
Tao Bao | 4c851b1 | 2016-09-19 13:54:38 -0700 | [diff] [blame] | 2873 | def MakeTempFile(prefix='tmp', suffix=''): |
Doug Zongker | fc44a51 | 2014-08-26 13:10:25 -0700 | [diff] [blame] | 2874 | """Make a temp file and add it to the list of things to be deleted |
| 2875 | when Cleanup() is called. Return the filename.""" |
| 2876 | fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix) |
| 2877 | os.close(fd) |
| 2878 | OPTIONS.tempfiles.append(fn) |
| 2879 | return fn |
| 2880 | |
| 2881 | |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2882 | def MakeTempDir(prefix='tmp', suffix=''): |
| 2883 | """Makes a temporary dir that will be cleaned up with a call to Cleanup(). |
| 2884 | |
| 2885 | Returns: |
| 2886 | The absolute pathname of the new directory. |
| 2887 | """ |
| 2888 | dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix) |
| 2889 | OPTIONS.tempfiles.append(dir_name) |
| 2890 | return dir_name |
| 2891 | |
| 2892 | |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2893 | def Cleanup(): |
| 2894 | for i in OPTIONS.tempfiles: |
Kelvin Zhang | 2268091 | 2023-05-19 13:12:59 -0700 | [diff] [blame] | 2895 | if not os.path.exists(i): |
| 2896 | continue |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2897 | if os.path.isdir(i): |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2898 | shutil.rmtree(i, ignore_errors=True) |
Doug Zongker | eef3944 | 2009-04-02 12:14:19 -0700 | [diff] [blame] | 2899 | else: |
| 2900 | os.remove(i) |
Tao Bao | 1c830bf | 2017-12-25 10:43:47 -0800 | [diff] [blame] | 2901 | del OPTIONS.tempfiles[:] |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2902 | |
| 2903 | |
| 2904 | class PasswordManager(object): |
| 2905 | def __init__(self): |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 2906 | self.editor = os.getenv("EDITOR") |
| 2907 | self.pwfile = os.getenv("ANDROID_PW_FILE") |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2908 | |
| 2909 | def GetPasswords(self, items): |
| 2910 | """Get passwords corresponding to each string in 'items', |
| 2911 | returning a dict. (The dict may have keys in addition to the |
| 2912 | values in 'items'.) |
| 2913 | |
| 2914 | Uses the passwords in $ANDROID_PW_FILE if available, letting the |
| 2915 | user edit that file to add more needed passwords. If no editor is |
| 2916 | available, or $ANDROID_PW_FILE isn't defined, prompts the user
| 2917 | interactively in the ordinary way. |
| 2918 | """ |
| 2919 | |
| 2920 | current = self.ReadFile() |
| 2921 | |
| 2922 | first = True |
| 2923 | while True: |
| 2924 | missing = [] |
| 2925 | for i in items: |
| 2926 | if i not in current or not current[i]: |
| 2927 | missing.append(i) |
| 2928 | # Are all the passwords already in the file? |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2929 | if not missing: |
| 2930 | return current |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2931 | |
| 2932 | for i in missing: |
| 2933 | current[i] = "" |
| 2934 | |
| 2935 | if not first: |
Tao Bao | 89fbb0f | 2017-01-10 10:47:58 -0800 | [diff] [blame] | 2936 | print("key file %s still missing some passwords." % (self.pwfile,)) |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 2937 | if sys.version_info[0] >= 3: |
| 2938 | raw_input = input # pylint: disable=redefined-builtin |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2939 | answer = raw_input("try to edit again? [y]> ").strip() |
| 2940 | if answer and answer[0] not in 'yY': |
| 2941 | raise RuntimeError("key passwords unavailable") |
| 2942 | first = False |
| 2943 | |
| 2944 | current = self.UpdateAndReadFile(current) |
| 2945 | |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 2946 | def PromptResult(self, current): # pylint: disable=no-self-use |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2947 | """Prompt the user to enter a value (password) for each key in |
| 2948 | 'current' whose value is false. Returns a new dict with all the
| 2949 | values. |
| 2950 | """ |
| 2951 | result = {} |
Tao Bao | 3888428 | 2019-07-10 22:20:56 -0700 | [diff] [blame] | 2952 | for k, v in sorted(current.items()): |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2953 | if v: |
| 2954 | result[k] = v |
| 2955 | else: |
| 2956 | while True: |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2957 | result[k] = getpass.getpass( |
| 2958 | "Enter password for %s key> " % k).strip() |
| 2959 | if result[k]: |
| 2960 | break |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2961 | return result |
| 2962 | |
| 2963 | def UpdateAndReadFile(self, current): |
| 2964 | if not self.editor or not self.pwfile: |
| 2965 | return self.PromptResult(current) |
| 2966 | |
| 2967 | f = open(self.pwfile, "w") |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2968 | os.chmod(self.pwfile, 0o600) |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2969 | f.write("# Enter key passwords between the [[[ ]]] brackets.\n") |
| 2970 | f.write("# (Additional spaces are harmless.)\n\n") |
| 2971 | |
| 2972 | first_line = None |
Tao Bao | 3888428 | 2019-07-10 22:20:56 -0700 | [diff] [blame] | 2973 | sorted_list = sorted([(not v, k, v) for (k, v) in current.items()]) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2974 | for i, (_, k, v) in enumerate(sorted_list): |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2975 | f.write("[[[ %s ]]] %s\n" % (v, k)) |
| 2976 | if not v and first_line is None: |
| 2977 | # position cursor on first line with no password. |
| 2978 | first_line = i + 4 |
| 2979 | f.close() |
| 2980 | |
Tao Bao | 986ee86 | 2018-10-04 15:46:16 -0700 | [diff] [blame] | 2981 | RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile]) |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2982 | |
| 2983 | return self.ReadFile() |
| 2984 | |
| 2985 | def ReadFile(self): |
| 2986 | result = {} |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2987 | if self.pwfile is None: |
| 2988 | return result |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2989 | try: |
| 2990 | f = open(self.pwfile, "r") |
| 2991 | for line in f: |
| 2992 | line = line.strip() |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 2993 | if not line or line[0] == '#': |
| 2994 | continue |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2995 | m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line) |
| 2996 | if not m: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 2997 | logger.warning("Failed to parse password file: %s", line) |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 2998 | else: |
| 2999 | result[m.group(2)] = m.group(1) |
| 3000 | f.close() |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 3001 | except IOError as e: |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 3002 | if e.errno != errno.ENOENT: |
Tao Bao | 32fcdab | 2018-10-12 10:30:39 -0700 | [diff] [blame] | 3003 | logger.exception("Error reading password file:") |
Doug Zongker | 8ce7c25 | 2009-05-22 13:34:54 -0700 | [diff] [blame] | 3004 | return result |
Doug Zongker | 048e7ca | 2009-06-15 14:31:53 -0700 | [diff] [blame] | 3005 | |
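# Illustrative sketch (not part of the original module): the $ANDROID_PW_FILE layout
# that PasswordManager above writes out and parses back. The key names and password
# are made up for the example.
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   # (Additional spaces are harmless.)
#
#   [[[  hunter2  ]]] build/target/product/security/releasekey
#   [[[           ]]] vendor/example/keys/platform
#
# A line whose [[[ ]]] value is empty counts as a still-missing password, so
# GetPasswords() asks the user to edit the file again (or prompts interactively when
# no editor or password file is configured).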
| 3006 | |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 3007 | def ZipWrite(zip_file, filename, arcname=None, perms=0o644, |
| 3008 | compress_type=None): |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 3009 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3010 | # http://b/18015246 |
| 3011 | # Python 2.7's zipfile implementation wrongly thinks that zip64 is required |
| 3012 | # for files larger than 2GiB. We can work around this by adjusting their |
| 3013 | # limit. Note that `zipfile.writestr()` will not work for strings larger than |
| 3014 | # 2GiB. The Python interpreter sometimes rejects strings that large (though |
| 3015 | # it isn't clear to me exactly what circumstances cause this). |
| 3016 | # `zipfile.write()` must be used directly to work around this. |
| 3017 | # |
| 3018 | # This mess can be avoided if we port to python3. |
| 3019 | saved_zip64_limit = zipfile.ZIP64_LIMIT |
| 3020 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 |
| 3021 | |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 3022 | if compress_type is None: |
| 3023 | compress_type = zip_file.compression |
| 3024 | if arcname is None: |
| 3025 | arcname = filename |
| 3026 | |
| 3027 | saved_stat = os.stat(filename) |
| 3028 | |
| 3029 | try: |
| 3030 | # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the |
| 3031 | # file to be zipped and reset it when we're done. |
| 3032 | os.chmod(filename, perms) |
| 3033 | |
| 3034 | # Use a fixed timestamp so the output is repeatable. |
Bryan Henry | e6d547d | 2018-07-31 18:32:00 -0700 | [diff] [blame] | 3035 | # Note: Use of fromtimestamp rather than utcfromtimestamp here is |
| 3036 | # intentional. zip stores datetimes in local time without a time zone |
| 3037 | # attached, so we need "epoch" but in the local time zone to get 2009/01/01 |
| 3038 | # in the zip archive. |
| 3039 | local_epoch = datetime.datetime.fromtimestamp(0) |
| 3040 | timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds() |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 3041 | os.utime(filename, (timestamp, timestamp)) |
| 3042 | |
| 3043 | zip_file.write(filename, arcname=arcname, compress_type=compress_type) |
| 3044 | finally: |
| 3045 | os.chmod(filename, saved_stat.st_mode) |
| 3046 | os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime)) |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3047 | zipfile.ZIP64_LIMIT = saved_zip64_limit |
Dan Albert | 8e0178d | 2015-01-27 15:53:15 -0800 | [diff] [blame] | 3048 | |
| 3049 | |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3050 | def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None, |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3051 | compress_type=None): |
| 3052 | """Wrap zipfile.writestr() function to work around the zip64 limit. |
| 3053 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3054 | Even with the ZIP64_LIMIT workaround, it won't allow writing a string |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3055 | longer than 2GiB. It gives 'OverflowError: size does not fit in an int' |
| 3056 | when calling crc32(bytes). |
| 3057 | |
| 3058 | But it still works fine to write a shorter string into a large zip file. |
| 3059 | We should use ZipWrite() whenever possible, and only use ZipWriteStr() |
| 3060 | when we know the string won't be too long. |
| 3061 | """ |
| 3062 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3063 | saved_zip64_limit = zipfile.ZIP64_LIMIT |
| 3064 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 |
| 3065 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3066 | if not isinstance(zinfo_or_arcname, zipfile.ZipInfo): |
| 3067 | zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname) |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 3068 | zinfo.compress_type = zip_file.compression |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3069 | if perms is None: |
Tao Bao | 2a41058 | 2015-07-10 17:18:23 -0700 | [diff] [blame] | 3070 | perms = 0o100644 |
Geremy Condra | 36bd365 | 2014-02-06 19:45:10 -0800 | [diff] [blame] | 3071 | else: |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3072 | zinfo = zinfo_or_arcname |
Tao Bao | c1a1ec3 | 2019-06-18 16:29:37 -0700 | [diff] [blame] | 3073 | # Python 2 and 3 behave differently when calling ZipFile.writestr() with |
| 3074 | # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for |
| 3075 | # such a case (since |
| 3076 | # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9), |
| 3077 | # which seems to make more sense. Otherwise the entry will have 0o000 as the |
| 3078 | # permission bits. We follow the logic in Python 3 to get consistent |
| 3079 | # behavior between using the two versions. |
| 3080 | if not zinfo.external_attr: |
| 3081 | zinfo.external_attr = 0o600 << 16 |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3082 | |
| 3083 | # If compress_type is given, it overrides the value in zinfo. |
| 3084 | if compress_type is not None: |
| 3085 | zinfo.compress_type = compress_type |
| 3086 | |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3087 | # If perms is given, it has a priority. |
| 3088 | if perms is not None: |
Tao Bao | 2a41058 | 2015-07-10 17:18:23 -0700 | [diff] [blame] | 3089 | # If perms doesn't set the file type, mark it as a regular file. |
| 3090 | if perms & 0o770000 == 0: |
| 3091 | perms |= 0o100000 |
Tao Bao | 58c1b96 | 2015-05-20 09:32:18 -0700 | [diff] [blame] | 3092 | zinfo.external_attr = perms << 16 |
| 3093 | |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3094 | # Use a fixed timestamp so the output is repeatable. |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3095 | zinfo.date_time = (2009, 1, 1, 0, 0, 0) |
| 3096 | |
Dan Albert | 8b72aef | 2015-03-23 19:13:21 -0700 | [diff] [blame] | 3097 | zip_file.writestr(zinfo, data) |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3098 | zipfile.ZIP64_LIMIT = saved_zip64_limit |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3099 | |
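# Illustrative sketch (not part of the original module): picking between ZipWrite()
# and ZipWriteStr() above. The archive and entry names are assumptions.
#
#   with zipfile.ZipFile("out.zip", "w") as z:
#     # Large on-disk artifacts go through ZipWrite(), which also normalizes the
#     # permissions and timestamp so the archive is repeatable.
#     ZipWrite(z, "system.new.dat.br", compress_type=zipfile.ZIP_STORED)
#     # Short in-memory strings can use ZipWriteStr().
#     ZipWriteStr(z, "META/example.txt", "hello\n", perms=0o644)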
| 3100 | |
Kelvin Zhang | 1caead0 | 2022-09-23 10:06:03 -0700 | [diff] [blame] | 3101 | def ZipDelete(zip_filename, entries, force=False): |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3102 | """Deletes entries from a ZIP file. |
| 3103 | |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3104 | Args: |
| 3105 | zip_filename: The name of the ZIP file. |
| 3106 | entries: The name of the entry, or the list of names to be deleted. |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3107 | """ |
Tao Bao | c1a1ec3 | 2019-06-18 16:29:37 -0700 | [diff] [blame] | 3108 | if isinstance(entries, str): |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3109 | entries = [entries] |
Kelvin Zhang | 7087614 | 2022-02-09 16:05:29 -0800 | [diff] [blame] | 3110 | # If list is empty, nothing to do |
| 3111 | if not entries: |
| 3112 | return |
Wei Li | 8895f9e | 2022-10-10 17:13:17 -0700 | [diff] [blame] | 3113 | |
| 3114 | with zipfile.ZipFile(zip_filename, 'r') as zin: |
| 3115 | if not force and len(set(zin.namelist()).intersection(entries)) == 0: |
| 3116 | raise ExternalError( |
| 3117 | "Failed to delete zip entries, name not matched: %s" % entries) |
| 3118 | |
| 3119 | fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(zip_filename)) |
| 3120 | os.close(fd) |
Kelvin Zhang | c8ff84b | 2023-02-15 16:52:46 -0800 | [diff] [blame] | 3121 | cmd = ["zip2zip", "-i", zip_filename, "-o", new_zipfile] |
| 3122 | for entry in entries: |
| 3123 | cmd.append("-x") |
| 3124 | cmd.append(entry) |
| 3125 | RunAndCheckOutput(cmd) |
Wei Li | 8895f9e | 2022-10-10 17:13:17 -0700 | [diff] [blame] | 3126 | |
Wei Li | 8895f9e | 2022-10-10 17:13:17 -0700 | [diff] [blame] | 3127 | os.replace(new_zipfile, zip_filename) |
Tao Bao | 89d7ab2 | 2017-12-14 17:05:33 -0800 | [diff] [blame] | 3128 | |
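# Illustrative sketch (not part of the original module): removing entries with
# ZipDelete() above. The archive and entry names are made up.
#
#   ZipDelete("target_files.zip", ["META/care_map.pb", "IMAGES/system.img"])
#   # With force=True, entries missing from the archive are ignored instead of
#   # raising ExternalError.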
| 3129 | |
Kelvin Zhang | f92f7f0 | 2023-04-14 21:32:54 +0000 | [diff] [blame] | 3130 | def ZipClose(zip_file): |
| 3131 | # http://b/18015246 |
| 3132 | # zipfile also refers to ZIP64_LIMIT during close() when it writes out the |
| 3133 | # central directory. |
| 3134 | saved_zip64_limit = zipfile.ZIP64_LIMIT |
| 3135 | zipfile.ZIP64_LIMIT = (1 << 32) - 1 |
| 3136 | |
| 3137 | zip_file.close() |
| 3138 | |
| 3139 | zipfile.ZIP64_LIMIT = saved_zip64_limit |
| 3140 | |
| 3141 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3142 | class DeviceSpecificParams(object): |
| 3143 | module = None |
| 3144 | |
| 3145 | def __init__(self, **kwargs): |
| 3146 | """Keyword arguments to the constructor become attributes of this |
| 3147 | object, which is passed to all functions in the device-specific |
| 3148 | module.""" |
| 3149 | for k, v in kwargs.items(): |
| 3150 | setattr(self, k, v) |
| 3151 | self.extras = OPTIONS.extras |
| 3152 | |
| 3153 | if self.module is None: |
| 3154 | path = OPTIONS.device_specific |
| 3155 | if not path: |
| 3156 | return |
| 3157 | try: |
| 3158 | if os.path.isdir(path): |
| 3159 | info = imp.find_module("releasetools", [path]) |
| 3160 | else: |
| 3161 | d, f = os.path.split(path) |
| 3162 | b, x = os.path.splitext(f) |
| 3163 | if x == ".py": |
| 3164 | f = b |
| 3165 | info = imp.find_module(f, [d]) |
| 3166 | logger.info("loaded device-specific extensions from %s", path) |
| 3167 | self.module = imp.load_module("device_specific", *info) |
| 3168 | except ImportError: |
| 3169 | logger.info("unable to load device-specific module; assuming none") |
| 3170 | |
| 3171 | def _DoCall(self, function_name, *args, **kwargs): |
| 3172 | """Call the named function in the device-specific module, passing |
| 3173 | the given args and kwargs. The first argument to the call will be |
| 3174 | the DeviceSpecific object itself. If there is no module, or the |
| 3175 | module does not define the function, return the value of the |
| 3176 | 'default' kwarg (which itself defaults to None).""" |
| 3177 | if self.module is None or not hasattr(self.module, function_name): |
| 3178 | return kwargs.get("default") |
| 3179 | return getattr(self.module, function_name)(*((self,) + args), **kwargs) |
| 3180 | |
| 3181 | def FullOTA_Assertions(self): |
| 3182 | """Called after emitting the block of assertions at the top of a |
| 3183 | full OTA package. Implementations can add whatever additional |
| 3184 | assertions they like.""" |
| 3185 | return self._DoCall("FullOTA_Assertions") |
| 3186 | |
| 3187 | def FullOTA_InstallBegin(self): |
| 3188 | """Called at the start of full OTA installation.""" |
| 3189 | return self._DoCall("FullOTA_InstallBegin") |
| 3190 | |
| 3191 | def FullOTA_GetBlockDifferences(self): |
| 3192 | """Called during full OTA installation and verification. |
| 3193 | Implementations should return a list of BlockDifference objects describing
| 3194 | the update on each additional partition.
| 3195 | """ |
| 3196 | return self._DoCall("FullOTA_GetBlockDifferences") |
| 3197 | |
| 3198 | def FullOTA_InstallEnd(self): |
| 3199 | """Called at the end of full OTA installation; typically this is |
| 3200 | used to install the image for the device's baseband processor.""" |
| 3201 | return self._DoCall("FullOTA_InstallEnd") |
| 3202 | |
| 3203 | def IncrementalOTA_Assertions(self): |
| 3204 | """Called after emitting the block of assertions at the top of an |
| 3205 | incremental OTA package. Implementations can add whatever |
| 3206 | additional assertions they like.""" |
| 3207 | return self._DoCall("IncrementalOTA_Assertions") |
| 3208 | |
| 3209 | def IncrementalOTA_VerifyBegin(self): |
| 3210 | """Called at the start of the verification phase of incremental |
| 3211 | OTA installation; additional checks can be placed here to abort |
| 3212 | the script before any changes are made.""" |
| 3213 | return self._DoCall("IncrementalOTA_VerifyBegin") |
| 3214 | |
| 3215 | def IncrementalOTA_VerifyEnd(self): |
| 3216 | """Called at the end of the verification phase of incremental OTA |
| 3217 | installation; additional checks can be placed here to abort the |
| 3218 | script before any changes are made.""" |
| 3219 | return self._DoCall("IncrementalOTA_VerifyEnd") |
| 3220 | |
| 3221 | def IncrementalOTA_InstallBegin(self): |
| 3222 | """Called at the start of incremental OTA installation (after |
| 3223 | verification is complete).""" |
| 3224 | return self._DoCall("IncrementalOTA_InstallBegin") |
| 3225 | |
| 3226 | def IncrementalOTA_GetBlockDifferences(self): |
| 3227 | """Called during incremental OTA installation and verification. |
| 3228 | Implementations should return a list of BlockDifference objects describing
| 3229 | the update on each additional partition.
| 3230 | """ |
| 3231 | return self._DoCall("IncrementalOTA_GetBlockDifferences") |
| 3232 | |
| 3233 | def IncrementalOTA_InstallEnd(self): |
| 3234 | """Called at the end of incremental OTA installation; typically |
| 3235 | this is used to install the image for the device's baseband |
| 3236 | processor.""" |
| 3237 | return self._DoCall("IncrementalOTA_InstallEnd") |
| 3238 | |
| 3239 | def VerifyOTA_Assertions(self): |
| 3240 | return self._DoCall("VerifyOTA_Assertions") |
| 3241 | |
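# Illustrative sketch (not part of the original module): a minimal device-specific
# module that DeviceSpecificParams above would load via -s/--device_specific. The
# path and the script/radio details are made up; the attributes available on 'info'
# depend on the keyword arguments the caller passed to DeviceSpecificParams.
#
#   # device/vendorname/devicename/releasetools.py
#   def FullOTA_Assertions(info):
#     pass  # add device-specific assertions here
#
#   def FullOTA_InstallEnd(info):
#     # 'info' is the DeviceSpecificParams instance (passed as the first argument).
#     info.script.Print("Installing hypothetical radio image...")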
| 3242 | |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3243 | class File(object): |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 3244 | def __init__(self, name, data, compress_size=None): |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3245 | self.name = name |
| 3246 | self.data = data |
| 3247 | self.size = len(data) |
YOUNG HO CHA | ccc5c40 | 2016-10-13 13:40:46 +0900 | [diff] [blame] | 3248 | self.compress_size = compress_size or self.size |
Doug Zongker | 55d9328 | 2011-01-25 17:03:34 -0800 | [diff] [blame] | 3249 | self.sha1 = sha1(data).hexdigest() |
| 3250 | |
| 3251 | @classmethod |
| 3252 | def FromLocalFile(cls, name, diskname): |
| 3253 | f = open(diskname, "rb") |
| 3254 | data = f.read() |
| 3255 | f.close() |
| 3256 | return File(name, data) |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3257 | |
| 3258 | def WriteToTemp(self): |
| 3259 | t = tempfile.NamedTemporaryFile() |
| 3260 | t.write(self.data) |
| 3261 | t.flush() |
| 3262 | return t |
| 3263 | |
Dan Willemsen | 2ee00d5 | 2017-03-05 19:51:56 -0800 | [diff] [blame] | 3264 | def WriteToDir(self, d): |
| 3265 | with open(os.path.join(d, self.name), "wb") as fp: |
| 3266 | fp.write(self.data) |
| 3267 | |
Geremy Condra | 36bd365 | 2014-02-06 19:45:10 -0800 | [diff] [blame] | 3268 | def AddToZip(self, z, compression=None): |
Tao Bao | f3282b4 | 2015-04-01 11:21:55 -0700 | [diff] [blame] | 3269 | ZipWriteStr(z, self.name, self.data, compress_type=compression) |
Doug Zongker | ea5d7a9 | 2010-09-12 15:26:16 -0700 | [diff] [blame] | 3270 | |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 3271 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3272 | DIFF_PROGRAM_BY_EXT = { |
| 3273 | ".gz": "imgdiff", |
| 3274 | ".zip": ["imgdiff", "-z"], |
| 3275 | ".jar": ["imgdiff", "-z"], |
| 3276 | ".apk": ["imgdiff", "-z"], |
| 3277 | ".img": "imgdiff", |
| 3278 | } |
| 3279 | |
| 3280 | |
| 3281 | class Difference(object): |
| 3282 | def __init__(self, tf, sf, diff_program=None): |
| 3283 | self.tf = tf |
| 3284 | self.sf = sf |
| 3285 | self.patch = None |
| 3286 | self.diff_program = diff_program |
| 3287 | |
| 3288 | def ComputePatch(self): |
| 3289 | """Compute the patch (as a string of data) needed to turn sf into |
| 3290 | tf. Returns the same tuple as GetPatch().""" |
| 3291 | |
| 3292 | tf = self.tf |
| 3293 | sf = self.sf |
| 3294 | |
| 3295 | if self.diff_program: |
| 3296 | diff_program = self.diff_program |
| 3297 | else: |
| 3298 | ext = os.path.splitext(tf.name)[1] |
| 3299 | diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff") |
| 3300 | |
| 3301 | ttemp = tf.WriteToTemp() |
| 3302 | stemp = sf.WriteToTemp() |
| 3303 | |
| 3304 | ext = os.path.splitext(tf.name)[1] |
| 3305 | |
| 3306 | try: |
| 3307 | ptemp = tempfile.NamedTemporaryFile() |
| 3308 | if isinstance(diff_program, list): |
| 3309 | cmd = copy.copy(diff_program) |
| 3310 | else: |
| 3311 | cmd = [diff_program] |
| 3312 | cmd.append(stemp.name) |
| 3313 | cmd.append(ttemp.name) |
| 3314 | cmd.append(ptemp.name) |
| 3315 | p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) |
| 3316 | err = [] |
| 3317 | |
| 3318 | def run(): |
| 3319 | _, e = p.communicate() |
| 3320 | if e: |
| 3321 | err.append(e) |
| 3322 | th = threading.Thread(target=run) |
| 3323 | th.start() |
| 3324 | th.join(timeout=300) # 5 mins |
| 3325 | if th.is_alive(): |
| 3326 | logger.warning("diff command timed out") |
| 3327 | p.terminate() |
| 3328 | th.join(5) |
| 3329 | if th.is_alive(): |
| 3330 | p.kill() |
| 3331 | th.join() |
| 3332 | |
| 3333 | if p.returncode != 0: |
| 3334 | logger.warning("Failure running %s:\n%s\n", cmd, "".join(err)) |
| 3335 | self.patch = None |
| 3336 | return None, None, None |
| 3337 | diff = ptemp.read() |
| 3338 | finally: |
| 3339 | ptemp.close() |
| 3340 | stemp.close() |
| 3341 | ttemp.close() |
| 3342 | |
| 3343 | self.patch = diff |
| 3344 | return self.tf, self.sf, self.patch |
| 3345 | |
| 3346 | def GetPatch(self): |
| 3347 | """Returns a tuple of (target_file, source_file, patch_data). |
| 3348 | |
| 3349 | patch_data may be None if ComputePatch hasn't been called, or if |
| 3350 | computing the patch failed. |
| 3351 | """ |
| 3352 | return self.tf, self.sf, self.patch |
| 3353 | |
| 3354 | |
| 3355 | def ComputeDifferences(diffs): |
| 3356 | """Call ComputePatch on all the Difference objects in 'diffs'.""" |
| 3357 | logger.info("%d diffs to compute", len(diffs)) |
| 3358 | |
| 3359 | # Do the largest files first, to try and reduce the long-pole effect. |
| 3360 | by_size = [(i.tf.size, i) for i in diffs] |
| 3361 | by_size.sort(reverse=True) |
| 3362 | by_size = [i[1] for i in by_size] |
| 3363 | |
| 3364 | lock = threading.Lock() |
| 3365 | diff_iter = iter(by_size) # accessed under lock |
| 3366 | |
| 3367 | def worker(): |
| 3368 | try: |
| 3369 | lock.acquire() |
| 3370 | for d in diff_iter: |
| 3371 | lock.release() |
| 3372 | start = time.time() |
| 3373 | d.ComputePatch() |
| 3374 | dur = time.time() - start |
| 3375 | lock.acquire() |
| 3376 | |
| 3377 | tf, sf, patch = d.GetPatch() |
| 3378 | if sf.name == tf.name: |
| 3379 | name = tf.name |
| 3380 | else: |
| 3381 | name = "%s (%s)" % (tf.name, sf.name) |
| 3382 | if patch is None: |
| 3383 | logger.error("patching failed! %40s", name) |
| 3384 | else: |
| 3385 | logger.info( |
| 3386 | "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch), |
| 3387 | tf.size, 100.0 * len(patch) / tf.size, name) |
| 3388 | lock.release() |
| 3389 | except Exception: |
| 3390 | logger.exception("Failed to compute diff from worker") |
| 3391 | raise |
| 3392 | |
| 3393 | # start worker threads; wait for them all to finish. |
| 3394 | threads = [threading.Thread(target=worker) |
| 3395 | for i in range(OPTIONS.worker_threads)] |
| 3396 | for th in threads: |
| 3397 | th.start() |
| 3398 | while threads: |
| 3399 | threads.pop().join() |
| 3400 | |
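# Illustrative sketch (not part of the original module): computing binary patches
# with the Difference class and ComputeDifferences() above. File names are made up.
#
#   tf = File.FromLocalFile("boot.img", "target/boot.img")
#   sf = File.FromLocalFile("boot.img", "source/boot.img")
#   diffs = [Difference(tf, sf)]        # diff program chosen from the file extension
#   ComputeDifferences(diffs)           # runs the diffs on OPTIONS.worker_threads threads
#   _, _, patch_data = diffs[0].GetPatch()
#   # patch_data is None if the diff program failed or timed out.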
| 3401 | |
| 3402 | class BlockDifference(object): |
| 3403 | def __init__(self, partition, tgt, src=None, check_first_block=False, |
| 3404 | version=None, disable_imgdiff=False): |
| 3405 | self.tgt = tgt |
| 3406 | self.src = src |
| 3407 | self.partition = partition |
| 3408 | self.check_first_block = check_first_block |
| 3409 | self.disable_imgdiff = disable_imgdiff |
| 3410 | |
| 3411 | if version is None: |
| 3412 | version = max( |
| 3413 | int(i) for i in |
| 3414 | OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) |
| 3415 | assert version >= 3 |
| 3416 | self.version = version |
| 3417 | |
| 3418 | b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads, |
| 3419 | version=self.version, |
| 3420 | disable_imgdiff=self.disable_imgdiff) |
| 3421 | self.path = os.path.join(MakeTempDir(), partition) |
| 3422 | b.Compute(self.path) |
| 3423 | self._required_cache = b.max_stashed_size |
| 3424 | self.touched_src_ranges = b.touched_src_ranges |
| 3425 | self.touched_src_sha1 = b.touched_src_sha1 |
| 3426 | |
| 3427 | # On devices with dynamic partitions, for new partitions, |
| 3428 | # src is None but OPTIONS.source_info_dict is not. |
| 3429 | if OPTIONS.source_info_dict is None: |
| 3430 | is_dynamic_build = OPTIONS.info_dict.get( |
| 3431 | "use_dynamic_partitions") == "true" |
| 3432 | is_dynamic_source = False |
| 3433 | else: |
| 3434 | is_dynamic_build = OPTIONS.source_info_dict.get( |
| 3435 | "use_dynamic_partitions") == "true" |
| 3436 | is_dynamic_source = partition in shlex.split( |
| 3437 | OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip()) |
| 3438 | |
| 3439 | is_dynamic_target = partition in shlex.split( |
| 3440 | OPTIONS.info_dict.get("dynamic_partition_list", "").strip()) |
| 3441 | |
| 3442 | # For dynamic partitions builds, check partition list in both source |
| 3443 | # and target build because new partitions may be added, and existing |
| 3444 | # partitions may be removed. |
| 3445 | is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target) |
| 3446 | |
| 3447 | if is_dynamic: |
| 3448 | self.device = 'map_partition("%s")' % partition |
| 3449 | else: |
| 3450 | if OPTIONS.source_info_dict is None: |
| 3451 | _, device_expr = GetTypeAndDeviceExpr("/" + partition, |
| 3452 | OPTIONS.info_dict) |
| 3453 | else: |
| 3454 | _, device_expr = GetTypeAndDeviceExpr("/" + partition, |
| 3455 | OPTIONS.source_info_dict) |
| 3456 | self.device = device_expr |
| 3457 | |
| 3458 | @property |
| 3459 | def required_cache(self): |
| 3460 | return self._required_cache |
| 3461 | |
| 3462 | def WriteScript(self, script, output_zip, progress=None, |
| 3463 | write_verify_script=False): |
| 3464 | if not self.src: |
| 3465 | # write the output unconditionally |
| 3466 | script.Print("Patching %s image unconditionally..." % (self.partition,)) |
| 3467 | else: |
| 3468 | script.Print("Patching %s image after verification." % (self.partition,)) |
| 3469 | |
| 3470 | if progress: |
| 3471 | script.ShowProgress(progress, 0) |
| 3472 | self._WriteUpdate(script, output_zip) |
| 3473 | |
| 3474 | if write_verify_script: |
| 3475 | self.WritePostInstallVerifyScript(script) |
| 3476 | |
| 3477 | def WriteStrictVerifyScript(self, script): |
| 3478 | """Verify all the blocks in the care_map, including clobbered blocks. |
| 3479 | |
| 3480 | This differs from the WriteVerifyScript() function: a) it prints different |
| 3481 | error messages; b) it doesn't allow half-way updated images to pass the |
| 3482 | verification.""" |
| 3483 | |
| 3484 | partition = self.partition |
| 3485 | script.Print("Verifying %s..." % (partition,)) |
| 3486 | ranges = self.tgt.care_map |
| 3487 | ranges_str = ranges.to_string_raw() |
| 3488 | script.AppendExtra( |
| 3489 | 'range_sha1(%s, "%s") == "%s" && ui_print(" Verified.") || ' |
| 3490 | 'ui_print("%s has unexpected contents.");' % ( |
| 3491 | self.device, ranges_str, |
| 3492 | self.tgt.TotalSha1(include_clobbered_blocks=True), |
| 3493 | self.partition)) |
| 3494 | script.AppendExtra("") |
| 3495 | |
| 3496 | def WriteVerifyScript(self, script, touched_blocks_only=False): |
| 3497 | partition = self.partition |
| 3498 | |
| 3499 | # full OTA |
| 3500 | if not self.src: |
| 3501 | script.Print("Image %s will be patched unconditionally." % (partition,)) |
| 3502 | |
| 3503 | # incremental OTA |
| 3504 | else: |
| 3505 | if touched_blocks_only: |
| 3506 | ranges = self.touched_src_ranges |
| 3507 | expected_sha1 = self.touched_src_sha1 |
| 3508 | else: |
| 3509 | ranges = self.src.care_map.subtract(self.src.clobbered_blocks) |
| 3510 | expected_sha1 = self.src.TotalSha1() |
| 3511 | |
| 3512 | # No blocks to be checked, skipping. |
| 3513 | if not ranges: |
| 3514 | return |
| 3515 | |
| 3516 | ranges_str = ranges.to_string_raw() |
| 3517 | script.AppendExtra( |
| 3518 | 'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, ' |
| 3519 | 'package_extract_file("%s.transfer.list"), "%s.new.dat", ' |
| 3520 | '"%s.patch.dat")) then' % ( |
| 3521 | self.device, ranges_str, expected_sha1, |
| 3522 | self.device, partition, partition, partition)) |
| 3523 | script.Print('Verified %s image...' % (partition,)) |
| 3524 | script.AppendExtra('else') |
| 3525 | |
| 3526 | if self.version >= 4: |
| 3527 | |
| 3528 | # Bug: 21124327 |
| 3529 | # When generating incrementals for the system and vendor partitions in |
| 3530 | # version 4 or newer, explicitly check the first block (which contains |
| 3531 | # the superblock) of the partition to see if it's what we expect. If |
| 3532 | # this check fails, give an explicit log message about the partition |
| 3533 | # having been remounted R/W (the most likely explanation). |
| 3534 | if self.check_first_block: |
| 3535 | script.AppendExtra('check_first_block(%s);' % (self.device,)) |
| 3536 | |
| 3537 | # If version >= 4, try block recovery before abort update |
| 3538 | if partition == "system": |
| 3539 | code = ErrorCode.SYSTEM_RECOVER_FAILURE |
| 3540 | else: |
| 3541 | code = ErrorCode.VENDOR_RECOVER_FAILURE |
| 3542 | script.AppendExtra(( |
| 3543 | 'ifelse (block_image_recover({device}, "{ranges}") && ' |
| 3544 | 'block_image_verify({device}, ' |
| 3545 | 'package_extract_file("{partition}.transfer.list"), ' |
| 3546 | '"{partition}.new.dat", "{partition}.patch.dat"), ' |
| 3547 | 'ui_print("{partition} recovered successfully."), ' |
| 3548 | 'abort("E{code}: {partition} partition fails to recover"));\n' |
| 3549 | 'endif;').format(device=self.device, ranges=ranges_str, |
| 3550 | partition=partition, code=code)) |
| 3551 | |
| 3552 | # Abort the OTA update. Note that the incremental OTA cannot be applied |
| 3553 | # even if it may match the checksum of the target partition. |
| 3554 | # a) If version < 3, operations like move and erase will make changes |
| 3555 | # unconditionally and damage the partition. |
| 3556 | # b) If version >= 3, it won't even reach here. |
| 3557 | else: |
| 3558 | if partition == "system": |
| 3559 | code = ErrorCode.SYSTEM_VERIFICATION_FAILURE |
| 3560 | else: |
| 3561 | code = ErrorCode.VENDOR_VERIFICATION_FAILURE |
| 3562 | script.AppendExtra(( |
| 3563 | 'abort("E%d: %s partition has unexpected contents");\n' |
| 3564 | 'endif;') % (code, partition)) |
| 3565 | |
| 3566 | def WritePostInstallVerifyScript(self, script): |
| 3567 | partition = self.partition |
| 3568 | script.Print('Verifying the updated %s image...' % (partition,)) |
| 3569 | # Unlike pre-install verification, clobbered_blocks should not be ignored. |
| 3570 | ranges = self.tgt.care_map |
| 3571 | ranges_str = ranges.to_string_raw() |
| 3572 | script.AppendExtra( |
| 3573 | 'if range_sha1(%s, "%s") == "%s" then' % ( |
| 3574 | self.device, ranges_str, |
| 3575 | self.tgt.TotalSha1(include_clobbered_blocks=True))) |
| 3576 | |
| 3577 | # Bug: 20881595 |
| 3578 | # Verify that extended blocks are really zeroed out. |
| 3579 | if self.tgt.extended: |
| 3580 | ranges_str = self.tgt.extended.to_string_raw() |
| 3581 | script.AppendExtra( |
| 3582 | 'if range_sha1(%s, "%s") == "%s" then' % ( |
| 3583 | self.device, ranges_str, |
| 3584 | self._HashZeroBlocks(self.tgt.extended.size()))) |
| 3585 | script.Print('Verified the updated %s image.' % (partition,)) |
| 3586 | if partition == "system": |
| 3587 | code = ErrorCode.SYSTEM_NONZERO_CONTENTS |
| 3588 | else: |
| 3589 | code = ErrorCode.VENDOR_NONZERO_CONTENTS |
| 3590 | script.AppendExtra( |
| 3591 | 'else\n' |
| 3592 | ' abort("E%d: %s partition has unexpected non-zero contents after ' |
| 3593 | 'OTA update");\n' |
| 3594 | 'endif;' % (code, partition)) |
| 3595 | else: |
| 3596 | script.Print('Verified the updated %s image.' % (partition,)) |
| 3597 | |
| 3598 | if partition == "system": |
| 3599 | code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS |
| 3600 | else: |
| 3601 | code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS |
| 3602 | |
| 3603 | script.AppendExtra( |
| 3604 | 'else\n' |
| 3605 | ' abort("E%d: %s partition has unexpected contents after OTA ' |
| 3606 | 'update");\n' |
| 3607 | 'endif;' % (code, partition)) |
| 3608 | |
| 3609 | def _WriteUpdate(self, script, output_zip): |
| 3610 | ZipWrite(output_zip, |
| 3611 | '{}.transfer.list'.format(self.path), |
| 3612 | '{}.transfer.list'.format(self.partition)) |
| 3613 | |
| 3614 | # For full OTA, compress the new.dat with brotli with quality 6 to reduce |
| 3615 | # its size. Quality 9 almost triples the compression time but doesn't
| 3616 | # further reduce the size too much. For a typical 1.8G system.new.dat |
| 3617 | # zip | brotli(quality 6) | brotli(quality 9) |
| 3618 | # compressed_size: 942M | 869M (~8% reduced) | 854M |
| 3619 | # compression_time: 75s | 265s | 719s |
| 3620 | # decompression_time: 15s | 25s | 25s |
| 3621 | |
| 3622 | if not self.src: |
| 3623 | brotli_cmd = ['brotli', '--quality=6', |
| 3624 | '--output={}.new.dat.br'.format(self.path), |
| 3625 | '{}.new.dat'.format(self.path)] |
| 3626 | print("Compressing {}.new.dat with brotli".format(self.partition)) |
| 3627 | RunAndCheckOutput(brotli_cmd) |
| 3628 | |
| 3629 | new_data_name = '{}.new.dat.br'.format(self.partition) |
| 3630 | ZipWrite(output_zip, |
| 3631 | '{}.new.dat.br'.format(self.path), |
| 3632 | new_data_name, |
| 3633 | compress_type=zipfile.ZIP_STORED) |
| 3634 | else: |
| 3635 | new_data_name = '{}.new.dat'.format(self.partition) |
| 3636 | ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name) |
| 3637 | |
| 3638 | ZipWrite(output_zip, |
| 3639 | '{}.patch.dat'.format(self.path), |
| 3640 | '{}.patch.dat'.format(self.partition), |
| 3641 | compress_type=zipfile.ZIP_STORED) |
| 3642 | |
| 3643 | if self.partition == "system": |
| 3644 | code = ErrorCode.SYSTEM_UPDATE_FAILURE |
| 3645 | else: |
| 3646 | code = ErrorCode.VENDOR_UPDATE_FAILURE |
| 3647 | |
| 3648 | call = ('block_image_update({device}, ' |
| 3649 | 'package_extract_file("{partition}.transfer.list"), ' |
| 3650 | '"{new_data_name}", "{partition}.patch.dat") ||\n' |
| 3651 | ' abort("E{code}: Failed to update {partition} image.");'.format( |
| 3652 | device=self.device, partition=self.partition, |
| 3653 | new_data_name=new_data_name, code=code)) |
| 3654 | script.AppendExtra(script.WordWrap(call)) |
| 3655 | |
| 3656 | def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use |
| 3657 | data = source.ReadRangeSet(ranges) |
| 3658 | ctx = sha1() |
| 3659 | |
| 3660 | for p in data: |
| 3661 | ctx.update(p) |
| 3662 | |
| 3663 | return ctx.hexdigest() |
| 3664 | |
| 3665 | def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use |
| 3666 | """Return the hash value for all zero blocks.""" |
| 3667 | zero_block = '\x00' * 4096 |
| 3668 | ctx = sha1() |
| 3669 | for _ in range(num_blocks): |
| 3670 | ctx.update(zero_block) |
| 3671 | |
| 3672 | return ctx.hexdigest() |
| 3673 | |
| 3674 | |
Tianjie Xu | 41976c7 | 2019-07-03 13:57:01 -0700 | [diff] [blame] | 3675 | # Expose these two classes to support vendor-specific scripts |
| 3676 | DataImage = images.DataImage |
| 3677 | EmptyImage = images.EmptyImage |
| 3678 | |
Tao Bao | 76def24 | 2017-11-21 09:25:31 -0800 | [diff] [blame] | 3679 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3680 | # map recovery.fstab's fs_types to mount/format "partition types" |
| 3681 | PARTITION_TYPES = { |
| 3682 | "ext4": "EMMC", |
| 3683 | "emmc": "EMMC", |
| 3684 | "f2fs": "EMMC", |
| 3685 | "squashfs": "EMMC", |
| 3686 | "erofs": "EMMC" |
| 3687 | } |
| 3688 | |
| 3689 | |
| 3690 | def GetTypeAndDevice(mount_point, info, check_no_slot=True): |
| 3691 | """ |
| 3692 | Use GetTypeAndDeviceExpr whenever possible. This function is kept for |
| 3693 | backwards compatibility. It aborts if the fstab entry has the slotselect option
| 3694 | (unless check_no_slot is explicitly set to False). |
| 3695 | """ |
| 3696 | fstab = info["fstab"] |
| 3697 | if fstab: |
| 3698 | if check_no_slot: |
| 3699 | assert not fstab[mount_point].slotselect, \ |
| 3700 | "Use GetTypeAndDeviceExpr instead" |
| 3701 | return (PARTITION_TYPES[fstab[mount_point].fs_type], |
| 3702 | fstab[mount_point].device) |
| 3703 | raise KeyError |
| 3704 | |
| 3705 | |
| 3706 | def GetTypeAndDeviceExpr(mount_point, info): |
| 3707 | """ |
| 3708 | Return the partition type of the mount point, and an edify expression that evaluates
| 3709 | to the device at runtime. |
| 3710 | """ |
| 3711 | fstab = info["fstab"] |
| 3712 | if fstab: |
| 3713 | p = fstab[mount_point] |
| 3714 | device_expr = '"%s"' % fstab[mount_point].device |
| 3715 | if p.slotselect: |
| 3716 | device_expr = 'add_slot_suffix(%s)' % device_expr |
| 3717 | return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr) |
| 3718 | raise KeyError |
| 3719 | |
Yifan Hong | bdb3201 | 2020-05-07 12:38:53 -0700 | [diff] [blame] | 3720 | |
| 3721 | def GetEntryForDevice(fstab, device): |
| 3722 | """ |
| 3723 | Returns: |
| 3724 | The first entry in fstab whose device is the given value. |
| 3725 | """ |
| 3726 | if not fstab: |
| 3727 | return None |
| 3728 | for mount_point in fstab: |
| 3729 | if fstab[mount_point].device == device: |
| 3730 | return fstab[mount_point] |
| 3731 | return None |
| 3732 | |
Kelvin Zhang | 0876c41 | 2020-06-23 15:06:58 -0400 | [diff] [blame] | 3733 | |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3734 | def ParseCertificate(data): |
Tao Bao | 17e4e61 | 2018-02-16 17:12:54 -0800 | [diff] [blame] | 3735 | """Parses a PEM-encoded certificate and converts it to DER encoding.
| 3736 | |
| 3737 | This gives the same result as `openssl x509 -in <filename> -outform DER`. |
| 3738 | |
| 3739 | Returns: |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 3740 | The decoded certificate bytes. |
Tao Bao | 17e4e61 | 2018-02-16 17:12:54 -0800 | [diff] [blame] | 3741 | """ |
| 3742 | cert_buffer = [] |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3743 | save = False |
| 3744 | for line in data.split("\n"): |
| 3745 | if "--END CERTIFICATE--" in line: |
| 3746 | break |
| 3747 | if save: |
Tao Bao | 17e4e61 | 2018-02-16 17:12:54 -0800 | [diff] [blame] | 3748 | cert_buffer.append(line) |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3749 | if "--BEGIN CERTIFICATE--" in line: |
| 3750 | save = True |
Tao Bao | da30cfa | 2017-12-01 16:19:46 -0800 | [diff] [blame] | 3751 | cert = base64.b64decode("".join(cert_buffer)) |
Baligh Uddin | beb6afd | 2013-11-13 00:22:34 +0000 | [diff] [blame] | 3752 | return cert |
Doug Zongker | c925382 | 2014-02-04 12:17:58 -0800 | [diff] [blame] | 3753 | |
Tao Bao | 04e1f01 | 2018-02-04 12:13:35 -0800 | [diff] [blame] | 3754 | |
| 3755 | def ExtractPublicKey(cert): |
| 3756 | """Extracts the public key (PEM-encoded) from the given certificate file. |
| 3757 | |
| 3758 | Args: |
| 3759 | cert: The certificate filename. |
| 3760 | |
| 3761 | Returns: |
| 3762 | The public key string. |
| 3763 | |
| 3764 | Raises: |
| 3765 | AssertionError: On non-zero return from 'openssl'. |
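| | |
| | This is equivalent to capturing the stdout of |
| | `openssl x509 -pubkey -noout -in <cert>`. |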
| 3766 | """ |
| 3767 | # The behavior of '-out' differs between openssl 1.1 and openssl 1.0. |
| 3768 | # While openssl 1.1 writes the key into the filename given after '-out', |
| 3769 | # openssl 1.0 (both 1.0.1 and 1.0.2) doesn't. So we collect the output from |
| 3770 | # stdout instead. |
| 3771 | cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert] |
| 3772 | proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) |
| 3773 | pubkey, stderrdata = proc.communicate() |
| 3774 | assert proc.returncode == 0, \ |
| 3775 | 'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata) |
| 3776 | return pubkey |
| 3777 | |
| 3778 | |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 3779 | def ExtractAvbPublicKey(avbtool, key): |
Tao Bao | 2cc0ca1 | 2019-03-15 10:44:43 -0700 | [diff] [blame] | 3780 | """Extracts the AVB public key from the given public or private key. |
| 3781 | |
| 3782 | Args: |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 3783 | avbtool: The AVB tool to use. |
Tao Bao | 2cc0ca1 | 2019-03-15 10:44:43 -0700 | [diff] [blame] | 3784 | key: The input key file, which should be PEM-encoded public or private key. |
| 3785 | |
| 3786 | Returns: |
| 3787 | The path to the extracted AVB public key file. |
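| | |
| | This is equivalent to running |
| | `avbtool extract_public_key --key <key> --output <output>`. |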
| 3788 | """ |
| 3789 | output = MakeTempFile(prefix='avb-', suffix='.avbpubkey') |
| 3790 | RunAndCheckOutput( |
Tao Bao | 1ac886e | 2019-06-26 11:58:22 -0700 | [diff] [blame] | 3791 | [avbtool, 'extract_public_key', "--key", key, "--output", output]) |
Tao Bao | 2cc0ca1 | 2019-03-15 10:44:43 -0700 | [diff] [blame] | 3792 | return output |
| 3793 | |
| 3794 | |
Abhishek Nigam | 1dfca46 | 2023-11-08 02:21:39 +0000 | [diff] [blame] | 3795 | def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img, |
| 3796 | info_dict=None): |
| 3797 | """Generates the recovery-from-boot patch and writes the script to output. |
| 3798 | |
| 3799 | Most of the space in the boot and recovery images is just the kernel, which is |
| 3800 | identical for the two, so the resulting patch should be efficient. Add it to |
| 3801 | the output zip, along with a shell script that is run from init.rc on first |
| 3802 | boot to actually do the patching and install the new recovery image. |
| 3803 | |
| 3804 | Args: |
| 3805 | input_dir: The top-level input directory of the target-files.zip. |
| 3806 | output_sink: The callback function that writes the result. |
| 3807 | recovery_img: File object for the recovery image. |
| 3808 | boot_img: File object for the boot image. |
| 3809 | info_dict: A dict returned by common.LoadInfoDict() on the input |
| 3810 | target_files. Will use OPTIONS.info_dict if None has been given. |
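| | |
| | A minimal sketch of an output_sink callback (staging_dir is hypothetical); it |
| | receives a destination path relative to the sink root and the contents as |
| | bytes (a real sink would also create intermediate directories): |
| | |
| |   def output_sink(path, data): |
| |     with open(os.path.join(staging_dir, path), 'wb') as f: |
| |       f.write(data) |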
| 3811 | """ |
| 3812 | if info_dict is None: |
| 3813 | info_dict = OPTIONS.info_dict |
| 3814 | |
| 3815 | full_recovery_image = info_dict.get("full_recovery_image") == "true" |
| 3816 | board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true" |
| 3817 | |
| 3818 | if board_uses_vendorimage: |
| 3819 | # In this case, the output sink is rooted at VENDOR |
| 3820 | recovery_img_path = "etc/recovery.img" |
| 3821 | recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat" |
| 3822 | sh_dir = "bin" |
| 3823 | else: |
| 3824 | # In this case the output sink is rooted at SYSTEM |
| 3825 | recovery_img_path = "vendor/etc/recovery.img" |
| 3826 | recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat" |
| 3827 | sh_dir = "vendor/bin" |
| 3828 | |
| 3829 | if full_recovery_image: |
| 3830 | output_sink(recovery_img_path, recovery_img.data) |
| 3831 | |
| 3832 | else: |
| 3833 | system_root_image = info_dict.get("system_root_image") == "true" |
| 3834 | include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true" |
| 3835 | include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true" |
| 3836 | path = os.path.join(input_dir, recovery_resource_dat_path) |
| 3837 | # With system-root-image, boot and recovery images will have mismatching |
| 3838 | # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff |
| 3839 | # to handle such a case. |
| 3840 | if system_root_image or include_recovery_dtbo or include_recovery_acpio: |
| 3841 | diff_program = ["bsdiff"] |
| 3842 | bonus_args = "" |
| 3843 | assert not os.path.exists(path) |
| 3844 | else: |
| 3845 | diff_program = ["imgdiff"] |
| 3846 | if os.path.exists(path): |
| 3847 | diff_program.append("-b") |
| 3848 | diff_program.append(path) |
| 3849 | bonus_args = "--bonus /vendor/etc/recovery-resource.dat" |
| 3850 | else: |
| 3851 | bonus_args = "" |
| 3852 | |
| 3853 | d = Difference(recovery_img, boot_img, diff_program=diff_program) |
| 3854 | _, _, patch = d.ComputePatch() |
| 3855 | output_sink("recovery-from-boot.p", patch) |
| 3856 | |
| 3857 | try: |
| 3858 | # The following GetTypeAndDevice()s need to use the path in the target |
| 3859 | # info_dict instead of source_info_dict. |
| 3860 | boot_type, boot_device = GetTypeAndDevice("/boot", info_dict, |
| 3861 | check_no_slot=False) |
| 3862 | recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict, |
| 3863 | check_no_slot=False) |
| 3864 | except KeyError: |
| 3865 | return |
| 3866 | |
| 3867 | if full_recovery_image: |
| 3868 | |
| 3869 | # Note that we use /vendor to refer to the recovery resources. This will |
| 3870 | # work for a separate vendor partition mounted at /vendor or a |
| 3871 | # /system/vendor subdirectory on the system partition, for which init will |
| 3872 | # create a symlink from /vendor to /system/vendor. |
| 3873 | |
| 3874 | sh = """#!/vendor/bin/sh |
| 3875 | if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then |
| 3876 | applypatch \\ |
| 3877 | --flash /vendor/etc/recovery.img \\ |
| 3878 | --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\ |
| 3879 | log -t recovery "Installing new recovery image: succeeded" || \\ |
| 3880 | log -t recovery "Installing new recovery image: failed" |
| 3881 | else |
| 3882 | log -t recovery "Recovery image already installed" |
| 3883 | fi |
| 3884 | """ % {'type': recovery_type, |
| 3885 | 'device': recovery_device, |
| 3886 | 'sha1': recovery_img.sha1, |
| 3887 | 'size': recovery_img.size} |
| 3888 | else: |
| 3889 | sh = """#!/vendor/bin/sh |
| 3890 | if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then |
| 3891 | applypatch %(bonus_args)s \\ |
| 3892 | --patch /vendor/recovery-from-boot.p \\ |
| 3893 | --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\ |
| 3894 | --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\ |
| 3895 | log -t recovery "Installing new recovery image: succeeded" || \\ |
| 3896 | log -t recovery "Installing new recovery image: failed" |
| 3897 | else |
| 3898 | log -t recovery "Recovery image already installed" |
| 3899 | fi |
| 3900 | """ % {'boot_size': boot_img.size, |
| 3901 | 'boot_sha1': boot_img.sha1, |
| 3902 | 'recovery_size': recovery_img.size, |
| 3903 | 'recovery_sha1': recovery_img.sha1, |
| 3904 | 'boot_type': boot_type, |
| 3905 | 'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)', |
| 3906 | 'recovery_type': recovery_type, |
| 3907 | 'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)', |
| 3908 | 'bonus_args': bonus_args} |
| 3909 | |
| 3910 | # The install script location moved from /system/etc to /system/bin in the L |
| 3911 | # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin. |
| 3912 | sh_location = os.path.join(sh_dir, "install-recovery.sh") |
| 3913 | |
| 3914 | logger.info("putting script in %s", sh_location) |
| 3915 | |
| 3916 | output_sink(sh_location, sh.encode()) |
| 3917 | |
| 3918 | |
| 3919 | class DynamicPartitionUpdate(object): |
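| | """Describes how a single dynamic partition is updated: its source/target |
| | group membership, the progress value to report, and the BlockDifference |
| | used to patch it. |
| | """ |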
| 3920 | def __init__(self, src_group=None, tgt_group=None, progress=None, |
| 3921 | block_difference=None): |
| 3922 | self.src_group = src_group |
| 3923 | self.tgt_group = tgt_group |
| 3924 | self.progress = progress |
| 3925 | self.block_difference = block_difference |
| 3926 | |
| 3927 | @property |
| 3928 | def src_size(self): |
| 3929 | if not self.block_difference: |
| 3930 | return 0 |
| 3931 | return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src) |
| 3932 | |
| 3933 | @property |
| 3934 | def tgt_size(self): |
| 3935 | if not self.block_difference: |
| 3936 | return 0 |
| 3937 | return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt) |
| 3938 | |
| 3939 | @staticmethod |
| 3940 | def _GetSparseImageSize(img): |
| 3941 | if not img: |
| 3942 | return 0 |
| 3943 | return img.blocksize * img.total_blocks |
| 3944 | |
| 3945 | |
| 3946 | class DynamicGroupUpdate(object): |
| 3947 | def __init__(self, src_size=None, tgt_size=None): |
| 3948 | # None: group does not exist. 0: no size limits. |
| 3949 | self.src_size = src_size |
| 3950 | self.tgt_size = tgt_size |
| 3951 | |
| 3952 | |
| 3953 | class DynamicPartitionsDifference(object): |
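| | """Computes the operations needed to update dynamic partitions and groups |
| | between a source and a target build, emitting both the edify commands and |
| | the dynamic_partitions_op_list consumed by update_dynamic_partitions(). |
| | When source_info_dict is None, a full OTA is assumed and all existing |
| | dynamic partitions and groups are removed first. |
| | """ |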
| 3954 | def __init__(self, info_dict, block_diffs, progress_dict=None, |
| 3955 | source_info_dict=None): |
| 3956 | if progress_dict is None: |
| 3957 | progress_dict = {} |
| 3958 | |
| 3959 | self._remove_all_before_apply = False |
| 3960 | if source_info_dict is None: |
| 3961 | self._remove_all_before_apply = True |
| 3962 | source_info_dict = {} |
| 3963 | |
| 3964 | block_diff_dict = collections.OrderedDict( |
| 3965 | [(e.partition, e) for e in block_diffs]) |
| 3966 | |
| 3967 | assert len(block_diff_dict) == len(block_diffs), \ |
| 3968 | "Duplicated BlockDifference object for {}".format( |
| 3969 | [partition for partition, count in |
| 3970 | collections.Counter(e.partition for e in block_diffs).items() |
| 3971 | if count > 1]) |
| 3972 | |
| 3973 | self._partition_updates = collections.OrderedDict() |
| 3974 | |
| 3975 | for p, block_diff in block_diff_dict.items(): |
| 3976 | self._partition_updates[p] = DynamicPartitionUpdate() |
| 3977 | self._partition_updates[p].block_difference = block_diff |
| 3978 | |
| 3979 | for p, progress in progress_dict.items(): |
| 3980 | if p in self._partition_updates: |
| 3981 | self._partition_updates[p].progress = progress |
| 3982 | |
| 3983 | tgt_groups = shlex.split(info_dict.get( |
| 3984 | "super_partition_groups", "").strip()) |
| 3985 | src_groups = shlex.split(source_info_dict.get( |
| 3986 | "super_partition_groups", "").strip()) |
| 3987 | |
| 3988 | for g in tgt_groups: |
| 3989 | for p in shlex.split(info_dict.get( |
| 3990 | "super_%s_partition_list" % g, "").strip()): |
| 3991 | assert p in self._partition_updates, \ |
| 3992 | "{} is in target super_{}_partition_list but no BlockDifference " \ |
| 3993 | "object is provided.".format(p, g) |
| 3994 | self._partition_updates[p].tgt_group = g |
| 3995 | |
| 3996 | for g in src_groups: |
| 3997 | for p in shlex.split(source_info_dict.get( |
| 3998 | "super_%s_partition_list" % g, "").strip()): |
| 3999 | assert p in self._partition_updates, \ |
| 4000 | "{} is in source super_{}_partition_list but no BlockDifference " \ |
| 4001 | "object is provided.".format(p, g) |
| 4002 | self._partition_updates[p].src_group = g |
| 4003 | |
| 4004 | target_dynamic_partitions = set(shlex.split(info_dict.get( |
| 4005 | "dynamic_partition_list", "").strip())) |
| 4006 | block_diffs_with_target = set(p for p, u in self._partition_updates.items() |
| 4007 | if u.tgt_size) |
| 4008 | assert block_diffs_with_target == target_dynamic_partitions, \ |
| 4009 | "Target Dynamic partitions: {}, BlockDifference with target: {}".format( |
| 4010 | list(target_dynamic_partitions), list(block_diffs_with_target)) |
| 4011 | |
| 4012 | source_dynamic_partitions = set(shlex.split(source_info_dict.get( |
| 4013 | "dynamic_partition_list", "").strip())) |
| 4014 | block_diffs_with_source = set(p for p, u in self._partition_updates.items() |
| 4015 | if u.src_size) |
| 4016 | assert block_diffs_with_source == source_dynamic_partitions, \ |
| 4017 | "Source Dynamic partitions: {}, BlockDifference with source: {}".format( |
| 4018 | list(source_dynamic_partitions), list(block_diffs_with_source)) |
| 4019 | |
| 4020 | if self._partition_updates: |
| 4021 | logger.info("Updating dynamic partitions %s", |
| 4022 | self._partition_updates.keys()) |
| 4023 | |
| 4024 | self._group_updates = collections.OrderedDict() |
| 4025 | |
| 4026 | for g in tgt_groups: |
| 4027 | self._group_updates[g] = DynamicGroupUpdate() |
| 4028 | self._group_updates[g].tgt_size = int(info_dict.get( |
| 4029 | "super_%s_group_size" % g, "0").strip()) |
| 4030 | |
| 4031 | for g in src_groups: |
| 4032 | if g not in self._group_updates: |
| 4033 | self._group_updates[g] = DynamicGroupUpdate() |
| 4034 | self._group_updates[g].src_size = int(source_info_dict.get( |
| 4035 | "super_%s_group_size" % g, "0").strip()) |
| 4036 | |
| 4037 | self._Compute() |
| 4038 | |
| 4039 | def WriteScript(self, script, output_zip, write_verify_script=False): |
| 4040 | script.Comment('--- Start patching dynamic partitions ---') |
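| | # Patch shrinking partitions before the metadata update (while their old, |
| | # larger extents still exist); the remaining partitions are patched after |
| | # update_dynamic_partitions() has resized them. |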
| 4041 | for p, u in self._partition_updates.items(): |
| 4042 | if u.src_size and u.tgt_size and u.src_size > u.tgt_size: |
| 4043 | script.Comment('Patch partition %s' % p) |
| 4044 | u.block_difference.WriteScript(script, output_zip, progress=u.progress, |
| 4045 | write_verify_script=False) |
| 4046 | |
| 4047 | op_list_path = MakeTempFile() |
| 4048 | with open(op_list_path, 'w') as f: |
| 4049 | for line in self._op_list: |
| 4050 | f.write('{}\n'.format(line)) |
| 4051 | |
| 4052 | ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list") |
| 4053 | |
| 4054 | script.Comment('Update dynamic partition metadata') |
| 4055 | script.AppendExtra('assert(update_dynamic_partitions(' |
| 4056 | 'package_extract_file("dynamic_partitions_op_list")));') |
| 4057 | |
| 4058 | if write_verify_script: |
| 4059 | for p, u in self._partition_updates.items(): |
| 4060 | if u.src_size and u.tgt_size and u.src_size > u.tgt_size: |
| 4061 | u.block_difference.WritePostInstallVerifyScript(script) |
| 4062 | script.AppendExtra('unmap_partition("%s");' % p) # ignore errors |
| 4063 | |
| 4064 | for p, u in self._partition_updates.items(): |
| 4065 | if u.tgt_size and u.src_size <= u.tgt_size: |
| 4066 | script.Comment('Patch partition %s' % p) |
| 4067 | u.block_difference.WriteScript(script, output_zip, progress=u.progress, |
| 4068 | write_verify_script=write_verify_script) |
| 4069 | if write_verify_script: |
| 4070 | script.AppendExtra('unmap_partition("%s");' % p) # ignore errors |
| 4071 | |
| 4072 | script.Comment('--- End patching dynamic partitions ---') |
| 4073 | |
| 4074 | def _Compute(self): |
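| | # Emit operations in an order that removes/shrinks partitions and groups |
| | # before growing or adding them, so earlier steps free the space that |
| | # later steps allocate. |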
| 4075 | self._op_list = list() |
| 4076 | |
| 4077 | def append(line): |
| 4078 | self._op_list.append(line) |
| 4079 | |
| 4080 | def comment(line): |
| 4081 | self._op_list.append("# %s" % line) |
| 4082 | |
| 4083 | if self._remove_all_before_apply: |
| 4084 | comment('Remove all existing dynamic partitions and groups before ' |
| 4085 | 'applying full OTA') |
| 4086 | append('remove_all_groups') |
| 4087 | |
| 4088 | for p, u in self._partition_updates.items(): |
| 4089 | if u.src_group and not u.tgt_group: |
| 4090 | append('remove %s' % p) |
| 4091 | |
| 4092 | for p, u in self._partition_updates.items(): |
| 4093 | if u.src_group and u.tgt_group and u.src_group != u.tgt_group: |
| 4094 | comment('Move partition %s from %s to default' % (p, u.src_group)) |
| 4095 | append('move %s default' % p) |
| 4096 | |
| 4097 | for p, u in self._partition_updates.items(): |
| 4098 | if u.src_size and u.tgt_size and u.src_size > u.tgt_size: |
| 4099 | comment('Shrink partition %s from %d to %d' % |
| 4100 | (p, u.src_size, u.tgt_size)) |
| 4101 | append('resize %s %d' % (p, u.tgt_size)) |
| 4102 | |
| 4103 | for g, u in self._group_updates.items(): |
| 4104 | if u.src_size is not None and u.tgt_size is None: |
| 4105 | append('remove_group %s' % g) |
| 4106 | if (u.src_size is not None and u.tgt_size is not None and |
| 4107 | u.src_size > u.tgt_size): |
| 4108 | comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size)) |
| 4109 | append('resize_group %s %d' % (g, u.tgt_size)) |
| 4110 | |
| 4111 | for g, u in self._group_updates.items(): |
| 4112 | if u.src_size is None and u.tgt_size is not None: |
| 4113 | comment('Add group %s with maximum size %d' % (g, u.tgt_size)) |
| 4114 | append('add_group %s %d' % (g, u.tgt_size)) |
| 4115 | if (u.src_size is not None and u.tgt_size is not None and |
| 4116 | u.src_size < u.tgt_size): |
| 4117 | comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size)) |
| 4118 | append('resize_group %s %d' % (g, u.tgt_size)) |
| 4119 | |
| 4120 | for p, u in self._partition_updates.items(): |
| 4121 | if u.tgt_group and not u.src_group: |
| 4122 | comment('Add partition %s to group %s' % (p, u.tgt_group)) |
| 4123 | append('add %s %s' % (p, u.tgt_group)) |
| 4124 | |
| 4125 | for p, u in self._partition_updates.items(): |
| 4126 | if u.tgt_size and u.src_size < u.tgt_size: |
| 4127 | comment('Grow partition %s from %d to %d' % |
| 4128 | (p, u.src_size, u.tgt_size)) |
| 4129 | append('resize %s %d' % (p, u.tgt_size)) |
| 4130 | |
| 4131 | for p, u in self._partition_updates.items(): |
| 4132 | if u.src_group and u.tgt_group and u.src_group != u.tgt_group: |
| 4133 | comment('Move partition %s from default to %s' % |
| 4134 | (p, u.tgt_group)) |
| 4135 | append('move %s %s' % (p, u.tgt_group)) |
| 4136 | |
| 4137 | |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4138 | def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4): |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4139 | """ |
Yifan Hong | 85ac501 | 2021-01-07 14:43:46 -0800 | [diff] [blame] | 4140 | Get build.prop from the ramdisk within the boot image. |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4141 | |
| 4142 | Args: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 4143 | boot_img: the boot image file. Ramdisk must be compressed with lz4 or gzip format. |
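| | ramdisk_format: the compression format of the ramdisk, RamdiskFormat.LZ4 (default) or RamdiskFormat.GZ. |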
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4144 | |
| 4145 | Returns: |
Yifan Hong | 85ac501 | 2021-01-07 14:43:46 -0800 | [diff] [blame] | 4146 | The path to the extracted properties file from the boot image's ramdisk, or None on failure. |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4147 | """ |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4148 | tmp_dir = MakeTempDir('boot_', suffix='.img') |
| 4149 | try: |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4150 | RunAndCheckOutput(['unpack_bootimg', '--boot_img', |
| 4151 | boot_img, '--out', tmp_dir]) |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4152 | ramdisk = os.path.join(tmp_dir, 'ramdisk') |
| 4153 | if not os.path.isfile(ramdisk): |
| 4154 | logger.warning('Unable to get boot image timestamp: no ramdisk in boot') |
| 4155 | return None |
| 4156 | uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk') |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4157 | if ramdisk_format == RamdiskFormat.LZ4: |
| 4158 | RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk]) |
| 4159 | elif ramdisk_format == RamdiskFormat.GZ: |
| 4160 | with open(ramdisk, 'rb') as input_stream: |
| 4161 | with open(uncompressed_ramdisk, 'wb') as output_stream: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 4162 | p2 = Run(['gzip', '-d'], stdin=input_stream.fileno(), |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4163 | stdout=output_stream.fileno()) |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4164 | p2.wait() |
| 4165 | else: |
Elliott Hughes | 97ad120 | 2023-06-20 16:41:58 -0700 | [diff] [blame] | 4166 | logger.error('Only support lz4 or gzip ramdisk format.') |
jiajia tang | f3f842b | 2021-03-17 21:49:44 +0800 | [diff] [blame] | 4167 | return None |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4168 | |
| 4169 | abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk) |
| 4170 | extracted_ramdisk = MakeTempDir('extracted_ramdisk') |
| 4171 | # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from |
| 4172 | # the host environment. |
| 4173 | RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'], |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4174 | cwd=extracted_ramdisk) |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4175 | |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4176 | for search_path in RAMDISK_BUILD_PROP_REL_PATHS: |
| 4177 | prop_file = os.path.join(extracted_ramdisk, search_path) |
| 4178 | if os.path.isfile(prop_file): |
Yifan Hong | 7dc5117 | 2021-01-12 11:27:39 -0800 | [diff] [blame] | 4179 | return prop_file |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4180 | logger.warning( |
| 4181 | 'Unable to get boot image timestamp: no %s in ramdisk', search_path) |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4182 | |
Yifan Hong | 7dc5117 | 2021-01-12 11:27:39 -0800 | [diff] [blame] | 4183 | return None |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4184 | |
Yifan Hong | 85ac501 | 2021-01-07 14:43:46 -0800 | [diff] [blame] | 4185 | except ExternalError as e: |
| 4186 | logger.warning('Unable to get boot image build props: %s', e) |
| 4187 | return None |
| 4188 | |
| 4189 | |
| 4190 | def GetBootImageTimestamp(boot_img): |
| 4191 | """ |
| 4192 | Get the timestamp from the ramdisk within the boot image. |
| 4193 | |
| 4194 | Args: |
| 4195 | boot_img: the boot image file. Ramdisk must be compressed with lz4 format. |
| 4196 | |
| 4197 | Returns: |
| 4198 | An integer that corresponds to the timestamp of the boot image, or None |
| 4199 | if the file has an unknown format. Raises an exception if an unexpected |
| 4200 | error has occurred. |
| 4201 | """ |
| 4202 | prop_file = GetBootImageBuildProp(boot_img) |
| 4203 | if not prop_file: |
| 4204 | return None |
| 4205 | |
| 4206 | props = PartitionBuildProps.FromBuildPropFile('boot', prop_file) |
| 4207 | if props is None: |
| 4208 | return None |
| 4209 | |
| 4210 | try: |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4211 | timestamp = props.GetProp('ro.bootimage.build.date.utc') |
| 4212 | if timestamp: |
| 4213 | return int(timestamp) |
Kelvin Zhang | 563750f | 2021-04-28 12:46:17 -0400 | [diff] [blame] | 4214 | logger.warning( |
| 4215 | 'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined') |
Yifan Hong | c65a054 | 2021-01-07 14:21:01 -0800 | [diff] [blame] | 4216 | return None |
| 4217 | |
| 4218 | except ExternalError as e: |
| 4219 | logger.warning('Unable to get boot image timestamp: %s', e) |
| 4220 | return None |
Kelvin Zhang | 2732413 | 2021-03-22 15:38:38 -0400 | [diff] [blame] | 4221 | |
| 4222 | |
Kelvin Zhang | 2639048 | 2021-11-02 14:31:10 -0700 | [diff] [blame] | 4223 | def IsSparseImage(filepath): |
Kelvin Zhang | 1caead0 | 2022-09-23 10:06:03 -0700 | [diff] [blame] | 4224 | if not os.path.exists(filepath): |
| 4225 | return False |
Kelvin Zhang | 2639048 | 2021-11-02 14:31:10 -0700 | [diff] [blame] | 4226 | with open(filepath, 'rb') as fp: |
| 4227 | # Magic for android sparse image format |
| 4228 | # https://source.android.com/devices/bootloader/images |
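| | # (0xED26FF3A stored little-endian.) |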
| 4229 | return fp.read(4) == b'\x3A\xFF\x26\xED' |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 4230 | |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 4231 | |
Kelvin Zhang | 2268091 | 2023-05-19 13:12:59 -0700 | [diff] [blame] | 4232 | def UnsparseImage(filepath, target_path=None): |
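| | """Converts an Android sparse image to a raw image with simg2img. |
| | |
| | No-op if filepath is not a sparse image. If target_path is None, filepath is |
| | converted in place; otherwise the raw image is written to target_path. |
| | """ |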
| 4233 | if not IsSparseImage(filepath): |
| 4234 | return |
| 4235 | if target_path is None: |
| 4236 | tmp_img = MakeTempFile(suffix=".img") |
| 4237 | RunAndCheckOutput(["simg2img", filepath, tmp_img]) |
| 4238 | os.rename(tmp_img, filepath) |
| 4239 | else: |
| 4240 | RunAndCheckOutput(["simg2img", filepath, target_path]) |
| 4241 | |
| 4242 | |
Kelvin Zhang | fcd731e | 2023-04-04 10:28:11 -0700 | [diff] [blame] | 4243 | def ParseUpdateEngineConfig(path: str): |
| 4244 | """Parse the update_engine config stored in file `path` |
| 4245 | Args |
| 4246 | path: Path to update_engine_config.txt file in target_files |
| 4247 | |
| 4248 | Returns |
| 4249 | A tuple of (major, minor) version number . E.g. (2, 8) |
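| | |
| | A well-formed config contains exactly two lines, e.g.: |
| |   PAYLOAD_MAJOR_VERSION=2 |
| |   PAYLOAD_MINOR_VERSION=8 |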
| 4250 | """ |
| 4251 | with open(path, "r") as fp: |
| 4252 | # update_engine_config.txt is only supposed to contain two lines, |
| 4253 | # PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION. 1024 should be more than |
| 4254 | # sufficient. If the length is more than that, something is wrong. |
| 4255 | data = fp.read(1024) |
| 4256 | major = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data) |
| 4257 | if not major: |
| 4258 | raise ValueError( |
| 4259 | f"{path} is an invalid update_engine config, missing PAYLOAD_MAJOR_VERSION {data}") |
| 4260 | minor = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data) |
| 4261 | if not minor: |
| 4262 | raise ValueError( |
| 4263 | f"{path} is an invalid update_engine config, missing PAYLOAD_MINOR_VERSION {data}") |
Kelvin Zhang | 9dbe2ce | 2023-04-17 16:38:08 -0700 | [diff] [blame] | 4264 | return (int(major.group(1)), int(minor.group(1))) |