blob: 441312cf25ead652c59bc6abc4c504918bd121d6 [file] [log] [blame]
Kelvin Zhang197772f2022-04-26 15:15:11 -07001# Copyright (C) 2022 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import argparse
16import logging
Satoshi Futenma1f93ce22023-04-18 16:41:35 +090017import shlex
Kelvin Zhang197772f2022-04-26 15:15:11 -070018import struct
19import sys
20import update_payload
21import tempfile
22import zipfile
23import os
24import care_map_pb2
25
26import common
27from typing import BinaryIO, List
28from update_metadata_pb2 import DeltaArchiveManifest, DynamicPartitionMetadata, DynamicPartitionGroup
29from ota_metadata_pb2 import OtaMetadata
30from update_payload import Payload
31
32from payload_signer import PayloadSigner
33from ota_utils import PayloadGenerator, METADATA_PROTO_NAME, FinalizeMetadata
34
35logger = logging.getLogger(__name__)
36
37CARE_MAP_ENTRY = "care_map.pb"
Satoshi Futenma1f93ce22023-04-18 16:41:35 +090038APEX_INFO_ENTRY = "apex_info.pb"
Kelvin Zhang197772f2022-04-26 15:15:11 -070039
40
def WriteDataBlob(payload: Payload, outfp: BinaryIO, read_size=1024*64):
  """Copy the raw data blob of |payload| into |outfp| in fixed-size chunks.

  Args:
    payload: Source payload; its blob is read via ReadDataBlob.
    outfp: Writable binary file object that receives the blob.
    read_size: Maximum number of bytes fetched per ReadDataBlob call.
  """
  total = payload.total_data_length
  offset = 0
  while offset < total:
    chunk_len = min(read_size, total - offset)
    outfp.write(payload.ReadDataBlob(offset, chunk_len))
    offset += chunk_len
46
47
def ConcatBlobs(payloads: List[Payload], outfp: BinaryIO):
  """Write the data blobs of every payload in |payloads| to |outfp|, in order."""
  for src in payloads:
    WriteDataBlob(src, outfp)
51
52
def TotalDataLength(partitions):
  """Return the total blob length implied by |partitions|.

  Scans partitions and their operations from the back and returns
  data_offset + data_length of the last operation that carries data;
  returns 0 when no operation has a positive data_length.
  """
  ends = (
      op.data_offset + op.data_length
      for partition in reversed(partitions)
      for op in reversed(partition.operations)
      if op.data_length > 0
  )
  return next(ends, 0)
59
60
def ExtendPartitionUpdates(partitions, new_partitions):
  """Append |new_partitions| to |partitions|, rebasing their data offsets.

  The new partitions' blob is assumed to be concatenated after the
  existing blob, so every data-carrying operation in the appended
  partitions gets its data_offset shifted by the current blob length.

  Args:
    partitions: Existing repeated PartitionUpdate container (mutated).
    new_partitions: Partition updates to append.
  """
  if not new_partitions:
    # Bug fix: with an empty |new_partitions|, the original slice
    # partitions[-0:] covered the WHOLE list, shifting every existing
    # operation's data_offset and corrupting the manifest.
    return
  prefix_blob_length = TotalDataLength(partitions)
  partitions.extend(new_partitions)
  for part in partitions[-len(new_partitions):]:
    for op in part.operations:
      if op.HasField("data_length") and op.data_length != 0:
        op.data_offset += prefix_blob_length
68
69
class DuplicatePartitionError(ValueError):
  """Raised when the same partition is updated by more than one input OTA."""
  pass
72
73
def MergeDynamicPartitionGroups(groups: List[DynamicPartitionGroup], new_groups: List[DynamicPartitionGroup]):
  """Merge |new_groups| into |groups| in place.

  Groups that share a name are merged: their partition lists are
  concatenated (they must not overlap) and the larger size wins.
  Groups that only exist in |new_groups| are appended.

  Raises:
    DuplicatePartitionError: if a pair of same-named groups lists a
      common partition.
  """
  pending = {incoming.name: incoming for incoming in new_groups}
  for existing in groups:
    incoming = pending.pop(existing.name, None)
    if incoming is None:
      continue
    overlap = set(existing.partition_names) & set(incoming.partition_names)
    if overlap:
      raise DuplicatePartitionError(
          f"Old group and new group should not have any intersections, {existing.partition_names}, {incoming.partition_names}, common partitions: {overlap}")
    existing.partition_names.extend(incoming.partition_names)
    existing.size = max(incoming.size, existing.size)
  for leftover in pending.values():
    groups.append(leftover)
90
91
def MergeDynamicPartitionMetadata(metadata: DynamicPartitionMetadata, new_metadata: DynamicPartitionMetadata):
  """Fold |new_metadata| into |metadata| in place.

  Group lists are merged; boolean capabilities are AND-ed together (a
  feature stays enabled only if every input enables it); compression
  parameters must agree across inputs; the highest cow_version wins.
  """
  MergeDynamicPartitionGroups(metadata.groups, new_metadata.groups)
  metadata.snapshot_enabled = bool(
      metadata.snapshot_enabled and new_metadata.snapshot_enabled)
  metadata.vabc_enabled = bool(
      metadata.vabc_enabled and new_metadata.vabc_enabled)
  assert metadata.vabc_compression_param == new_metadata.vabc_compression_param, f"{metadata.vabc_compression_param} vs. {new_metadata.vabc_compression_param}"
  metadata.cow_version = max(metadata.cow_version, new_metadata.cow_version)
98
99
def MergeManifests(payloads: List[Payload]) -> DeltaArchiveManifest:
  """Merge the manifests of |payloads| into one partial-update manifest.

  Returns None for an empty list, and the sole manifest unchanged for a
  single payload. With multiple payloads: partition updates are
  concatenated (data offsets rebased), dynamic partition metadata is
  merged, and apex_info entries are deduplicated by package name.

  Raises:
    DuplicatePartitionError: if two payloads update the same partition.
  """
  if not payloads:
    return None
  if len(payloads) == 1:
    return payloads[0].manifest

  output_manifest = DeltaArchiveManifest()
  first = payloads[0].manifest
  output_manifest.block_size = first.block_size
  output_manifest.partial_update = True
  output_manifest.dynamic_partition_metadata.snapshot_enabled = (
      first.dynamic_partition_metadata.snapshot_enabled)
  output_manifest.dynamic_partition_metadata.vabc_enabled = (
      first.dynamic_partition_metadata.vabc_enabled)
  output_manifest.dynamic_partition_metadata.vabc_compression_param = (
      first.dynamic_partition_metadata.vabc_compression_param)
  apex_info = {}
  for payload in payloads:
    manifest = payload.manifest
    assert manifest.block_size == output_manifest.block_size
    output_manifest.minor_version = max(
        output_manifest.minor_version, manifest.minor_version)
    output_manifest.max_timestamp = max(
        output_manifest.max_timestamp, manifest.max_timestamp)
    # Collect apex entries keyed by package name only; the merged
    # manifest is populated once below. The original also extend()ed
    # output_manifest.apex_info here, duplicating every entry.
    for apex in manifest.apex_info:
      apex_info[apex.package_name] = apex
    ExtendPartitionUpdates(output_manifest.partitions, manifest.partitions)
    try:
      MergeDynamicPartitionMetadata(
          output_manifest.dynamic_partition_metadata,
          manifest.dynamic_partition_metadata)
    except DuplicatePartitionError:
      logger.error(
          "OTA %s has duplicate partition with some of the previous OTAs", payload.name)
      raise

  for apex_name in sorted(apex_info):
    # append() a single message; the original called extend() with a
    # lone protobuf message, which is not iterable and raises TypeError.
    output_manifest.apex_info.append(apex_info[apex_name])

  return output_manifest
139
140
def MergePayloads(payloads: List[Payload]):
  """Concatenate all payload data blobs into a throwaway temporary file.

  The temporary blob file is deleted when the context manager exits;
  this only exercises the concatenation path without keeping output.
  """
  with tempfile.NamedTemporaryFile(prefix="payload_blob") as blob_file:
    ConcatBlobs(payloads, blob_file)
144
145
def MergeCareMap(paths: List[str]):
  """Merge the care_map.pb entries from the OTA zips at |paths|.

  Returns:
    The serialized merged CareMap proto, or b"" when none of the
    inputs carries a care map.
  """
  merged = care_map_pb2.CareMap()
  for zip_path in paths:
    with zipfile.ZipFile(zip_path, "r", allowZip64=True) as zfp:
      if CARE_MAP_ENTRY not in zfp.namelist():
        continue
      partial = care_map_pb2.CareMap()
      partial.ParseFromString(zfp.read(CARE_MAP_ENTRY))
      merged.partitions.extend(partial.partitions)
  if not merged.partitions:
    return b""
  return merged.SerializeToString()
158
159
def WriteHeaderAndManifest(manifest: DeltaArchiveManifest, fp: BinaryIO):
  """Write the update-engine payload header and serialized |manifest| to |fp|.

  The header layout is big-endian: 4-byte magic "CrAU", 64-bit major
  version (2), 64-bit manifest size, then a 32-bit zero (the metadata
  signature size field — unsigned at this point).
  """
  __MAGIC = b"CrAU"
  __MAJOR_VERSION = 2
  manifest_bytes = manifest.SerializeToString()
  # Plain string: the format ">4sQQL" has no placeholders (the original
  # spelled it as a pointless f-string).
  fp.write(struct.pack(">4sQQL", __MAGIC,
                       __MAJOR_VERSION, len(manifest_bytes), 0))
  fp.write(manifest_bytes)
167
168
def AddOtaMetadata(input_ota, metadata_ota, output_ota, package_key, pw):
  """Finalize |input_ota| into |output_ota| with metadata from |metadata_ota|.

  Reads the OTA metadata proto out of |metadata_ota| and writes the
  finalized, signed package to |output_ota| using |package_key| with
  password |pw|. Returns the output path.
  """
  metadata = OtaMetadata()
  with zipfile.ZipFile(metadata_ota, 'r') as zfp:
    metadata.ParseFromString(zfp.read(METADATA_PROTO_NAME))
  FinalizeMetadata(metadata, input_ota, output_ota,
                   package_key=package_key, pw=pw)
  return output_ota
176
177
def CheckOutput(output_ota):
  """Sanity-check the merged OTA by verifying payload operation data hashes."""
  update_payload.Payload(output_ota).CheckOpDataHash()
181
182
def CheckDuplicatePartitions(payloads: List[Payload]):
  """Verify that no partition is updated by more than one payload.

  Raises:
    DuplicatePartitionError: naming the two OTAs that both update the
      offending partition.
  """
  seen = {}
  for payload in payloads:
    groups = payload.manifest.dynamic_partition_metadata.groups
    for part in (name for group in groups for name in group.partition_names):
      previous = seen.get(part)
      if previous is not None:
        raise DuplicatePartitionError(
            f"OTA {previous.name} and {payload.name} have duplicating partition {part}")
      seen[part] = payload
192
def ApexInfo(file_paths):
  """Return the raw apex_info.pb bytes from a single target file, else None.

  With more than one input the apex info is deliberately dropped, since
  a merged package has no single meaningful apex_info entry.
  """
  if len(file_paths) > 1:
    logger.info("More than one target file specified, will ignore "
                "apex_info.pb (if any)")
    return None
  with zipfile.ZipFile(file_paths[0], "r", allowZip64=True) as zfp:
    if APEX_INFO_ENTRY not in zfp.namelist():
      return None
    return zfp.read(APEX_INFO_ENTRY)
203
def ParseSignerArgs(args):
  """Split a shell-quoted signer argument string into an argv-style list.

  Returns None when |args| is None so callers can distinguish "flag not
  provided" from an explicitly empty argument list.
  """
  return None if args is None else shlex.split(args)
208
def main(argv):
  """Entry point: merge the partial OTA packages listed in |argv|.

  Parses command-line flags, merges the payloads and manifests of the
  input packages, optionally signs the merged payload, and writes the
  final OTA zip to --output. Returns 0 on success.
  """
  parser = argparse.ArgumentParser(description='Merge multiple partial OTAs')
  parser.add_argument('packages', type=str, nargs='+',
                      help='Paths to OTA packages to merge')
  parser.add_argument('--package_key', type=str,
                      help='Paths to private key for signing payload')
  parser.add_argument('--search_path', type=str,
                      help='Search path for framework/signapk.jar')
  parser.add_argument('--payload_signer', type=str,
                      help='Path to custom payload signer')
  parser.add_argument('--payload_signer_args', type=ParseSignerArgs,
                      help='Arguments for payload signer if necessary')
  parser.add_argument('--payload_signer_maximum_signature_size', type=str,
                      help='Maximum signature size (in bytes) that would be '
                      'generated by the given payload signer')
  parser.add_argument('--output', type=str,
                      help='Paths to output merged ota', required=True)
  parser.add_argument('--metadata_ota', type=str,
                      help='Output zip will use build metadata from this OTA package, if unspecified, use the last OTA package in merge list')
  parser.add_argument('--private_key_suffix', type=str,
                      help='Suffix to be appended to package_key path', default=".pk8")
  parser.add_argument('-v', action="store_true", help="Enable verbose logging", dest="verbose")
  parser.epilog = ('This tool can also be used to resign a regular OTA. For a single regular OTA, '
                   'apex_info.pb will be written to output. When merging multiple OTAs, '
                   'apex_info.pb will not be written.')
  args = parser.parse_args(argv[1:])
  file_paths = args.packages

  common.OPTIONS.verbose = args.verbose
  if args.verbose:
    logger.setLevel(logging.INFO)

  logger.info(args)
  if args.search_path:
    common.OPTIONS.search_path = args.search_path

  # Build metadata defaults to the last input package unless overridden.
  metadata_ota = args.packages[-1]
  if args.metadata_ota is not None:
    metadata_ota = args.metadata_ota
  assert os.path.exists(metadata_ota)

  payloads = [Payload(path) for path in file_paths]

  CheckDuplicatePartitions(payloads)

  merged_manifest = MergeManifests(payloads)

  # Get signing keys once; reused for payload signing and metadata
  # finalization below (the original recomputed this a second time).
  key_passwords = common.GetKeyPasswords([args.package_key])

  apex_info_bytes = ApexInfo(file_paths)

  with tempfile.NamedTemporaryFile() as unsigned_payload:
    WriteHeaderAndManifest(merged_manifest, unsigned_payload)
    ConcatBlobs(payloads, unsigned_payload)
    unsigned_payload.flush()

    # Single generator instance (the original constructed one earlier
    # only to immediately discard it here).
    generator = PayloadGenerator()
    generator.payload_file = unsigned_payload.name
    logger.info("Payload size: %d", os.path.getsize(generator.payload_file))

    if args.package_key:
      logger.info("Signing payload...")
      # TODO: remove OPTIONS when no longer used as fallback in payload_signer
      common.OPTIONS.payload_signer_args = None
      common.OPTIONS.payload_signer_maximum_signature_size = None
      signer = PayloadSigner(args.package_key, args.private_key_suffix,
                             key_passwords[args.package_key],
                             payload_signer=args.payload_signer,
                             payload_signer_args=args.payload_signer_args,
                             payload_signer_maximum_signature_size=args.payload_signer_maximum_signature_size)
      generator.Sign(signer)

    logger.info("Payload size: %d", os.path.getsize(generator.payload_file))

    logger.info("Writing to %s", args.output)

    with tempfile.NamedTemporaryFile(prefix="signed_ota", suffix=".zip") as signed_ota:
      with zipfile.ZipFile(signed_ota, "w") as zfp:
        generator.WriteToZip(zfp)
        care_map_bytes = MergeCareMap(args.packages)
        if care_map_bytes:
          common.ZipWriteStr(zfp, CARE_MAP_ENTRY, care_map_bytes)
        if apex_info_bytes:
          logger.info("Writing %s", APEX_INFO_ENTRY)
          common.ZipWriteStr(zfp, APEX_INFO_ENTRY, apex_info_bytes)
      AddOtaMetadata(signed_ota.name, metadata_ota,
                     args.output, args.package_key, key_passwords[args.package_key])
  return 0
302
303
304
305
if __name__ == '__main__':
  # Install the default root logging handler before delegating to main().
  logging.basicConfig()
  sys.exit(main(sys.argv))