# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

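"""Tool for merging multiple partial OTA packages into a single package.

Example usage (package names are illustrative):
  merge_ota.py vendor_ota.zip odm_ota.zip --output merged.zip
"""
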
import argparse
import logging
import os
import struct
import sys
import tempfile
import zipfile
from typing import BinaryIO, List

import care_map_pb2
import common
import update_payload
from ota_metadata_pb2 import OtaMetadata
from ota_utils import PayloadGenerator, METADATA_PROTO_NAME, FinalizeMetadata
from payload_signer import PayloadSigner
from update_metadata_pb2 import (DeltaArchiveManifest, DynamicPartitionGroup,
                                 DynamicPartitionMetadata)
from update_payload import Payload

logger = logging.getLogger(__name__)

CARE_MAP_ENTRY = "care_map.pb"


def WriteDataBlob(payload: Payload, outfp: BinaryIO, read_size=1024 * 64):
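    """Copy the raw data blob of |payload| into |outfp|, |read_size| bytes at a time."""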
    for i in range(0, payload.total_data_length, read_size):
        blob = payload.ReadDataBlob(
            i, min(i + read_size, payload.total_data_length) - i)
        outfp.write(blob)


def ConcatBlobs(payloads: List[Payload], outfp: BinaryIO):
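    """Write the data blobs of all |payloads| back to back into |outfp|."""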
    for payload in payloads:
        WriteDataBlob(payload, outfp)


def TotalDataLength(partitions):
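    """Return the total length of the data blob referenced by |partitions|.

    This is the end offset of the last operation that carries data, or 0 if
    no operation does. For example (illustrative numbers), a final operation
    with data_offset=90 and data_length=10 yields a total length of 100.
    """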
    for partition in reversed(partitions):
        for op in reversed(partition.operations):
            if op.data_length > 0:
                return op.data_offset + op.data_length
    return 0


def ExtendPartitionUpdates(partitions, new_partitions):
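    """Append |new_partitions| to |partitions|, rebasing their data offsets.

    Every appended operation's data_offset is shifted by the length of the
    existing data blob, so offsets stay valid once ConcatBlobs() places the
    new payload's blob after the current one. E.g. (illustrative), with a
    100-byte existing blob, an appended op at data_offset=0 moves to 100.
    """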
    prefix_blob_length = TotalDataLength(partitions)
    partitions.extend(new_partitions)
    for part in partitions[-len(new_partitions):]:
        for op in part.operations:
            if op.HasField("data_length") and op.data_length != 0:
                op.data_offset += prefix_blob_length


class DuplicatePartitionError(ValueError):
    """Raised when multiple OTA payloads touch the same partition."""


def MergeDynamicPartitionGroups(groups: List[DynamicPartitionGroup],
                                new_groups: List[DynamicPartitionGroup]):
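    """Merge |new_groups| into |groups| in place.

    Same-named groups have their partition lists concatenated and their size
    set to the larger of the two; a partition present on both sides raises
    DuplicatePartitionError. Groups that only exist in |new_groups| are
    appended unchanged.
    """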
    new_groups = {new_group.name: new_group for new_group in new_groups}
    for group in groups:
        if group.name not in new_groups:
            continue
        new_group = new_groups[group.name]
        common_partitions = set(group.partition_names).intersection(
            set(new_group.partition_names))
        if len(common_partitions) != 0:
            raise DuplicatePartitionError(
                f"Old group and new group should not share partitions, "
                f"{group.partition_names} vs. {new_group.partition_names}, "
                f"common partitions: {common_partitions}")
        group.partition_names.extend(new_group.partition_names)
        group.size = max(new_group.size, group.size)
        del new_groups[group.name]
    for new_group in new_groups.values():
        groups.append(new_group)


def MergeDynamicPartitionMetadata(metadata: DynamicPartitionMetadata,
                                  new_metadata: DynamicPartitionMetadata):
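    """Merge |new_metadata| into |metadata| in place.

    snapshot_enabled and vabc_enabled are AND-ed (a feature stays enabled
    only if every payload enables it), cow_version takes the maximum, and
    the VABC compression parameters of all payloads must match.
    """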
    MergeDynamicPartitionGroups(metadata.groups, new_metadata.groups)
    metadata.snapshot_enabled &= new_metadata.snapshot_enabled
    metadata.vabc_enabled &= new_metadata.vabc_enabled
    assert metadata.vabc_compression_param == new_metadata.vabc_compression_param, \
        f"{metadata.vabc_compression_param} vs. {new_metadata.vabc_compression_param}"
    metadata.cow_version = max(metadata.cow_version, new_metadata.cow_version)


def MergeManifests(payloads: List[Payload]) -> DeltaArchiveManifest:
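    """Merge the manifests of |payloads| into one partial-update manifest.

    Returns the single manifest unchanged when one payload is given, and
    None when the list is empty.
    """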
    if len(payloads) == 0:
        return None
    if len(payloads) == 1:
        return payloads[0].manifest

    output_manifest = DeltaArchiveManifest()
    output_manifest.block_size = payloads[0].manifest.block_size
    output_manifest.partial_update = True
    output_manifest.dynamic_partition_metadata.snapshot_enabled = payloads[
        0].manifest.dynamic_partition_metadata.snapshot_enabled
    output_manifest.dynamic_partition_metadata.vabc_enabled = payloads[
        0].manifest.dynamic_partition_metadata.vabc_enabled
    output_manifest.dynamic_partition_metadata.vabc_compression_param = payloads[
        0].manifest.dynamic_partition_metadata.vabc_compression_param
    apex_info = {}
    for payload in payloads:
        manifest = payload.manifest
        assert manifest.block_size == output_manifest.block_size
        output_manifest.minor_version = max(
            output_manifest.minor_version, manifest.minor_version)
        output_manifest.max_timestamp = max(
            output_manifest.max_timestamp, manifest.max_timestamp)
        # Collect apex_info keyed by package name; a package appearing in
        # several payloads keeps the last occurrence.
        for apex in manifest.apex_info:
            apex_info[apex.package_name] = apex
        ExtendPartitionUpdates(output_manifest.partitions, manifest.partitions)
        try:
            MergeDynamicPartitionMetadata(
                output_manifest.dynamic_partition_metadata,
                manifest.dynamic_partition_metadata)
        except DuplicatePartitionError:
            logger.error(
                "OTA %s has a partition in common with one of the previous OTAs",
                payload.name)
            raise

    # Emit the deduplicated apex_info in sorted order. append() is required
    # here: each entry is a single message, not an iterable of messages.
    for apex_name in sorted(apex_info.keys()):
        output_manifest.apex_info.append(apex_info[apex_name])

    return output_manifest


def MergePayloads(payloads: List[Payload]):
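    """Concatenate the data blobs of |payloads| into a temporary file."""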
    with tempfile.NamedTemporaryFile(prefix="payload_blob") as tmpfile:
        ConcatBlobs(payloads, tmpfile)


def MergeCareMap(paths: List[str]):
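    """Merge the care_map.pb entries of the OTA packages at |paths|.

    Returns the serialized merged care map, or b"" if none of the packages
    contains a care map.
    """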
    care_map = care_map_pb2.CareMap()
    for path in paths:
        with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
            if CARE_MAP_ENTRY in zfp.namelist():
                care_map_bytes = zfp.read(CARE_MAP_ENTRY)
                partial_care_map = care_map_pb2.CareMap()
                partial_care_map.ParseFromString(care_map_bytes)
                care_map.partitions.extend(partial_care_map.partitions)
    if len(care_map.partitions) == 0:
        return b""
    return care_map.SerializeToString()


def WriteHeaderAndManifest(manifest: DeltaArchiveManifest, fp: BinaryIO):
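    """Write the update_engine payload header, then the serialized |manifest|.

    Header layout (struct format ">4sQQL", 24 bytes): magic "CrAU", major
    version, manifest size, and metadata signature size, written as 0 here.
    """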
    __MAGIC = b"CrAU"
    __MAJOR_VERSION = 2
    manifest_bytes = manifest.SerializeToString()
    fp.write(struct.pack(">4sQQL", __MAGIC,
                         __MAJOR_VERSION, len(manifest_bytes), 0))
    fp.write(manifest_bytes)


def AddOtaMetadata(input_ota, metadata_ota, output_ota, package_key, pw):
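    """Reuse the OTA metadata of |metadata_ota| for |input_ota|.

    Reads the metadata proto from |metadata_ota|, then FinalizeMetadata()
    signs with |package_key| and writes the final package to |output_ota|.
    """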
    with zipfile.ZipFile(metadata_ota, 'r') as zfp:
        metadata = OtaMetadata()
        metadata.ParseFromString(zfp.read(METADATA_PROTO_NAME))
        FinalizeMetadata(metadata, input_ota, output_ota,
                         package_key=package_key, pw=pw)
        return output_ota


def CheckOutput(output_ota):
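    """Verify that each operation's data in |output_ota|'s payload matches its hash."""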
    payload = update_payload.Payload(output_ota)
    payload.CheckOpDataHash()


def CheckDuplicatePartitions(payloads: List[Payload]):
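    """Raise DuplicatePartitionError if a partition appears in more than one payload."""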
    partition_to_ota = {}
    for payload in payloads:
        for group in payload.manifest.dynamic_partition_metadata.groups:
            for part in group.partition_names:
                if part in partition_to_ota:
                    raise DuplicatePartitionError(
                        f"OTA {partition_to_ota[part].name} and {payload.name} "
                        f"have duplicate partition {part}")
                partition_to_ota[part] = payload


def main(argv):
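    """Parse arguments, merge the given OTA packages, sign, and write the output."""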
    parser = argparse.ArgumentParser(description='Merge multiple partial OTAs')
    parser.add_argument('packages', type=str, nargs='+',
                        help='Paths to the OTA packages to merge')
    parser.add_argument('--package_key', type=str,
                        help='Path to the private key for signing the payload')
    parser.add_argument('--search_path', type=str,
                        help='Search path for framework/signapk.jar')
    parser.add_argument('--output', type=str,
                        help='Path to the output merged OTA', required=True)
    parser.add_argument('--metadata_ota', type=str,
                        help='Take build metadata from this OTA package; if '
                             'unspecified, the last OTA package in the merge '
                             'list is used')
    parser.add_argument('--private_key_suffix', type=str,
                        help='Suffix appended to the package_key path',
                        default=".pk8")
    parser.add_argument('-v', action="store_true",
                        help="Enable verbose logging", dest="verbose")
    args = parser.parse_args(argv[1:])
    file_paths = args.packages

    common.OPTIONS.verbose = args.verbose
    if args.verbose:
        logger.setLevel(logging.INFO)

    logger.info(args)
    if args.search_path:
        common.OPTIONS.search_path = args.search_path

    metadata_ota = args.packages[-1]
    if args.metadata_ota is not None:
        metadata_ota = args.metadata_ota
        assert os.path.exists(metadata_ota)

    payloads = [Payload(path) for path in file_paths]

    CheckDuplicatePartitions(payloads)

    merged_manifest = MergeManifests(payloads)

    with tempfile.NamedTemporaryFile() as unsigned_payload:
        WriteHeaderAndManifest(merged_manifest, unsigned_payload)
        ConcatBlobs(payloads, unsigned_payload)
        unsigned_payload.flush()

        generator = PayloadGenerator()
        generator.payload_file = unsigned_payload.name
        logger.info("Payload size: %d", os.path.getsize(generator.payload_file))

        if args.package_key:
            logger.info("Signing payload...")
            signer = PayloadSigner(args.package_key, args.private_key_suffix)
            generator.Sign(signer)

        logger.info("Payload size: %d", os.path.getsize(generator.payload_file))

        logger.info("Writing to %s", args.output)
        key_passwords = common.GetKeyPasswords([args.package_key])
        with tempfile.NamedTemporaryFile(prefix="signed_ota", suffix=".zip") as signed_ota:
            with zipfile.ZipFile(signed_ota, "w") as zfp:
                generator.WriteToZip(zfp)
                care_map_bytes = MergeCareMap(args.packages)
                if care_map_bytes:
                    zfp.writestr(CARE_MAP_ENTRY, care_map_bytes)
            AddOtaMetadata(signed_ota.name, metadata_ota,
                           args.output, args.package_key,
                           key_passwords[args.package_key])
    return 0


if __name__ == '__main__':
    logging.basicConfig()
    sys.exit(main(sys.argv))