blob: 3f644448c54830d95ed6365e298fb5e4ff516211 [file] [log] [blame]
Amin Hassanif94b6432018-01-26 17:39:47 -08001#
2# Copyright (C) 2013 The Android Open Source Project
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15#
Gilad Arnold553b0ec2013-01-26 01:00:39 -080016
17"""Applying a Chrome OS update payload.
18
19This module is used internally by the main Payload class for applying an update
20payload. The interface for invoking the applier is as follows:
21
22 applier = PayloadApplier(payload)
23 applier.Run(...)
24
25"""
26
Allie Wood12f59aa2015-04-06 11:05:12 -070027from __future__ import print_function
28
Gilad Arnold553b0ec2013-01-26 01:00:39 -080029import array
30import bz2
31import hashlib
Gilad Arnold658185a2013-05-08 17:57:54 -070032import itertools
Amin Hassani0de7f782017-12-07 12:13:03 -080033# Not everywhere we can have the lzma library so we ignore it if we didn't have
34# it because it is not going to be used. For example, 'cros flash' uses
35# devserver code which eventually loads this file, but the lzma library is not
36# included in the client test devices, and it is not necessary to do so. But
37# lzma is not used in 'cros flash' so it should be fine. Python 3.x include
38# lzma, but for backward compatibility with Python 2.7, backports-lzma is
39# needed.
40try:
41 import lzma
42except ImportError:
43 try:
44 from backports import lzma
45 except ImportError:
46 pass
Gilad Arnold553b0ec2013-01-26 01:00:39 -080047import os
48import shutil
49import subprocess
50import sys
51import tempfile
52
Amin Hassanib05a65a2017-12-18 15:15:32 -080053from update_payload import common
54from update_payload.error import PayloadError
Gilad Arnold553b0ec2013-01-26 01:00:39 -080055
56
57#
58# Helper functions.
59#
Gilad Arnold382df5c2013-05-03 12:49:28 -070060def _VerifySha256(file_obj, expected_hash, name, length=-1):
Gilad Arnold553b0ec2013-01-26 01:00:39 -080061 """Verifies the SHA256 hash of a file.
62
63 Args:
64 file_obj: file object to read
65 expected_hash: the hash digest we expect to be getting
66 name: name string of this hash, for error reporting
Gilad Arnold382df5c2013-05-03 12:49:28 -070067 length: precise length of data to verify (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -070068
Gilad Arnold553b0ec2013-01-26 01:00:39 -080069 Raises:
Gilad Arnold382df5c2013-05-03 12:49:28 -070070 PayloadError if computed hash doesn't match expected one, or if fails to
71 read the specified length of data.
Gilad Arnold553b0ec2013-01-26 01:00:39 -080072 """
Gilad Arnold553b0ec2013-01-26 01:00:39 -080073 hasher = hashlib.sha256()
74 block_length = 1024 * 1024
Gilad Arnold382df5c2013-05-03 12:49:28 -070075 max_length = length if length >= 0 else sys.maxint
Gilad Arnold553b0ec2013-01-26 01:00:39 -080076
Gilad Arnold382df5c2013-05-03 12:49:28 -070077 while max_length > 0:
Gilad Arnold553b0ec2013-01-26 01:00:39 -080078 read_length = min(max_length, block_length)
79 data = file_obj.read(read_length)
80 if not data:
81 break
82 max_length -= len(data)
83 hasher.update(data)
84
Gilad Arnold382df5c2013-05-03 12:49:28 -070085 if length >= 0 and max_length > 0:
86 raise PayloadError(
87 'insufficient data (%d instead of %d) when verifying %s' %
88 (length - max_length, length, name))
89
Gilad Arnold553b0ec2013-01-26 01:00:39 -080090 actual_hash = hasher.digest()
91 if actual_hash != expected_hash:
92 raise PayloadError('%s hash (%s) not as expected (%s)' %
Gilad Arnold96405372013-05-04 00:24:58 -070093 (name, common.FormatSha256(actual_hash),
94 common.FormatSha256(expected_hash)))
Gilad Arnold553b0ec2013-01-26 01:00:39 -080095
96
def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  Avoids needless copying: each chunk is appended directly into a single
  character array.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)

  Returns:
    A character array containing the concatenated read data.
  """
  result = array.array('c')
  remaining = sys.maxint if max_length < 0 else max_length
  for extent in extents:
    if not remaining:
      break
    chunk_length = min(remaining, extent.num_blocks * block_size)

    if extent.start_block == common.PSEUDO_EXTENT_MARKER:
      # Pseudo-extents have no backing blocks; they stand for zero-filled data.
      result.extend(itertools.repeat('\0', chunk_length))
    else:
      file_obj.seek(extent.start_block * block_size)
      result.fromfile(file_obj, chunk_length)

    remaining -= chunk_length

  return result
129
130
def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  Chunks are sliced out as buffer views, so no data copy is made on the way
  out.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.
  """
  offset = 0
  remaining = len(data)
  for extent, extent_name in common.ExtentIter(extents, base_name):
    if not remaining:
      raise PayloadError('%s: more write extents than data' % extent_name)
    chunk_length = min(remaining, extent.num_blocks * block_size)

    # Pseudo-extents denote ranges with no backing blocks; skip the write.
    if extent.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(extent.start_block * block_size)
      file_obj.write(buffer(data, offset, chunk_length))

    offset += chunk_length
    remaining -= chunk_length

  if remaining:
    raise PayloadError('%s: more data than write extents' % base_name)
164
165
def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
  """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).

  Raises:
    PayloadError if data_length is too short or too long.
  """
  arg = ''
  pad_off = pad_len = 0
  if data_length < 0:
    # sys.maxsize works on both Python 2.6+ and Python 3 (sys.maxint is
    # Python 2 only); it merely acts as an "unbounded" sentinel here.
    data_length = sys.maxsize
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more extents than total data length' % ex_name)

    # A pseudo-extent is encoded with a -1 offset; bspatch zero-fills it.
    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
    start_byte = -1 if is_pseudo else ex.start_block * block_size
    num_bytes = ex.num_blocks * block_size
    if data_length < num_bytes:
      # We're only padding a real extent.
      if not is_pseudo:
        pad_off = start_byte + data_length
        pad_len = num_bytes - data_length

      num_bytes = data_length

    # '(arg and ",")' prepends a comma only after the first entry.
    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
    data_length -= num_bytes

  if data_length:
    raise PayloadError('%s: extents not covering full data length' % base_name)

  return arg, pad_off, pad_len
210
211
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800212#
213# Payload application.
214#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.
  """

  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
               puffpatch_path=None, truncate_to_expected_size=True):
    """Initialize the applier.

    Args:
      payload: the payload object to check
      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      puffpatch_path: path to the puffpatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)
    """
    # NOTE(review): asserts are stripped under 'python -O'; the payload must
    # already have been initialized before an applier is built on top of it.
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size  # Block size in bytes.
    self.minor_version = payload.manifest.minor_version  # Delta minor version.
    self.bsdiff_in_place = bsdiff_in_place
    # Fall back to binaries resolved via $PATH when no explicit path is given.
    self.bspatch_path = bspatch_path or 'bspatch'
    self.puffpatch_path = puffpatch_path or 'puffin'
    self.truncate_to_expected_size = truncate_to_expected_size
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800243
244 def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
Amin Hassani0de7f782017-12-07 12:13:03 -0800245 """Applies a REPLACE{,_BZ,_XZ} operation.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800246
247 Args:
248 op: the operation object
249 op_name: name string for error reporting
250 out_data: the data to be written
251 part_file: the partition file object
252 part_size: the size of the partition
Allie Wood12f59aa2015-04-06 11:05:12 -0700253
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800254 Raises:
255 PayloadError if something goes wrong.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800256 """
257 block_size = self.block_size
258 data_length = len(out_data)
259
260 # Decompress data if needed.
261 if op.type == common.OpType.REPLACE_BZ:
262 out_data = bz2.decompress(out_data)
263 data_length = len(out_data)
Amin Hassani0de7f782017-12-07 12:13:03 -0800264 elif op.type == common.OpType.REPLACE_XZ:
265 # pylint: disable=no-member
266 out_data = lzma.decompress(out_data)
267 data_length = len(out_data)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800268
269 # Write data to blocks specified in dst extents.
270 data_start = 0
271 for ex, ex_name in common.ExtentIter(op.dst_extents,
272 '%s.dst_extents' % op_name):
273 start_block = ex.start_block
274 num_blocks = ex.num_blocks
275 count = num_blocks * block_size
276
277 # Make sure it's not a fake (signature) operation.
278 if start_block != common.PSEUDO_EXTENT_MARKER:
279 data_end = data_start + count
280
281 # Make sure we're not running past partition boundary.
282 if (start_block + num_blocks) * block_size > part_size:
283 raise PayloadError(
284 '%s: extent (%s) exceeds partition size (%d)' %
285 (ex_name, common.FormatExtent(ex, block_size),
286 part_size))
287
288 # Make sure that we have enough data to write.
289 if data_end >= data_length + block_size:
290 raise PayloadError(
291 '%s: more dst blocks than data (even with padding)')
292
293 # Pad with zeros if necessary.
294 if data_end > data_length:
295 padding = data_end - data_length
296 out_data += '\0' * padding
297
298 self.payload.payload_file.seek(start_block * block_size)
299 part_file.seek(start_block * block_size)
300 part_file.write(out_data[data_start:data_end])
301
302 data_start += count
303
304 # Make sure we wrote all data.
305 if data_start < data_length:
306 raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
307 (op_name, data_start, data_length))
308
Amin Hassani8ad22ba2017-10-11 10:15:11 -0700309 def _ApplyZeroOperation(self, op, op_name, part_file):
310 """Applies a ZERO operation.
311
312 Args:
313 op: the operation object
314 op_name: name string for error reporting
315 part_file: the partition file object
316
317 Raises:
318 PayloadError if something goes wrong.
319 """
320 block_size = self.block_size
321 base_name = '%s.dst_extents' % op_name
322
323 # Iterate over the extents and write zero.
Amin Hassanib05a65a2017-12-18 15:15:32 -0800324 # pylint: disable=unused-variable
Amin Hassani8ad22ba2017-10-11 10:15:11 -0700325 for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
326 # Only do actual writing if this is not a pseudo-extent.
327 if ex.start_block != common.PSEUDO_EXTENT_MARKER:
328 part_file.seek(ex.start_block * block_size)
329 part_file.write('\0' * (ex.num_blocks * block_size))
330
Allie Wood12f59aa2015-04-06 11:05:12 -0700331 def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
332 new_part_file):
333 """Applies a SOURCE_COPY operation.
334
335 Args:
336 op: the operation object
337 op_name: name string for error reporting
338 old_part_file: the old partition file object
339 new_part_file: the new partition file object
340
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800341 Raises:
342 PayloadError if something goes wrong.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800343 """
Allie Wood12f59aa2015-04-06 11:05:12 -0700344 if not old_part_file:
345 raise PayloadError(
346 '%s: no source partition file provided for operation type (%d)' %
347 (op_name, op.type))
348
349 block_size = self.block_size
350
351 # Gather input raw data from src extents.
352 in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
353
354 # Dump extracted data to dst extents.
355 _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
356 '%s.dst_extents' % op_name)
357
Amin Hassaniefa62d92017-11-09 13:46:56 -0800358 def _BytesInExtents(self, extents, base_name):
359 """Counts the length of extents in bytes.
360
361 Args:
362 extents: The list of Extents.
363 base_name: For error reporting.
364
365 Returns:
366 The number of bytes in extents.
367 """
368
369 length = 0
Amin Hassanib05a65a2017-12-18 15:15:32 -0800370 # pylint: disable=unused-variable
Amin Hassaniefa62d92017-11-09 13:46:56 -0800371 for ex, ex_name in common.ExtentIter(extents, base_name):
372 length += ex.num_blocks * self.block_size
373 return length
374
Sen Jiang92161a72016-06-28 16:09:38 -0700375 def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
376 new_part_file):
Amin Hassaniefa62d92017-11-09 13:46:56 -0800377 """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.
Allie Wood12f59aa2015-04-06 11:05:12 -0700378
379 Args:
380 op: the operation object
381 op_name: name string for error reporting
382 patch_data: the binary patch content
383 old_part_file: the source partition file object
384 new_part_file: the target partition file object
385
386 Raises:
387 PayloadError if something goes wrong.
Allie Wood12f59aa2015-04-06 11:05:12 -0700388 """
389 if not old_part_file:
390 raise PayloadError(
391 '%s: no source partition file provided for operation type (%d)' %
392 (op_name, op.type))
393
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800394 block_size = self.block_size
395
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800396 # Dump patch data to file.
397 with tempfile.NamedTemporaryFile(delete=False) as patch_file:
398 patch_file_name = patch_file.name
399 patch_file.write(patch_data)
400
Allie Wood12f59aa2015-04-06 11:05:12 -0700401 if (hasattr(new_part_file, 'fileno') and
Amin Hassanicdeb6e62017-10-11 10:15:11 -0700402 ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
Gilad Arnold272a4992013-05-08 13:12:53 -0700403 # Construct input and output extents argument for bspatch.
Amin Hassaniefa62d92017-11-09 13:46:56 -0800404
Gilad Arnold272a4992013-05-08 13:12:53 -0700405 in_extents_arg, _, _ = _ExtentsToBspatchArg(
406 op.src_extents, block_size, '%s.src_extents' % op_name,
Amin Hassaniefa62d92017-11-09 13:46:56 -0800407 data_length=op.src_length if op.src_length else
408 self._BytesInExtents(op.src_extents, "%s.src_extents"))
Gilad Arnold272a4992013-05-08 13:12:53 -0700409 out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
410 op.dst_extents, block_size, '%s.dst_extents' % op_name,
Amin Hassaniefa62d92017-11-09 13:46:56 -0800411 data_length=op.dst_length if op.dst_length else
412 self._BytesInExtents(op.dst_extents, "%s.dst_extents"))
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800413
Allie Wood12f59aa2015-04-06 11:05:12 -0700414 new_file_name = '/dev/fd/%d' % new_part_file.fileno()
415 # Diff from source partition.
416 old_file_name = '/dev/fd/%d' % old_part_file.fileno()
417
Amin Hassani0f59a9a2019-09-27 10:24:31 -0700418 if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
Amin Hassanicdeb6e62017-10-11 10:15:11 -0700419 # Invoke bspatch on partition file with extents args.
420 bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
421 patch_file_name, in_extents_arg, out_extents_arg]
422 subprocess.check_call(bspatch_cmd)
423 elif op.type == common.OpType.PUFFDIFF:
424 # Invoke puffpatch on partition file with extents args.
425 puffpatch_cmd = [self.puffpatch_path,
426 "--operation=puffpatch",
427 "--src_file=%s" % old_file_name,
428 "--dst_file=%s" % new_file_name,
429 "--patch_file=%s" % patch_file_name,
430 "--src_extents=%s" % in_extents_arg,
431 "--dst_extents=%s" % out_extents_arg]
432 subprocess.check_call(puffpatch_cmd)
433 else:
434 raise PayloadError("Unknown operation %s", op.type)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800435
Gilad Arnold272a4992013-05-08 13:12:53 -0700436 # Pad with zeros past the total output length.
437 if pad_len:
Allie Wood12f59aa2015-04-06 11:05:12 -0700438 new_part_file.seek(pad_off)
439 new_part_file.write('\0' * pad_len)
Gilad Arnold272a4992013-05-08 13:12:53 -0700440 else:
441 # Gather input raw data and write to a temp file.
Allie Wood12f59aa2015-04-06 11:05:12 -0700442 input_part_file = old_part_file if old_part_file else new_part_file
443 in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
Amin Hassaniefa62d92017-11-09 13:46:56 -0800444 max_length=op.src_length if op.src_length else
445 self._BytesInExtents(op.src_extents,
446 "%s.src_extents"))
Gilad Arnold272a4992013-05-08 13:12:53 -0700447 with tempfile.NamedTemporaryFile(delete=False) as in_file:
448 in_file_name = in_file.name
449 in_file.write(in_data)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800450
Allie Wood12f59aa2015-04-06 11:05:12 -0700451 # Allocate temporary output file.
Gilad Arnold272a4992013-05-08 13:12:53 -0700452 with tempfile.NamedTemporaryFile(delete=False) as out_file:
453 out_file_name = out_file.name
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800454
Amin Hassani0f59a9a2019-09-27 10:24:31 -0700455 if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
Amin Hassanicdeb6e62017-10-11 10:15:11 -0700456 # Invoke bspatch.
457 bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
458 patch_file_name]
459 subprocess.check_call(bspatch_cmd)
460 elif op.type == common.OpType.PUFFDIFF:
461 # Invoke puffpatch.
462 puffpatch_cmd = [self.puffpatch_path,
463 "--operation=puffpatch",
464 "--src_file=%s" % in_file_name,
465 "--dst_file=%s" % out_file_name,
466 "--patch_file=%s" % patch_file_name]
467 subprocess.check_call(puffpatch_cmd)
468 else:
469 raise PayloadError("Unknown operation %s", op.type)
Gilad Arnold272a4992013-05-08 13:12:53 -0700470
471 # Read output.
472 with open(out_file_name, 'rb') as out_file:
473 out_data = out_file.read()
474 if len(out_data) != op.dst_length:
475 raise PayloadError(
476 '%s: actual patched data length (%d) not as expected (%d)' %
477 (op_name, len(out_data), op.dst_length))
478
479 # Write output back to partition, with padding.
480 unaligned_out_len = len(out_data) % block_size
481 if unaligned_out_len:
482 out_data += '\0' * (block_size - unaligned_out_len)
Allie Wood12f59aa2015-04-06 11:05:12 -0700483 _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
Gilad Arnold272a4992013-05-08 13:12:53 -0700484 '%s.dst_extents' % op_name)
485
486 # Delete input/output files.
487 os.remove(in_file_name)
488 os.remove(out_file_name)
489
490 # Delete patch file.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800491 os.remove(patch_file_name)
492
Allie Wood12f59aa2015-04-06 11:05:12 -0700493 def _ApplyOperations(self, operations, base_name, old_part_file,
494 new_part_file, part_size):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800495 """Applies a sequence of update operations to a partition.
496
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800497 Args:
498 operations: the sequence of operations
499 base_name: the name of the operation sequence
Allie Wood12f59aa2015-04-06 11:05:12 -0700500 old_part_file: the old partition file object, open for reading/writing
501 new_part_file: the new partition file object, open for reading/writing
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800502 part_size: the partition size
Allie Wood12f59aa2015-04-06 11:05:12 -0700503
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800504 Raises:
505 PayloadError if anything goes wrong while processing the payload.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800506 """
507 for op, op_name in common.OperationIter(operations, base_name):
508 # Read data blob.
509 data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
510
Amin Hassani0de7f782017-12-07 12:13:03 -0800511 if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
512 common.OpType.REPLACE_XZ):
Allie Wood12f59aa2015-04-06 11:05:12 -0700513 self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
Amin Hassani8ad22ba2017-10-11 10:15:11 -0700514 elif op.type == common.OpType.ZERO:
515 self._ApplyZeroOperation(op, op_name, new_part_file)
Allie Wood12f59aa2015-04-06 11:05:12 -0700516 elif op.type == common.OpType.SOURCE_COPY:
517 self._ApplySourceCopyOperation(op, op_name, old_part_file,
518 new_part_file)
Amin Hassaniefa62d92017-11-09 13:46:56 -0800519 elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
520 common.OpType.BROTLI_BSDIFF):
Sen Jiang92161a72016-06-28 16:09:38 -0700521 self._ApplyDiffOperation(op, op_name, data, old_part_file,
522 new_part_file)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800523 else:
524 raise PayloadError('%s: unknown operation type (%d)' %
525 (op_name, op.type))
526
527 def _ApplyToPartition(self, operations, part_name, base_name,
Gilad Arnold16416602013-05-04 21:40:39 -0700528 new_part_file_name, new_part_info,
529 old_part_file_name=None, old_part_info=None):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800530 """Applies an update to a partition.
531
532 Args:
533 operations: the sequence of update operations to apply
534 part_name: the name of the partition, for error reporting
535 base_name: the name of the operation sequence
Gilad Arnold16416602013-05-04 21:40:39 -0700536 new_part_file_name: file name to write partition data to
537 new_part_info: size and expected hash of dest partition
538 old_part_file_name: file name of source partition (optional)
539 old_part_info: size and expected hash of source partition (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -0700540
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800541 Raises:
542 PayloadError if anything goes wrong with the update.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800543 """
544 # Do we have a source partition?
Gilad Arnold16416602013-05-04 21:40:39 -0700545 if old_part_file_name:
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800546 # Verify the source partition.
Gilad Arnold16416602013-05-04 21:40:39 -0700547 with open(old_part_file_name, 'rb') as old_part_file:
Gilad Arnold4b8f4c22015-07-16 11:45:39 -0700548 _VerifySha256(old_part_file, old_part_info.hash,
549 'old ' + part_name, length=old_part_info.size)
Gilad Arnoldf69065c2013-05-27 16:54:59 -0700550 new_part_file_mode = 'r+b'
Amin Hassani0f59a9a2019-09-27 10:24:31 -0700551 open(new_part_file_name, 'w').close()
552
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800553 else:
Gilad Arnoldf69065c2013-05-27 16:54:59 -0700554 # We need to create/truncate the dst partition file.
555 new_part_file_mode = 'w+b'
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800556
557 # Apply operations.
Gilad Arnoldf69065c2013-05-27 16:54:59 -0700558 with open(new_part_file_name, new_part_file_mode) as new_part_file:
Allie Wood12f59aa2015-04-06 11:05:12 -0700559 old_part_file = (open(old_part_file_name, 'r+b')
560 if old_part_file_name else None)
561 try:
562 self._ApplyOperations(operations, base_name, old_part_file,
563 new_part_file, new_part_info.size)
564 finally:
565 if old_part_file:
566 old_part_file.close()
567
Gilad Arnolde5fdf182013-05-23 16:13:38 -0700568 # Truncate the result, if so instructed.
569 if self.truncate_to_expected_size:
570 new_part_file.seek(0, 2)
571 if new_part_file.tell() > new_part_info.size:
572 new_part_file.seek(new_part_info.size)
573 new_part_file.truncate()
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800574
575 # Verify the resulting partition.
Gilad Arnold16416602013-05-04 21:40:39 -0700576 with open(new_part_file_name, 'rb') as new_part_file:
Gilad Arnold4b8f4c22015-07-16 11:45:39 -0700577 _VerifySha256(new_part_file, new_part_info.hash,
578 'new ' + part_name, length=new_part_info.size)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800579
  def Run(self, new_parts, old_parts=None):
    """Applier entry point, invoking all update operations.

    Args:
      new_parts: map of partition name to dest partition file
      old_parts: map of partition name to source partition file (optional)

    Raises:
      PayloadError if payload application failed.
    """
    # Avoid a mutable default argument; treat "no sources" as an empty map.
    if old_parts is None:
      old_parts = {}

    # Rewind the payload file so blob reads start from a known position.
    self.payload.ResetFile()

    new_part_info = {}
    old_part_info = {}
    install_operations = []

    manifest = self.payload.manifest
    if self.payload.header.version == 1:
      # Major version 1: fixed rootfs/kernel partitions with per-partition
      # info fields directly on the manifest.
      for real_name, proto_name in common.CROS_PARTITIONS:
        new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
        old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)

      install_operations.append((common.ROOTFS, manifest.install_operations))
      install_operations.append((common.KERNEL,
                                 manifest.kernel_install_operations))
    else:
      # Major version 2+: arbitrary named partitions in manifest.partitions.
      for part in manifest.partitions:
        name = part.partition_name
        new_part_info[name] = part.new_partition_info
        old_part_info[name] = part.old_partition_info
        install_operations.append((name, part.operations))

    part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()

    # Make sure the arguments are sane and match the payload.
    new_part_names = set(new_parts.keys())
    if new_part_names != part_names:
      raise PayloadError('missing dst partition(s) %s' %
                         ', '.join(part_names - new_part_names))

    old_part_names = set(old_parts.keys())
    if part_names - old_part_names:
      # Some source partitions are missing: only acceptable for full updates.
      if self.payload.IsDelta():
        raise PayloadError('trying to apply a delta update without src '
                           'partition(s) %s' %
                           ', '.join(part_names - old_part_names))
    elif old_part_names == part_names:
      # All sources provided: only acceptable for delta updates.
      if self.payload.IsFull():
        raise PayloadError('trying to apply a full update onto src partitions')
    else:
      # NOTE(review): this branch is reached when old_parts contains names not
      # in the payload (supersets), yet the message says "not all provided" —
      # confirm the intended semantics.
      raise PayloadError('not all src partitions provided')

    for name, operations in install_operations:
      # Apply update to partition.
      self._ApplyToPartition(
          operations, name, '%s_install_operations' % name, new_parts[name],
          new_part_info[name], old_parts.get(name, None), old_part_info[name])