# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applying a Chrome OS update payload.

This module is used internally by the main Payload class for applying an update
payload. The interface for invoking the applier is as follows:

  applier = PayloadApplier(payload)
  applier.Run(...)
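
For example, applying a delta payload to local partition image files might
look roughly as follows; the file and binary paths are illustrative only,
and the payload object is assumed to have been loaded and initialized by
the main payload module:

  applier = PayloadApplier(payload, bspatch_path='/usr/bin/bspatch')
  applier.Run('new_kernel.bin', 'new_rootfs.bin',
              old_kernel_part='old_kernel.bin',
              old_rootfs_part='old_rootfs.bin')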

"""

from __future__ import print_function

import array
import bz2
import hashlib
import itertools
# The lzma module is not available everywhere, so ignore an import failure;
# this is safe as long as lzma ends up not being used. For example, 'cros
# flash' uses devserver code which eventually loads this file, but the lzma
# library is not included in the client test devices and is not needed there,
# since 'cros flash' never uses lzma. Python 3.x includes lzma natively; for
# backward compatibility with Python 2.7, backports-lzma is needed.
try:
  import lzma
except ImportError:
  try:
    from backports import lzma
  except ImportError:
    pass
import os
import shutil
import subprocess
import sys
import tempfile

from update_payload import common
from update_payload.error import PayloadError


#
# Helper functions.
#
def _VerifySha256(file_obj, expected_hash, name, length=-1):
  """Verifies the SHA256 hash of a file.

  Args:
    file_obj: file object to read
    expected_hash: the hash digest we expect to be getting
    name: name string of this hash, for error reporting
    length: precise length of data to verify (optional)

  Raises:
    PayloadError if the computed hash doesn't match the expected one, or if it
    fails to read the specified length of data.
  """
  hasher = hashlib.sha256()
  block_length = 1024 * 1024
  max_length = length if length >= 0 else sys.maxint

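  # Read and hash the file in 1 MiB chunks, stopping early once the requested
  # length (if any) has been consumed.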
  while max_length > 0:
    read_length = min(max_length, block_length)
    data = file_obj.read(read_length)
    if not data:
      break
    max_length -= len(data)
    hasher.update(data)

  if length >= 0 and max_length > 0:
    raise PayloadError(
        'insufficient data (%d instead of %d) when verifying %s' %
        (length - max_length, length, name))

  actual_hash = hasher.digest()
  if actual_hash != expected_hash:
    raise PayloadError('%s hash (%s) not as expected (%s)' %
                       (name, common.FormatSha256(actual_hash),
                        common.FormatSha256(expected_hash)))


def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  This tries to be efficient by not copying data as it is read in chunks.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)

  Returns:
    A character array containing the concatenated read data.
  """
  data = array.array('c')
  if max_length < 0:
    max_length = sys.maxint
  for ex in extents:
    if max_length == 0:
      break
    read_length = min(max_length, ex.num_blocks * block_size)

    # Fill with zeros or read from file, depending on the type of extent.
    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
      data.extend(itertools.repeat('\0', read_length))
    else:
      file_obj.seek(ex.start_block * block_size)
      data.fromfile(file_obj, read_length)

    max_length -= read_length

  return data


def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copying data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.
  """
  data_offset = 0
  data_length = len(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more write extents than data' % ex_name)
    write_length = min(data_length, ex.num_blocks * block_size)

    # Only do actual writing if this is not a pseudo-extent.
    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(ex.start_block * block_size)
      data_view = buffer(data, data_offset, write_length)
      file_obj.write(data_view)

    data_offset += write_length
    data_length -= write_length

  if data_length:
    raise PayloadError('%s: more data than write extents' % base_name)


def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
  """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).
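
    For example, with a 4096-byte block size, the extents (start_block=10,
    num_blocks=2) and (start_block=40, num_blocks=1) with data_length=10000
    yield ('40960:8192,163840:1808', 165648, 2288): the last extent is only
    partially covered by the data, so its remaining 2288 bytes must be
    zero-padded.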

  Raises:
    PayloadError if data_length is too short or too long.
  """
  arg = ''
  pad_off = pad_len = 0
  if data_length < 0:
    data_length = sys.maxint
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more extents than total data length' % ex_name)

    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
    start_byte = -1 if is_pseudo else ex.start_block * block_size
    num_bytes = ex.num_blocks * block_size
    if data_length < num_bytes:
      # We're only padding a real extent.
      if not is_pseudo:
        pad_off = start_byte + data_length
        pad_len = num_bytes - data_length

      num_bytes = data_length

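    # Append 'start:length' for this extent, prefixing a comma separator once
    # arg is non-empty; pseudo-extents are encoded with a start byte of -1.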
    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
    data_length -= num_bytes

  if data_length:
    raise PayloadError('%s: extents not covering full data length' % base_name)

  return arg, pad_off, pad_len


#
# Payload application.
#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.
  """

  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
               puffpatch_path=None, truncate_to_expected_size=True):
    """Initialize the applier.

    Args:
      payload: the payload object to check
      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      puffpatch_path: path to the puffpatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)
    """
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size
    self.minor_version = payload.manifest.minor_version
    self.bsdiff_in_place = bsdiff_in_place
    self.bspatch_path = bspatch_path or 'bspatch'
    self.puffpatch_path = puffpatch_path or 'puffin'
    self.truncate_to_expected_size = truncate_to_expected_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ,_XZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)
    elif op.type == common.OpType.REPLACE_XZ:
      # pylint: disable=no-member
      out_data = lzma.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % op_name)

        # Pad with zeros if necessary.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _ApplyMoveOperation(self, op, op_name, part_file):
    """Applies a MOVE operation.

    Note that this operation must read the whole block data from the input and
    only then dump it, due to our in-place update semantics; otherwise, it
    might clobber data midway through.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplyZeroOperation(self, op, op_name, part_file):
    """Applies a ZERO operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    base_name = '%s.dst_extents' % op_name

    # Iterate over the extents and write zero.
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
      # Only do actual writing if this is not a pseudo-extent.
      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
        part_file.seek(ex.start_block * block_size)
        part_file.write('\0' * (ex.num_blocks * block_size))

  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                new_part_file):
    """Applies a SOURCE_COPY operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      old_part_file: the old partition file object
      new_part_file: the new partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _BytesInExtents(self, extents, base_name):
    """Counts the length of extents in bytes.

    Args:
      extents: The list of Extents.
      base_name: For error reporting.

    Returns:
      The number of bytes in extents.
    """

    length = 0
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(extents, base_name):
      length += ex.num_blocks * self.block_size
    return length

  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
                          new_part_file):
    """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      old_part_file: the source partition file object
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

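    # If both partition files are backed by real file descriptors, hand them
    # to the patcher directly via /dev/fd; otherwise fall back to temporary
    # files for the input and output data.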
    if (hasattr(new_part_file, 'fileno') and
        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
      # Construct input and output extents argument for bspatch.

      in_extents_arg, _, _ = _ExtentsToBspatchArg(
          op.src_extents, block_size, '%s.src_extents' % op_name,
          data_length=op.src_length if op.src_length else
          self._BytesInExtents(op.src_extents, '%s.src_extents' % op_name))
      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
          op.dst_extents, block_size, '%s.dst_extents' % op_name,
          data_length=op.dst_length if op.dst_length else
          self._BytesInExtents(op.dst_extents, '%s.dst_extents' % op_name))

      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
      # Diff from source partition.
      old_file_name = '/dev/fd/%d' % old_part_file.fileno()

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch on partition file with extents args.
        bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                       patch_file_name, in_extents_arg, out_extents_arg]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch on partition file with extents args.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % old_file_name,
                         "--dst_file=%s" % new_file_name,
                         "--patch_file=%s" % patch_file_name,
                         "--src_extents=%s" % in_extents_arg,
                         "--dst_extents=%s" % out_extents_arg]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError('Unknown operation %s' % op.type)

      # Pad with zeros past the total output length.
      if pad_len:
        new_part_file.seek(pad_off)
        new_part_file.write('\0' * pad_len)
    else:
      # Gather input raw data and write to a temp file.
      input_part_file = old_part_file if old_part_file else new_part_file
      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
                             max_length=op.src_length if op.src_length else
                             self._BytesInExtents(op.src_extents,
                                                  '%s.src_extents' % op_name))
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        in_file.write(in_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch.
        bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                       patch_file_name]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % in_file_name,
                         "--dst_file=%s" % out_file_name,
                         "--patch_file=%s" % patch_file_name]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError('Unknown operation %s' % op.type)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)

      # Delete input/output files.
      os.remove(in_file_name)
      os.remove(out_file_name)

    # Delete patch file.
    os.remove(patch_file_name)

  def _ApplyOperations(self, operations, base_name, old_part_file,
                       new_part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes in-place update semantics for MOVE and BSDIFF, namely all
    reads are performed first, then the data is processed and written back to
    the same file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      old_part_file: the old partition file object, open for reading/writing
      new_part_file: the new partition file object, open for reading/writing
      part_size: the partition size

    Raises:
      PayloadError if anything goes wrong while processing the payload.
    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

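      # Dispatch to the corresponding handler. MOVE and BSDIFF operate
      # in-place on the new partition file, while SOURCE_COPY, SOURCE_BSDIFF,
      # PUFFDIFF and BROTLI_BSDIFF read from the old partition and write to
      # the new one.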
      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
                     common.OpType.REPLACE_XZ):
        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.ZERO:
        self._ApplyZeroOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.BSDIFF:
        self._ApplyDiffOperation(op, op_name, data, new_part_file,
                                 new_part_file)
      elif op.type == common.OpType.SOURCE_COPY:
        self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                       new_part_file)
      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
                       common.OpType.BROTLI_BSDIFF):
        self._ApplyDiffOperation(op, op_name, data, old_part_file,
                                 new_part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        new_part_file_name, new_part_info,
                        old_part_file_name=None, old_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      new_part_file_name: file name to write partition data to
      new_part_info: size and expected hash of dest partition
      old_part_file_name: file name of source partition (optional)
      old_part_info: size and expected hash of source partition (optional)

    Raises:
      PayloadError if anything goes wrong with the update.
    """
    # Do we have a source partition?
    if old_part_file_name:
      # Verify the source partition.
      with open(old_part_file_name, 'rb') as old_part_file:
        _VerifySha256(old_part_file, old_part_info.hash,
                      'old ' + part_name, length=old_part_info.size)
      new_part_file_mode = 'r+b'
      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
        # Copy the src partition to the dst one; make sure we don't truncate it.
        shutil.copyfile(old_part_file_name, new_part_file_name)
      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
        # In minor version >= 2, we don't want to copy the partitions, so
        # instead just make the new partition file.
        open(new_part_file_name, 'w').close()
      else:
        raise PayloadError("Unknown minor version: %d" % self.minor_version)
    else:
      # We need to create/truncate the dst partition file.
      new_part_file_mode = 'w+b'

    # Apply operations.
    with open(new_part_file_name, new_part_file_mode) as new_part_file:
      old_part_file = (open(old_part_file_name, 'r+b')
                       if old_part_file_name else None)
      try:
        self._ApplyOperations(operations, base_name, old_part_file,
                              new_part_file, new_part_info.size)
      finally:
        if old_part_file:
          old_part_file.close()

      # Truncate the result, if so instructed.
      if self.truncate_to_expected_size:
        new_part_file.seek(0, 2)
        if new_part_file.tell() > new_part_info.size:
          new_part_file.seek(new_part_info.size)
          new_part_file.truncate()

    # Verify the resulting partition.
    with open(new_part_file_name, 'rb') as new_part_file:
      _VerifySha256(new_part_file, new_part_info.hash,
                    'new ' + part_name, length=new_part_info.size)

  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
          old_rootfs_part=None):
    """Applier entry point, invoking all update operations.

    Args:
      new_kernel_part: name of dest kernel partition file
      new_rootfs_part: name of dest rootfs partition file
      old_kernel_part: name of source kernel partition file (optional)
      old_rootfs_part: name of source rootfs partition file (optional)

    Raises:
      PayloadError if payload application failed.
    """
    self.payload.ResetFile()

    # Make sure the arguments are sane and match the payload.
    if not (new_kernel_part and new_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (old_kernel_part or old_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif old_kernel_part and old_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', new_rootfs_part,
        self.payload.manifest.new_rootfs_info, old_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', new_kernel_part,
        self.payload.manifest.new_kernel_info, old_kernel_part,
        self.payload.manifest.old_kernel_info)