# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applying a Chrome OS update payload.

This module is used internally by the main Payload class for applying an update
payload. The interface for invoking the applier is as follows:

  applier = PayloadApplier(payload)
  applier.Run(...)

"""

from __future__ import print_function

import array
import bz2
import hashlib
import itertools
import os
import shutil
import subprocess
import sys
import tempfile

import common
from error import PayloadError


#
# Helper functions.
#
def _VerifySha256(file_obj, expected_hash, name, length=-1):
  """Verifies the SHA256 hash of a file.

  Args:
    file_obj: file object to read
    expected_hash: the hash digest we expect to be getting
    name: name string of this hash, for error reporting
    length: precise length of data to verify (optional)

  Raises:
    PayloadError if the computed hash doesn't match the expected one, or if it
    fails to read the specified length of data.
  """
  # pylint: disable=E1101
  hasher = hashlib.sha256()
  block_length = 1024 * 1024
  max_length = length if length >= 0 else sys.maxint

  while max_length > 0:
    read_length = min(max_length, block_length)
    data = file_obj.read(read_length)
    if not data:
      break
    max_length -= len(data)
    hasher.update(data)

  if length >= 0 and max_length > 0:
    raise PayloadError(
        'insufficient data (%d instead of %d) when verifying %s' %
        (length - max_length, length, name))

  actual_hash = hasher.digest()
  if actual_hash != expected_hash:
    raise PayloadError('%s hash (%s) not as expected (%s)' %
                       (name, common.FormatSha256(actual_hash),
                        common.FormatSha256(expected_hash)))

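# Illustrative usage sketch (not part of the original flow; part_info is a
# hypothetical object carrying the .hash and .size attributes used by
# PayloadApplier below):
#
#   with open('rootfs.img', 'rb') as part_file:
#     _VerifySha256(part_file, part_info.hash, 'new rootfs',
#                   length=part_info.size)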

def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  This tries to be efficient by not copying data as it is read in chunks.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)

  Returns:
    A character array containing the concatenated read data.
  """
  data = array.array('c')
  if max_length < 0:
    max_length = sys.maxint
  for ex in extents:
    if max_length == 0:
      break
    read_length = min(max_length, ex.num_blocks * block_size)

    # Fill with zeros or read from file, depending on the type of extent.
    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
      data.extend(itertools.repeat('\0', read_length))
    else:
      file_obj.seek(ex.start_block * block_size)
      data.fromfile(file_obj, read_length)

    max_length -= read_length

  return data

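# Worked example (illustrative, assuming a 4096-byte block size): for an extent
# sequence of (start_block=16, num_blocks=2) followed by a pseudo extent
# (start_block=PSEUDO_EXTENT_MARKER, num_blocks=1), _ReadExtents returns 8192
# bytes read from file offset 16 * 4096 = 65536 followed by 4096 zero bytes;
# passing max_length=10000 would stop after the first 10000 of those bytes.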

def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copying data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.
  """
  data_offset = 0
  data_length = len(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more write extents than data' % ex_name)
    write_length = min(data_length, ex.num_blocks * block_size)

    # Only do actual writing if this is not a pseudo-extent.
    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(ex.start_block * block_size)
      data_view = buffer(data, data_offset, write_length)
      file_obj.write(data_view)

    data_offset += write_length
    data_length -= write_length

  if data_length:
    raise PayloadError('%s: more data than write extents' % base_name)

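# Worked example (illustrative, continuing the _ReadExtents example above):
# writing a 12288-byte buffer to that extent sequence puts bytes 0-8191 at file
# offset 65536 and silently drops bytes 8192-12287, since the pseudo extent is
# skipped; data left over past the last extent, or an extent reached after the
# data is exhausted, raises PayloadError.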

def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
  """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).

  Raises:
    PayloadError if data_length is too short or too long.
  """
  arg = ''
  pad_off = pad_len = 0
  if data_length < 0:
    data_length = sys.maxint
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more extents than total data length' % ex_name)

    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
    start_byte = -1 if is_pseudo else ex.start_block * block_size
    num_bytes = ex.num_blocks * block_size
    if data_length < num_bytes:
      # We're only padding a real extent.
      if not is_pseudo:
        pad_off = start_byte + data_length
        pad_len = num_bytes - data_length

      num_bytes = data_length

    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
    data_length -= num_bytes

  if data_length:
    raise PayloadError('%s: extents not covering full data length' % base_name)

  return arg, pad_off, pad_len

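# Worked example (illustrative, assuming a 4096-byte block size): for extents
# (start_block=10, num_blocks=2) and (start_block=100, num_blocks=1) with
# data_length=10000, _ExtentsToBspatchArg returns
# ('40960:8192,409600:1808', 411408, 2288): the first extent is fully covered,
# the second only partially, so its remaining 2288 bytes (starting at file
# offset 411408) must be zero-padded by the caller.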

#
# Payload application.
#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.
  """

  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
               puffpatch_path=None, truncate_to_expected_size=True):
    """Initialize the applier.

    Args:
      payload: the payload object to apply
      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      puffpatch_path: path to the puffpatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)
    """
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size
    self.minor_version = payload.manifest.minor_version
    self.bsdiff_in_place = bsdiff_in_place
    self.bspatch_path = bspatch_path or 'bspatch'
    self.puffpatch_path = puffpatch_path or 'puffin'
    self.truncate_to_expected_size = truncate_to_expected_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % ex_name)

        # Pad with zeros if necessary.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

280 def _ApplyMoveOperation(self, op, op_name, part_file):
281 """Applies a MOVE operation.
282
Gilad Arnold658185a2013-05-08 17:57:54 -0700283 Note that this operation must read the whole block data from the input and
284 only then dump it, due to our in-place update semantics; otherwise, it
285 might clobber data midway through.
286
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800287 Args:
288 op: the operation object
289 op_name: name string for error reporting
290 part_file: the partition file object
Allie Wood12f59aa2015-04-06 11:05:12 -0700291
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800292 Raises:
293 PayloadError if something goes wrong.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800294 """
295 block_size = self.block_size
296
297 # Gather input raw data from src extents.
298 in_data = _ReadExtents(part_file, op.src_extents, block_size)
299
300 # Dump extracted data to dst extents.
301 _WriteExtents(part_file, in_data, op.dst_extents, block_size,
302 '%s.dst_extents' % op_name)
303
  def _ApplyZeroOperation(self, op, op_name, part_file):
    """Applies a ZERO operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    base_name = '%s.dst_extents' % op_name

    # Iterate over the extents and write zero.
    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
      # Only do actual writing if this is not a pseudo-extent.
      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
        part_file.seek(ex.start_block * block_size)
        part_file.write('\0' * (ex.num_blocks * block_size))

  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                new_part_file):
    """Applies a SOURCE_COPY operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      old_part_file: the old partition file object
      new_part_file: the new partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _BytesInExtents(self, extents, base_name):
    """Counts the length of extents in bytes.

    Args:
      extents: The list of Extents.
      base_name: For error reporting.

    Returns:
      The number of bytes in extents.
    """

    length = 0
    for ex, ex_name in common.ExtentIter(extents, base_name):
      length += ex.num_blocks * self.block_size
    return length

  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
                          new_part_file):
    """Applies a BSDIFF, SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      old_part_file: the source partition file object
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

    if (hasattr(new_part_file, 'fileno') and
        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
      # Construct input and output extents argument for bspatch.
      in_extents_arg, _, _ = _ExtentsToBspatchArg(
          op.src_extents, block_size, '%s.src_extents' % op_name,
          data_length=op.src_length if op.src_length else
          self._BytesInExtents(op.src_extents, '%s.src_extents' % op_name))
      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
          op.dst_extents, block_size, '%s.dst_extents' % op_name,
          data_length=op.dst_length if op.dst_length else
          self._BytesInExtents(op.dst_extents, '%s.dst_extents' % op_name))

      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
      # Diff from source partition.
      old_file_name = '/dev/fd/%d' % old_part_file.fileno()

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch on partition file with extents args.
        bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                       patch_file_name, in_extents_arg, out_extents_arg]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch on partition file with extents args.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % old_file_name,
                         "--dst_file=%s" % new_file_name,
                         "--patch_file=%s" % patch_file_name,
                         "--src_extents=%s" % in_extents_arg,
                         "--dst_extents=%s" % out_extents_arg]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError('Unknown operation %s' % op.type)

      # Pad with zeros past the total output length.
      if pad_len:
        new_part_file.seek(pad_off)
        new_part_file.write('\0' * pad_len)
    else:
      # Gather input raw data and write to a temp file.
      input_part_file = old_part_file if old_part_file else new_part_file
      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
                             max_length=op.src_length if op.src_length else
                             self._BytesInExtents(op.src_extents,
                                                  '%s.src_extents' % op_name))
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        in_file.write(in_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch.
        bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                       patch_file_name]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % in_file_name,
                         "--dst_file=%s" % out_file_name,
                         "--patch_file=%s" % patch_file_name]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError('Unknown operation %s' % op.type)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)

      # Delete input/output files.
      os.remove(in_file_name)
      os.remove(out_file_name)

    # Delete patch file.
    os.remove(patch_file_name)
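
    # Illustrative note (hypothetical descriptors and offsets): on the fast
    # path above, the constructed bspatch command has the shape
    #
    #   bspatch /dev/fd/4 /dev/fd/5 /tmp/tmpXXXXXX \
    #       40960:8192,409600:4096 81920:12288
    #
    # where the fourth argument is the comma-separated source extents and the
    # fifth the destination extents ('offset:length[,offset:length]'), letting
    # bspatch gather input from and scatter output to the partition file
    # descriptors directly; the slow path instead stages data through the
    # temporary input and output files deleted above.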

  def _ApplyOperations(self, operations, base_name, old_part_file,
                       new_part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes in-place update semantics for MOVE and BSDIFF, namely all
    reads are performed first, then the data is processed and written back to
    the same file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      old_part_file: the old partition file object, open for reading/writing
      new_part_file: the new partition file object, open for reading/writing
      part_size: the partition size

    Raises:
      PayloadError if anything goes wrong while processing the payload.
    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.ZERO:
        self._ApplyZeroOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.BSDIFF:
        self._ApplyDiffOperation(op, op_name, data, new_part_file,
                                 new_part_file)
      elif op.type == common.OpType.SOURCE_COPY:
        self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                       new_part_file)
      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
                       common.OpType.BROTLI_BSDIFF):
        self._ApplyDiffOperation(op, op_name, data, old_part_file,
                                 new_part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        new_part_file_name, new_part_info,
                        old_part_file_name=None, old_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      new_part_file_name: file name to write partition data to
      new_part_info: size and expected hash of dest partition
      old_part_file_name: file name of source partition (optional)
      old_part_info: size and expected hash of source partition (optional)

    Raises:
      PayloadError if anything goes wrong with the update.
    """
    # Do we have a source partition?
    if old_part_file_name:
      # Verify the source partition.
      with open(old_part_file_name, 'rb') as old_part_file:
        _VerifySha256(old_part_file, old_part_info.hash,
                      'old ' + part_name, length=old_part_info.size)
      new_part_file_mode = 'r+b'
      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
        # Copy the src partition to the dst one; make sure we don't truncate it.
        shutil.copyfile(old_part_file_name, new_part_file_name)
      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
        # In minor version >= 2, we don't want to copy the partitions, so
        # instead just make the new partition file.
        open(new_part_file_name, 'w').close()
      else:
        raise PayloadError("Unknown minor version: %d" % self.minor_version)
    else:
      # We need to create/truncate the dst partition file.
      new_part_file_mode = 'w+b'

    # Apply operations.
    with open(new_part_file_name, new_part_file_mode) as new_part_file:
      old_part_file = (open(old_part_file_name, 'r+b')
                       if old_part_file_name else None)
      try:
        self._ApplyOperations(operations, base_name, old_part_file,
                              new_part_file, new_part_info.size)
      finally:
        if old_part_file:
          old_part_file.close()

      # Truncate the result, if so instructed.
      if self.truncate_to_expected_size:
        new_part_file.seek(0, 2)
        if new_part_file.tell() > new_part_info.size:
          new_part_file.seek(new_part_info.size)
          new_part_file.truncate()

    # Verify the resulting partition.
    with open(new_part_file_name, 'rb') as new_part_file:
      _VerifySha256(new_part_file, new_part_info.hash,
                    'new ' + part_name, length=new_part_info.size)

  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
          old_rootfs_part=None):
    """Applier entry point, invoking all update operations.

    Args:
      new_kernel_part: name of dest kernel partition file
      new_rootfs_part: name of dest rootfs partition file
      old_kernel_part: name of source kernel partition file (optional)
      old_rootfs_part: name of source rootfs partition file (optional)

    Raises:
      PayloadError if payload application failed.
    """
    self.payload.ResetFile()

    # Make sure the arguments are sane and match the payload.
    if not (new_kernel_part and new_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (old_kernel_part or old_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif old_kernel_part and old_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', new_rootfs_part,
        self.payload.manifest.new_rootfs_info, old_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', new_kernel_part,
        self.payload.manifest.new_kernel_info, old_kernel_part,
        self.payload.manifest.old_kernel_info)
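

# Illustrative end-to-end sketch (hypothetical file names; payload is assumed
# to be an already-initialized Payload object, per the module docstring):
#
#   applier = PayloadApplier(payload, bspatch_path='/usr/bin/bspatch',
#                            puffpatch_path='/usr/bin/puffin')
#   applier.Run('new_kern.img', 'new_root.img',
#               old_kernel_part='old_kern.img',
#               old_rootfs_part='old_root.img')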