blob: da2af121852f1c8491bf59e1443d7f71a1d1f973 [file] [log] [blame]
Gilad Arnold553b0ec2013-01-26 01:00:39 -08001# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Applying a Chrome OS update payload.
6
7This module is used internally by the main Payload class for applying an update
8payload. The interface for invoking the applier is as follows:
9
10 applier = PayloadApplier(payload)
11 applier.Run(...)
12
13"""
14
Allie Wood12f59aa2015-04-06 11:05:12 -070015from __future__ import print_function
16
Gilad Arnold553b0ec2013-01-26 01:00:39 -080017import array
18import bz2
19import hashlib
Gilad Arnold658185a2013-05-08 17:57:54 -070020import itertools
Gilad Arnold553b0ec2013-01-26 01:00:39 -080021import os
22import shutil
23import subprocess
24import sys
25import tempfile
26
27import common
28from error import PayloadError
29
30
31#
32# Helper functions.
33#
Gilad Arnold382df5c2013-05-03 12:49:28 -070034def _VerifySha256(file_obj, expected_hash, name, length=-1):
Gilad Arnold553b0ec2013-01-26 01:00:39 -080035 """Verifies the SHA256 hash of a file.
36
37 Args:
38 file_obj: file object to read
39 expected_hash: the hash digest we expect to be getting
40 name: name string of this hash, for error reporting
Gilad Arnold382df5c2013-05-03 12:49:28 -070041 length: precise length of data to verify (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -070042
Gilad Arnold553b0ec2013-01-26 01:00:39 -080043 Raises:
Gilad Arnold382df5c2013-05-03 12:49:28 -070044 PayloadError if computed hash doesn't match expected one, or if fails to
45 read the specified length of data.
Gilad Arnold553b0ec2013-01-26 01:00:39 -080046 """
47 # pylint: disable=E1101
48 hasher = hashlib.sha256()
49 block_length = 1024 * 1024
Gilad Arnold382df5c2013-05-03 12:49:28 -070050 max_length = length if length >= 0 else sys.maxint
Gilad Arnold553b0ec2013-01-26 01:00:39 -080051
Gilad Arnold382df5c2013-05-03 12:49:28 -070052 while max_length > 0:
Gilad Arnold553b0ec2013-01-26 01:00:39 -080053 read_length = min(max_length, block_length)
54 data = file_obj.read(read_length)
55 if not data:
56 break
57 max_length -= len(data)
58 hasher.update(data)
59
Gilad Arnold382df5c2013-05-03 12:49:28 -070060 if length >= 0 and max_length > 0:
61 raise PayloadError(
62 'insufficient data (%d instead of %d) when verifying %s' %
63 (length - max_length, length, name))
64
Gilad Arnold553b0ec2013-01-26 01:00:39 -080065 actual_hash = hasher.digest()
66 if actual_hash != expected_hash:
67 raise PayloadError('%s hash (%s) not as expected (%s)' %
Gilad Arnold96405372013-05-04 00:24:58 -070068 (name, common.FormatSha256(actual_hash),
69 common.FormatSha256(expected_hash)))
Gilad Arnold553b0ec2013-01-26 01:00:39 -080070
71
def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  This tries to be efficient by not copying data as it is read in chunks.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)

  Returns:
    A character array containing the concatenated read data.
  """
  remaining = sys.maxint if max_length < 0 else max_length
  data = array.array('c')
  for extent in extents:
    if not remaining:
      break
    chunk_len = min(remaining, extent.num_blocks * block_size)

    # A pseudo-extent denotes a region with no backing blocks; fill it with
    # zeros instead of reading from the file.
    if extent.start_block == common.PSEUDO_EXTENT_MARKER:
      data.extend(itertools.repeat('\0', chunk_len))
    else:
      file_obj.seek(extent.start_block * block_size)
      data.fromfile(file_obj, chunk_len)

    remaining -= chunk_len

  return data
104
105
def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copying data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.
  """
  offset = 0
  remaining = len(data)
  for extent, extent_name in common.ExtentIter(extents, base_name):
    if not remaining:
      raise PayloadError('%s: more write extents than data' % extent_name)
    chunk_len = min(remaining, extent.num_blocks * block_size)

    # Pseudo-extents have no backing blocks, so nothing is actually written.
    if extent.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(extent.start_block * block_size)
      # buffer() yields a zero-copy view of the slice being written.
      file_obj.write(buffer(data, offset, chunk_len))

    offset += chunk_len
    remaining -= chunk_len

  if remaining:
    raise PayloadError('%s: more data than write extents' % base_name)
139
140
Gilad Arnold272a4992013-05-08 13:12:53 -0700141def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
142 """Translates an extent sequence into a bspatch-compatible string argument.
143
144 Args:
145 extents: sequence of block extents (offset and length)
146 block_size: size of each block
147 base_name: name string of extent sequence for error reporting
148 data_length: the actual total length of the data in bytes (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -0700149
Gilad Arnold272a4992013-05-08 13:12:53 -0700150 Returns:
151 A tuple consisting of (i) a string of the form
152 "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
153 for filling the last extent, (iii) the length of the padding (zero means no
154 padding is needed and the extents cover the full length of data).
Allie Wood12f59aa2015-04-06 11:05:12 -0700155
Gilad Arnold272a4992013-05-08 13:12:53 -0700156 Raises:
157 PayloadError if data_length is too short or too long.
Gilad Arnold272a4992013-05-08 13:12:53 -0700158 """
159 arg = ''
160 pad_off = pad_len = 0
161 if data_length < 0:
162 data_length = sys.maxint
163 for ex, ex_name in common.ExtentIter(extents, base_name):
164 if not data_length:
165 raise PayloadError('%s: more extents than total data length' % ex_name)
Gilad Arnold658185a2013-05-08 17:57:54 -0700166
167 is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
168 start_byte = -1 if is_pseudo else ex.start_block * block_size
Gilad Arnold272a4992013-05-08 13:12:53 -0700169 num_bytes = ex.num_blocks * block_size
170 if data_length < num_bytes:
Gilad Arnold658185a2013-05-08 17:57:54 -0700171 # We're only padding a real extent.
172 if not is_pseudo:
173 pad_off = start_byte + data_length
174 pad_len = num_bytes - data_length
175
Gilad Arnold272a4992013-05-08 13:12:53 -0700176 num_bytes = data_length
Gilad Arnold658185a2013-05-08 17:57:54 -0700177
Gilad Arnold272a4992013-05-08 13:12:53 -0700178 arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
179 data_length -= num_bytes
180
181 if data_length:
182 raise PayloadError('%s: extents not covering full data length' % base_name)
183
184 return arg, pad_off, pad_len
185
186
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800187#
188# Payload application.
189#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.
  """

  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
               truncate_to_expected_size=True):
    """Initialize the applier.

    Args:
      payload: the payload object to check
      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)
    """
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size
    self.minor_version = payload.manifest.minor_version
    self.bsdiff_in_place = bsdiff_in_place
    # Fall back to resolving 'bspatch' via PATH when no explicit path given.
    self.bspatch_path = bspatch_path or 'bspatch'
    self.truncate_to_expected_size = truncate_to_expected_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        if data_end >= data_length + block_size:
          # BUG FIX: the original raise was missing the '% op_name' argument,
          # so the message was emitted with a bare, unformatted '%s'.
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % op_name)

        # Pad with zeros if necessary.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        # NOTE(review): this seek repositions the *payload* file but no
        # payload read/write follows it; looks like a leftover -- confirm
        # against ReadDataBlob's own seeking before removing.
        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _ApplyMoveOperation(self, op, op_name, part_file):
    """Applies a MOVE operation.

    Note that this operation must read the whole block data from the input and
    only then dump it, due to our in-place update semantics; otherwise, it
    might clobber data midway through.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplyBsdiffOperation(self, op, op_name, patch_data, new_part_file):
    """Applies a BSDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    # Implemented using a SOURCE_BSDIFF operation with the source and target
    # partition set to the new partition.
    self._ApplySourceBsdiffOperation(op, op_name, patch_data, new_part_file,
                                     new_part_file)

  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                new_part_file):
    """Applies a SOURCE_COPY operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      old_part_file: the old partition file object
      new_part_file: the new partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplySourceBsdiffOperation(self, op, op_name, patch_data, old_part_file,
                                  new_part_file):
    """Applies a SOURCE_BSDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      old_part_file: the source partition file object
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

    # Fast path: when both partitions are real files we can hand their file
    # descriptors to bspatch directly, along with extent arguments, and avoid
    # staging the data through temporary files.
    if (hasattr(new_part_file, 'fileno') and
        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
      # Construct input and output extents argument for bspatch.
      in_extents_arg, _, _ = _ExtentsToBspatchArg(
          op.src_extents, block_size, '%s.src_extents' % op_name,
          data_length=op.src_length)
      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
          op.dst_extents, block_size, '%s.dst_extents' % op_name,
          data_length=op.dst_length)

      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
      # Diff from source partition.
      old_file_name = '/dev/fd/%d' % old_part_file.fileno()

      # Invoke bspatch on partition file with extents args.
      bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                     patch_file_name, in_extents_arg, out_extents_arg]
      subprocess.check_call(bspatch_cmd)

      # Pad with zeros past the total output length.
      if pad_len:
        new_part_file.seek(pad_off)
        new_part_file.write('\0' * pad_len)
    else:
      # Slow path: stage input/output through temporary files.
      # Gather input raw data and write to a temp file.
      input_part_file = old_part_file if old_part_file else new_part_file
      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
                             max_length=op.src_length)
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        in_file.write(in_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name

      # Invoke bspatch.
      bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                     patch_file_name]
      subprocess.check_call(bspatch_cmd)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)

      # Delete input/output files.
      os.remove(in_file_name)
      os.remove(out_file_name)

    # Delete patch file.
    os.remove(patch_file_name)

  def _ApplyOperations(self, operations, base_name, old_part_file,
                       new_part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes an in-place update semantics for MOVE and BSDIFF, namely all
    reads are performed first, then the data is processed and written back to
    the same file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      old_part_file: the old partition file object, open for reading/writing
      new_part_file: the new partition file object, open for reading/writing
      part_size: the partition size

    Raises:
      PayloadError if anything goes wrong while processing the payload.
    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

      # Dispatch per operation type; in-place ops (MOVE/BSDIFF) act on the
      # new partition only, SOURCE_* ops read from the old partition.
      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.BSDIFF:
        self._ApplyBsdiffOperation(op, op_name, data, new_part_file)
      elif op.type == common.OpType.SOURCE_COPY:
        self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                       new_part_file)
      elif op.type == common.OpType.SOURCE_BSDIFF:
        self._ApplySourceBsdiffOperation(op, op_name, data, old_part_file,
                                         new_part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        new_part_file_name, new_part_info,
                        old_part_file_name=None, old_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      new_part_file_name: file name to write partition data to
      new_part_info: size and expected hash of dest partition
      old_part_file_name: file name of source partition (optional)
      old_part_info: size and expected hash of source partition (optional)

    Raises:
      PayloadError if anything goes wrong with the update.
    """
    # Do we have a source partition?
    if old_part_file_name:
      # Verify the source partition.
      with open(old_part_file_name, 'rb') as old_part_file:
        _VerifySha256(old_part_file, old_part_info.hash, part_name,
                      length=old_part_info.size)
      new_part_file_mode = 'r+b'
      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
        # Copy the src partition to the dst one; make sure we don't truncate it.
        shutil.copyfile(old_part_file_name, new_part_file_name)
      elif self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION:
        # In minor version 2, we don't want to copy the partitions, so instead
        # just make the new partition file.
        open(new_part_file_name, 'w').close()
      else:
        raise PayloadError("Unknown minor version: %d" % self.minor_version)
    else:
      # We need to create/truncate the dst partition file.
      new_part_file_mode = 'w+b'

    # Apply operations.
    with open(new_part_file_name, new_part_file_mode) as new_part_file:
      old_part_file = (open(old_part_file_name, 'r+b')
                       if old_part_file_name else None)
      try:
        self._ApplyOperations(operations, base_name, old_part_file,
                              new_part_file, new_part_info.size)
      finally:
        if old_part_file:
          old_part_file.close()

      # Truncate the result, if so instructed.
      if self.truncate_to_expected_size:
        new_part_file.seek(0, 2)
        if new_part_file.tell() > new_part_info.size:
          new_part_file.seek(new_part_info.size)
          new_part_file.truncate()

    # Verify the resulting partition.
    with open(new_part_file_name, 'rb') as new_part_file:
      _VerifySha256(new_part_file, new_part_info.hash, part_name,
                    length=new_part_info.size)

  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
          old_rootfs_part=None):
    """Applier entry point, invoking all update operations.

    Args:
      new_kernel_part: name of dest kernel partition file
      new_rootfs_part: name of dest rootfs partition file
      old_kernel_part: name of source kernel partition file (optional)
      old_rootfs_part: name of source rootfs partition file (optional)

    Raises:
      PayloadError if payload application failed.
    """
    self.payload.ResetFile()

    # Make sure the arguments are sane and match the payload: a full payload
    # needs no source partitions, a delta payload needs both.
    if not (new_kernel_part and new_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (old_kernel_part or old_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif old_kernel_part and old_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', new_rootfs_part,
        self.payload.manifest.new_rootfs_info, old_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel update.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', new_kernel_part,
        self.payload.manifest.new_kernel_info, old_kernel_part,
        self.payload.manifest.old_kernel_info)