blob: c3763e9b18fa0a52516008837dddcbaffe72fdc6 [file] [log] [blame]
Gilad Arnold553b0ec2013-01-26 01:00:39 -08001# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Applying a Chrome OS update payload.
6
7This module is used internally by the main Payload class for applying an update
8payload. The interface for invoking the applier is as follows:
9
10 applier = PayloadApplier(payload)
11 applier.Run(...)
12
13"""
14
Allie Wood12f59aa2015-04-06 11:05:12 -070015from __future__ import print_function
16
Gilad Arnold553b0ec2013-01-26 01:00:39 -080017import array
18import bz2
19import hashlib
Gilad Arnold658185a2013-05-08 17:57:54 -070020import itertools
Gilad Arnold553b0ec2013-01-26 01:00:39 -080021import os
22import shutil
23import subprocess
24import sys
25import tempfile
26
27import common
28from error import PayloadError
29
30
31#
32# Helper functions.
33#
Gilad Arnold382df5c2013-05-03 12:49:28 -070034def _VerifySha256(file_obj, expected_hash, name, length=-1):
Gilad Arnold553b0ec2013-01-26 01:00:39 -080035 """Verifies the SHA256 hash of a file.
36
37 Args:
38 file_obj: file object to read
39 expected_hash: the hash digest we expect to be getting
40 name: name string of this hash, for error reporting
Gilad Arnold382df5c2013-05-03 12:49:28 -070041 length: precise length of data to verify (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -070042
Gilad Arnold553b0ec2013-01-26 01:00:39 -080043 Raises:
Gilad Arnold382df5c2013-05-03 12:49:28 -070044 PayloadError if computed hash doesn't match expected one, or if fails to
45 read the specified length of data.
Gilad Arnold553b0ec2013-01-26 01:00:39 -080046
47 """
48 # pylint: disable=E1101
49 hasher = hashlib.sha256()
50 block_length = 1024 * 1024
Gilad Arnold382df5c2013-05-03 12:49:28 -070051 max_length = length if length >= 0 else sys.maxint
Gilad Arnold553b0ec2013-01-26 01:00:39 -080052
Gilad Arnold382df5c2013-05-03 12:49:28 -070053 while max_length > 0:
Gilad Arnold553b0ec2013-01-26 01:00:39 -080054 read_length = min(max_length, block_length)
55 data = file_obj.read(read_length)
56 if not data:
57 break
58 max_length -= len(data)
59 hasher.update(data)
60
Gilad Arnold382df5c2013-05-03 12:49:28 -070061 if length >= 0 and max_length > 0:
62 raise PayloadError(
63 'insufficient data (%d instead of %d) when verifying %s' %
64 (length - max_length, length, name))
65
Gilad Arnold553b0ec2013-01-26 01:00:39 -080066 actual_hash = hasher.digest()
67 if actual_hash != expected_hash:
68 raise PayloadError('%s hash (%s) not as expected (%s)' %
Gilad Arnold96405372013-05-04 00:24:58 -070069 (name, common.FormatSha256(actual_hash),
70 common.FormatSha256(expected_hash)))
Gilad Arnold553b0ec2013-01-26 01:00:39 -080071
72
def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Collects data from a file as described by a sequence of extents.

  Reads whole chunks at a time so data is not needlessly copied around.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)

  Returns:
    A character array containing the concatenated read data.

  """
  buf = array.array('c')
  remaining = max_length if max_length >= 0 else sys.maxint
  for ex in extents:
    if not remaining:
      break
    chunk_len = min(remaining, ex.num_blocks * block_size)

    # A pseudo-extent has no backing blocks; it stands for a run of zeros.
    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
      buf.extend(itertools.repeat('\0', chunk_len))
    else:
      file_obj.seek(ex.start_block * block_size)
      buf.fromfile(file_obj, chunk_len)

    remaining -= chunk_len

  return buf
106
107
def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes a data blob to a file at positions given by an extent sequence.

  Writes whole chunks at a time so data is not needlessly copied around.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.

  """
  offset = 0
  remaining = len(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not remaining:
      raise PayloadError('%s: more write extents than data' % ex_name)
    chunk_len = min(remaining, ex.num_blocks * block_size)

    # A pseudo-extent has no backing blocks, so nothing is actually written.
    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(ex.start_block * block_size)
      # buffer() yields a zero-copy view into the data (Python 2).
      file_obj.write(buffer(data, offset, chunk_len))

    offset += chunk_len
    remaining -= chunk_len

  if remaining:
    raise PayloadError('%s: more data than write extents' % base_name)
142
143
def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
  """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).

  Raises:
    PayloadError if data_length is too short or too long.

  """
  pieces = []
  pad_off = pad_len = 0
  remaining = sys.maxint if data_length < 0 else data_length
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not remaining:
      raise PayloadError('%s: more extents than total data length' % ex_name)

    # Pseudo-extents are passed to bspatch as offset -1.
    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
    start_byte = -1 if is_pseudo else ex.start_block * block_size
    num_bytes = ex.num_blocks * block_size
    if remaining < num_bytes:
      # The last extent is only partially occupied by data; note where zero
      # padding is needed (pseudo-extents are never written, so no padding).
      if not is_pseudo:
        pad_off = start_byte + remaining
        pad_len = num_bytes - remaining
      num_bytes = remaining

    pieces.append('%d:%d' % (start_byte, num_bytes))
    remaining -= num_bytes

  if remaining:
    raise PayloadError('%s: extents not covering full data length' % base_name)

  return ','.join(pieces), pad_off, pad_len
189
190
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800191#
192# Payload application.
193#
194class PayloadApplier(object):
195 """Applying an update payload.
196
197 This is a short-lived object whose purpose is to isolate the logic used for
198 applying an update payload.
199
200 """
201
Gilad Arnold21a02502013-08-22 16:59:48 -0700202 def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
Gilad Arnolde5fdf182013-05-23 16:13:38 -0700203 truncate_to_expected_size=True):
Gilad Arnold272a4992013-05-08 13:12:53 -0700204 """Initialize the applier.
205
206 Args:
207 payload: the payload object to check
208 bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
Gilad Arnold21a02502013-08-22 16:59:48 -0700209 bspatch_path: path to the bspatch binary (optional)
Gilad Arnolde5fdf182013-05-23 16:13:38 -0700210 truncate_to_expected_size: whether to truncate the resulting partitions
211 to their expected sizes, as specified in the
212 payload (optional)
Gilad Arnold272a4992013-05-08 13:12:53 -0700213
214 """
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800215 assert payload.is_init, 'uninitialized update payload'
216 self.payload = payload
217 self.block_size = payload.manifest.block_size
Allie Wood12f59aa2015-04-06 11:05:12 -0700218 self.minor_version = payload.manifest.minor_version
Gilad Arnold272a4992013-05-08 13:12:53 -0700219 self.bsdiff_in_place = bsdiff_in_place
Gilad Arnold21a02502013-08-22 16:59:48 -0700220 self.bspatch_path = bspatch_path or 'bspatch'
Gilad Arnolde5fdf182013-05-23 16:13:38 -0700221 self.truncate_to_expected_size = truncate_to_expected_size
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800222
223 def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
224 """Applies a REPLACE{,_BZ} operation.
225
226 Args:
227 op: the operation object
228 op_name: name string for error reporting
229 out_data: the data to be written
230 part_file: the partition file object
231 part_size: the size of the partition
Allie Wood12f59aa2015-04-06 11:05:12 -0700232
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800233 Raises:
234 PayloadError if something goes wrong.
235
236 """
237 block_size = self.block_size
238 data_length = len(out_data)
239
240 # Decompress data if needed.
241 if op.type == common.OpType.REPLACE_BZ:
242 out_data = bz2.decompress(out_data)
243 data_length = len(out_data)
244
245 # Write data to blocks specified in dst extents.
246 data_start = 0
247 for ex, ex_name in common.ExtentIter(op.dst_extents,
248 '%s.dst_extents' % op_name):
249 start_block = ex.start_block
250 num_blocks = ex.num_blocks
251 count = num_blocks * block_size
252
253 # Make sure it's not a fake (signature) operation.
254 if start_block != common.PSEUDO_EXTENT_MARKER:
255 data_end = data_start + count
256
257 # Make sure we're not running past partition boundary.
258 if (start_block + num_blocks) * block_size > part_size:
259 raise PayloadError(
260 '%s: extent (%s) exceeds partition size (%d)' %
261 (ex_name, common.FormatExtent(ex, block_size),
262 part_size))
263
264 # Make sure that we have enough data to write.
265 if data_end >= data_length + block_size:
266 raise PayloadError(
267 '%s: more dst blocks than data (even with padding)')
268
269 # Pad with zeros if necessary.
270 if data_end > data_length:
271 padding = data_end - data_length
272 out_data += '\0' * padding
273
274 self.payload.payload_file.seek(start_block * block_size)
275 part_file.seek(start_block * block_size)
276 part_file.write(out_data[data_start:data_end])
277
278 data_start += count
279
280 # Make sure we wrote all data.
281 if data_start < data_length:
282 raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
283 (op_name, data_start, data_length))
284
285 def _ApplyMoveOperation(self, op, op_name, part_file):
286 """Applies a MOVE operation.
287
Gilad Arnold658185a2013-05-08 17:57:54 -0700288 Note that this operation must read the whole block data from the input and
289 only then dump it, due to our in-place update semantics; otherwise, it
290 might clobber data midway through.
291
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800292 Args:
293 op: the operation object
294 op_name: name string for error reporting
295 part_file: the partition file object
Allie Wood12f59aa2015-04-06 11:05:12 -0700296
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800297 Raises:
298 PayloadError if something goes wrong.
299
300 """
301 block_size = self.block_size
302
303 # Gather input raw data from src extents.
304 in_data = _ReadExtents(part_file, op.src_extents, block_size)
305
306 # Dump extracted data to dst extents.
307 _WriteExtents(part_file, in_data, op.dst_extents, block_size,
308 '%s.dst_extents' % op_name)
309
Allie Wood12f59aa2015-04-06 11:05:12 -0700310 def _ApplyBsdiffOperation(self, op, op_name, patch_data, new_part_file):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800311 """Applies a BSDIFF operation.
312
313 Args:
314 op: the operation object
315 op_name: name string for error reporting
316 patch_data: the binary patch content
Allie Wood12f59aa2015-04-06 11:05:12 -0700317 new_part_file: the target partition file object
318
319 Raises:
320 PayloadError if something goes wrong.
321 """
322 # Implemented using a SOURCE_BSDIFF operation with the source and target
323 # partition set to the new partition.
324 self._ApplySourceBsdiffOperation(op, op_name, patch_data, new_part_file,
325 new_part_file)
326
327 def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
328 new_part_file):
329 """Applies a SOURCE_COPY operation.
330
331 Args:
332 op: the operation object
333 op_name: name string for error reporting
334 old_part_file: the old partition file object
335 new_part_file: the new partition file object
336
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800337 Raises:
338 PayloadError if something goes wrong.
339
340 """
Allie Wood12f59aa2015-04-06 11:05:12 -0700341 if not old_part_file:
342 raise PayloadError(
343 '%s: no source partition file provided for operation type (%d)' %
344 (op_name, op.type))
345
346 block_size = self.block_size
347
348 # Gather input raw data from src extents.
349 in_data = _ReadExtents(old_part_file, op.src_extents, block_size)
350
351 # Dump extracted data to dst extents.
352 _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
353 '%s.dst_extents' % op_name)
354
  def _ApplySourceBsdiffOperation(self, op, op_name, patch_data, old_part_file,
                                  new_part_file):
    """Applies a SOURCE_BSDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      old_part_file: the source partition file object
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.

    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

    # Fast path: if both partition objects are backed by real file
    # descriptors, let bspatch read/write them directly via /dev/fd with
    # explicit extent-list arguments, avoiding any data staging in Python.
    # NOTE(review): old_part_file is known non-None at this point (checked
    # above), so the '(not old_part_file)' clause is vacuously False here.
    if (hasattr(new_part_file, 'fileno') and
        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
      # Construct input and output extents argument for bspatch.
      in_extents_arg, _, _ = _ExtentsToBspatchArg(
          op.src_extents, block_size, '%s.src_extents' % op_name,
          data_length=op.src_length)
      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
          op.dst_extents, block_size, '%s.dst_extents' % op_name,
          data_length=op.dst_length)

      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
      # Diff from source partition.
      old_file_name = '/dev/fd/%d' % old_part_file.fileno()

      # Invoke bspatch on partition file with extents args.
      bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                     patch_file_name, in_extents_arg, out_extents_arg]
      subprocess.check_call(bspatch_cmd)

      # Pad with zeros past the total output length.
      if pad_len:
        new_part_file.seek(pad_off)
        new_part_file.write('\0' * pad_len)
    else:
      # Slow path: stage the source data in a temp file, run bspatch on
      # plain temp files, then write the patched result back to the target
      # extents ourselves.
      # Gather input raw data and write to a temp file.
      input_part_file = old_part_file if old_part_file else new_part_file
      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
                             max_length=op.src_length)
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        in_file.write(in_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name

      # Invoke bspatch.
      bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                     patch_file_name]
      subprocess.check_call(bspatch_cmd)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding. dst_length need not be
      # block-aligned, so pad the tail to whole blocks before writing.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)

      # Delete input/output files.
      os.remove(in_file_name)
      os.remove(out_file_name)

    # Delete patch file.
    os.remove(patch_file_name)
444
Allie Wood12f59aa2015-04-06 11:05:12 -0700445 def _ApplyOperations(self, operations, base_name, old_part_file,
446 new_part_file, part_size):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800447 """Applies a sequence of update operations to a partition.
448
Allie Wood12f59aa2015-04-06 11:05:12 -0700449 This assumes an in-place update semantics for MOVE and BSDIFF, namely all
450 reads are performed first, then the data is processed and written back to
451 the same file.
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800452
453 Args:
454 operations: the sequence of operations
455 base_name: the name of the operation sequence
Allie Wood12f59aa2015-04-06 11:05:12 -0700456 old_part_file: the old partition file object, open for reading/writing
457 new_part_file: the new partition file object, open for reading/writing
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800458 part_size: the partition size
Allie Wood12f59aa2015-04-06 11:05:12 -0700459
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800460 Raises:
461 PayloadError if anything goes wrong while processing the payload.
462
463 """
464 for op, op_name in common.OperationIter(operations, base_name):
465 # Read data blob.
466 data = self.payload.ReadDataBlob(op.data_offset, op.data_length)
467
468 if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
Allie Wood12f59aa2015-04-06 11:05:12 -0700469 self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800470 elif op.type == common.OpType.MOVE:
Allie Wood12f59aa2015-04-06 11:05:12 -0700471 self._ApplyMoveOperation(op, op_name, new_part_file)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800472 elif op.type == common.OpType.BSDIFF:
Allie Wood12f59aa2015-04-06 11:05:12 -0700473 self._ApplyBsdiffOperation(op, op_name, data, new_part_file)
474 elif op.type == common.OpType.SOURCE_COPY:
475 self._ApplySourceCopyOperation(op, op_name, old_part_file,
476 new_part_file)
477 elif op.type == common.OpType.SOURCE_BSDIFF:
478 self._ApplySourceBsdiffOperation(op, op_name, data, old_part_file,
479 new_part_file)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800480 else:
481 raise PayloadError('%s: unknown operation type (%d)' %
482 (op_name, op.type))
483
484 def _ApplyToPartition(self, operations, part_name, base_name,
Gilad Arnold16416602013-05-04 21:40:39 -0700485 new_part_file_name, new_part_info,
486 old_part_file_name=None, old_part_info=None):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800487 """Applies an update to a partition.
488
489 Args:
490 operations: the sequence of update operations to apply
491 part_name: the name of the partition, for error reporting
492 base_name: the name of the operation sequence
Gilad Arnold16416602013-05-04 21:40:39 -0700493 new_part_file_name: file name to write partition data to
494 new_part_info: size and expected hash of dest partition
495 old_part_file_name: file name of source partition (optional)
496 old_part_info: size and expected hash of source partition (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -0700497
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800498 Raises:
499 PayloadError if anything goes wrong with the update.
500
501 """
502 # Do we have a source partition?
Gilad Arnold16416602013-05-04 21:40:39 -0700503 if old_part_file_name:
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800504 # Verify the source partition.
Gilad Arnold16416602013-05-04 21:40:39 -0700505 with open(old_part_file_name, 'rb') as old_part_file:
506 _VerifySha256(old_part_file, old_part_info.hash, part_name,
507 length=old_part_info.size)
Gilad Arnoldf69065c2013-05-27 16:54:59 -0700508 new_part_file_mode = 'r+b'
Allie Wood12f59aa2015-04-06 11:05:12 -0700509 if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
510 # Copy the src partition to the dst one; make sure we don't truncate it.
511 shutil.copyfile(old_part_file_name, new_part_file_name)
512 elif self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION:
513 # In minor version 2, we don't want to copy the partitions, so instead
514 # just make the new partition file.
515 open(new_part_file_name, 'w').close()
516 else:
517 raise PayloadError("Unknown minor version: %d" % self.minor_version)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800518 else:
Gilad Arnoldf69065c2013-05-27 16:54:59 -0700519 # We need to create/truncate the dst partition file.
520 new_part_file_mode = 'w+b'
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800521
522 # Apply operations.
Gilad Arnoldf69065c2013-05-27 16:54:59 -0700523 with open(new_part_file_name, new_part_file_mode) as new_part_file:
Allie Wood12f59aa2015-04-06 11:05:12 -0700524 old_part_file = (open(old_part_file_name, 'r+b')
525 if old_part_file_name else None)
526 try:
527 self._ApplyOperations(operations, base_name, old_part_file,
528 new_part_file, new_part_info.size)
529 finally:
530 if old_part_file:
531 old_part_file.close()
532
Gilad Arnolde5fdf182013-05-23 16:13:38 -0700533 # Truncate the result, if so instructed.
534 if self.truncate_to_expected_size:
535 new_part_file.seek(0, 2)
536 if new_part_file.tell() > new_part_info.size:
537 new_part_file.seek(new_part_info.size)
538 new_part_file.truncate()
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800539
540 # Verify the resulting partition.
Gilad Arnold16416602013-05-04 21:40:39 -0700541 with open(new_part_file_name, 'rb') as new_part_file:
542 _VerifySha256(new_part_file, new_part_info.hash, part_name,
543 length=new_part_info.size)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800544
Gilad Arnold16416602013-05-04 21:40:39 -0700545 def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
546 old_rootfs_part=None):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800547 """Applier entry point, invoking all update operations.
548
549 Args:
Gilad Arnold16416602013-05-04 21:40:39 -0700550 new_kernel_part: name of dest kernel partition file
551 new_rootfs_part: name of dest rootfs partition file
552 old_kernel_part: name of source kernel partition file (optional)
553 old_rootfs_part: name of source rootfs partition file (optional)
Allie Wood12f59aa2015-04-06 11:05:12 -0700554
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800555 Raises:
556 PayloadError if payload application failed.
557
558 """
559 self.payload.ResetFile()
560
561 # Make sure the arguments are sane and match the payload.
Gilad Arnold16416602013-05-04 21:40:39 -0700562 if not (new_kernel_part and new_rootfs_part):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800563 raise PayloadError('missing dst {kernel,rootfs} partitions')
564
Gilad Arnold16416602013-05-04 21:40:39 -0700565 if not (old_kernel_part or old_rootfs_part):
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800566 if not self.payload.IsFull():
567 raise PayloadError('trying to apply a non-full update without src '
568 '{kernel,rootfs} partitions')
Gilad Arnold16416602013-05-04 21:40:39 -0700569 elif old_kernel_part and old_rootfs_part:
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800570 if not self.payload.IsDelta():
571 raise PayloadError('trying to apply a non-delta update onto src '
572 '{kernel,rootfs} partitions')
573 else:
574 raise PayloadError('not all src partitions provided')
575
576 # Apply update to rootfs.
577 self._ApplyToPartition(
578 self.payload.manifest.install_operations, 'rootfs',
Gilad Arnold16416602013-05-04 21:40:39 -0700579 'install_operations', new_rootfs_part,
580 self.payload.manifest.new_rootfs_info, old_rootfs_part,
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800581 self.payload.manifest.old_rootfs_info)
582
583 # Apply update to kernel update.
584 self._ApplyToPartition(
585 self.payload.manifest.kernel_install_operations, 'kernel',
Gilad Arnold16416602013-05-04 21:40:39 -0700586 'kernel_install_operations', new_kernel_part,
587 self.payload.manifest.new_kernel_info, old_kernel_part,
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800588 self.payload.manifest.old_kernel_info)