#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Applying a Chrome OS update payload.

This module is used internally by the main Payload class for applying an update
payload. The interface for invoking the applier is as follows:

  applier = PayloadApplier(payload)
  applier.Run(...)

"""

from __future__ import print_function

import array
import bz2
import hashlib
import itertools
# The lzma module is not available everywhere, so tolerate a failed import
# when it is not going to be used. For example, 'cros flash' uses devserver
# code that eventually loads this file, but the lzma library is not included
# on the client test devices and does not need to be, since 'cros flash' does
# not use lzma. Python 3.x includes lzma; for backward compatibility with
# Python 2.7, backports-lzma is needed.
try:
  import lzma
except ImportError:
  try:
    from backports import lzma
  except ImportError:
    pass
import os
import shutil
import subprocess
import sys
import tempfile

from update_payload import common
from update_payload.error import PayloadError
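
# Illustrative usage sketch (not exercised by this module): how a caller might
# drive PayloadApplier end to end. The Payload class and its Init() method come
# from the update_payload package; the file names and the exact partition map
# passed to Run() below are assumptions for illustration only.
#
#   from update_payload import Payload
#
#   with open('payload.bin', 'rb') as payload_file:
#     payload = Payload(payload_file)
#     payload.Init()
#     applier = PayloadApplier(payload)
#     applier.Run({common.KERNEL: 'kernel.new.img',
#                  common.ROOTFS: 'rootfs.new.img'},
#                 old_parts={common.KERNEL: 'kernel.old.img',
#                            common.ROOTFS: 'rootfs.old.img'})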


#
# Helper functions.
#
def _VerifySha256(file_obj, expected_hash, name, length=-1):
  """Verifies the SHA256 hash of a file.

  Args:
    file_obj: file object to read
    expected_hash: the hash digest we expect to be getting
    name: name string of this hash, for error reporting
    length: precise length of data to verify (optional)

  Raises:
    PayloadError if the computed hash doesn't match the expected one, or if it
    fails to read the specified length of data.
  """
  hasher = hashlib.sha256()
  block_length = 1024 * 1024
  max_length = length if length >= 0 else sys.maxint

  while max_length > 0:
    read_length = min(max_length, block_length)
    data = file_obj.read(read_length)
    if not data:
      break
    max_length -= len(data)
    hasher.update(data)

  if length >= 0 and max_length > 0:
    raise PayloadError(
        'insufficient data (%d instead of %d) when verifying %s' %
        (length - max_length, length, name))

  actual_hash = hasher.digest()
  if actual_hash != expected_hash:
    raise PayloadError('%s hash (%s) not as expected (%s)' %
                       (name, common.FormatSha256(actual_hash),
                        common.FormatSha256(expected_hash)))
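
# Illustrative call (an assumption, not a snippet taken from this module): the
# expected hash is a raw 32-byte digest, e.g. hashlib.sha256(data).digest(),
# not a hex string; the file name and variables below are hypothetical.
#
#   with open('rootfs.new.img', 'rb') as part_file:
#     _VerifySha256(part_file, expected_digest, 'new rootfs', length=part_size)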


def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  This tries to be efficient by not copying data as it is read in chunks.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)

  Returns:
    A character array containing the concatenated read data.
  """
  data = array.array('c')
  if max_length < 0:
    max_length = sys.maxint
  for ex in extents:
    if max_length == 0:
      break
    read_length = min(max_length, ex.num_blocks * block_size)

    # Fill with zeros or read from file, depending on the type of extent.
    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
      data.extend(itertools.repeat('\0', read_length))
    else:
      file_obj.seek(ex.start_block * block_size)
      data.fromfile(file_obj, read_length)

    max_length -= read_length

  return data


def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copying data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.
  """
  data_offset = 0
  data_length = len(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more write extents than data' % ex_name)
    write_length = min(data_length, ex.num_blocks * block_size)

    # Only do actual writing if this is not a pseudo-extent.
    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(ex.start_block * block_size)
      data_view = buffer(data, data_offset, write_length)
      file_obj.write(data_view)

    data_offset += write_length
    data_length -= write_length

  if data_length:
    raise PayloadError('%s: more data than write extents' % base_name)


def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
  """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).

  Raises:
    PayloadError if data_length is too short or too long.
  """
  arg = ''
  pad_off = pad_len = 0
  if data_length < 0:
    data_length = sys.maxint
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more extents than total data length' % ex_name)

    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
    start_byte = -1 if is_pseudo else ex.start_block * block_size
    num_bytes = ex.num_blocks * block_size
    if data_length < num_bytes:
      # We're only padding a real extent.
      if not is_pseudo:
        pad_off = start_byte + data_length
        pad_len = num_bytes - data_length

      num_bytes = data_length

    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
    data_length -= num_bytes

  if data_length:
    raise PayloadError('%s: extents not covering full data length' % base_name)

  return arg, pad_off, pad_len
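
# Worked example (illustrative numbers, assuming a 4096-byte block size): for
# extents [(start_block=10, num_blocks=2), (pseudo-extent, num_blocks=1)] and
# data_length=10000, the first extent yields "40960:8192" and consumes 8192
# bytes; the remaining 1808 bytes fall in the pseudo-extent, which yields
# "-1:1808". The result is ("40960:8192,-1:1808", 0, 0): no padding is needed
# because the short tail lands in the pseudo-extent rather than a real one.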


#
# Payload application.
#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.
  """

  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
               puffpatch_path=None, truncate_to_expected_size=True):
    """Initialize the applier.

    Args:
      payload: the payload object to apply
      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      puffpatch_path: path to the puffpatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)
    """
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size
    self.minor_version = payload.manifest.minor_version
    self.bsdiff_in_place = bsdiff_in_place
    self.bspatch_path = bspatch_path or 'bspatch'
    self.puffpatch_path = puffpatch_path or 'puffin'
    self.truncate_to_expected_size = truncate_to_expected_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ,_XZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)
    elif op.type == common.OpType.REPLACE_XZ:
      # pylint: disable=no-member
      out_data = lzma.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % ex_name)

        # Pad with zeros if necessary.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _ApplyMoveOperation(self, op, op_name, part_file):
    """Applies a MOVE operation.

    Note that this operation must read the whole block data from the input and
    only then dump it, due to our in-place update semantics; otherwise, it
    might clobber data midway through.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplyZeroOperation(self, op, op_name, part_file):
    """Applies a ZERO operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    base_name = '%s.dst_extents' % op_name

    # Iterate over the extents and write zero.
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
      # Only do actual writing if this is not a pseudo-extent.
      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
        part_file.seek(ex.start_block * block_size)
        part_file.write('\0' * (ex.num_blocks * block_size))

  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                new_part_file):
    """Applies a SOURCE_COPY operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      old_part_file: the old partition file object
      new_part_file: the new partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _BytesInExtents(self, extents, base_name):
    """Counts the length of extents in bytes.

    Args:
      extents: The list of Extents.
      base_name: For error reporting.

    Returns:
      The number of bytes in extents.
    """

    length = 0
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(extents, base_name):
      length += ex.num_blocks * self.block_size
    return length

  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
                          new_part_file):
    """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      old_part_file: the source partition file object
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

    if (hasattr(new_part_file, 'fileno') and
        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
      # Construct input and output extents argument for bspatch.

      in_extents_arg, _, _ = _ExtentsToBspatchArg(
          op.src_extents, block_size, '%s.src_extents' % op_name,
          data_length=op.src_length if op.src_length else
          self._BytesInExtents(op.src_extents, "%s.src_extents" % op_name))
      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
          op.dst_extents, block_size, '%s.dst_extents' % op_name,
          data_length=op.dst_length if op.dst_length else
          self._BytesInExtents(op.dst_extents, "%s.dst_extents" % op_name))

      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
      # Diff from source partition.
      old_file_name = '/dev/fd/%d' % old_part_file.fileno()

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch on partition file with extents args.
        bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                       patch_file_name, in_extents_arg, out_extents_arg]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch on partition file with extents args.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % old_file_name,
                         "--dst_file=%s" % new_file_name,
                         "--patch_file=%s" % patch_file_name,
                         "--src_extents=%s" % in_extents_arg,
                         "--dst_extents=%s" % out_extents_arg]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError("Unknown operation %s" % op.type)

      # Pad with zeros past the total output length.
      if pad_len:
        new_part_file.seek(pad_off)
        new_part_file.write('\0' * pad_len)
    else:
      # Gather input raw data and write to a temp file.
      input_part_file = old_part_file if old_part_file else new_part_file
      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
                             max_length=op.src_length if op.src_length else
                             self._BytesInExtents(op.src_extents,
                                                  "%s.src_extents" % op_name))
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        in_file.write(in_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch.
        bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                       patch_file_name]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % in_file_name,
                         "--dst_file=%s" % out_file_name,
                         "--patch_file=%s" % patch_file_name]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError("Unknown operation %s" % op.type)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)

      # Delete input/output files.
      os.remove(in_file_name)
      os.remove(out_file_name)

    # Delete patch file.
    os.remove(patch_file_name)

  def _ApplyOperations(self, operations, base_name, old_part_file,
                       new_part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes in-place update semantics for MOVE and BSDIFF, namely all
    reads are performed first, then the data is processed and written back to
    the same file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      old_part_file: the old partition file object, open for reading/writing
      new_part_file: the new partition file object, open for reading/writing
      part_size: the partition size

    Raises:
      PayloadError if anything goes wrong while processing the payload.
    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
                     common.OpType.REPLACE_XZ):
        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.ZERO:
        self._ApplyZeroOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.BSDIFF:
        self._ApplyDiffOperation(op, op_name, data, new_part_file,
                                 new_part_file)
      elif op.type == common.OpType.SOURCE_COPY:
        self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                       new_part_file)
      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
                       common.OpType.BROTLI_BSDIFF):
        self._ApplyDiffOperation(op, op_name, data, old_part_file,
                                 new_part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        new_part_file_name, new_part_info,
                        old_part_file_name=None, old_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      new_part_file_name: file name to write partition data to
      new_part_info: size and expected hash of dest partition
      old_part_file_name: file name of source partition (optional)
      old_part_info: size and expected hash of source partition (optional)

    Raises:
      PayloadError if anything goes wrong with the update.
    """
    # Do we have a source partition?
    if old_part_file_name:
      # Verify the source partition.
      with open(old_part_file_name, 'rb') as old_part_file:
        _VerifySha256(old_part_file, old_part_info.hash,
                      'old ' + part_name, length=old_part_info.size)
      new_part_file_mode = 'r+b'
      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
        # Copy the src partition to the dst one; make sure we don't truncate it.
        shutil.copyfile(old_part_file_name, new_part_file_name)
      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
        # In minor version >= 2, we don't want to copy the partitions, so
        # instead just make the new partition file.
        open(new_part_file_name, 'w').close()
      else:
        raise PayloadError("Unknown minor version: %d" % self.minor_version)
    else:
      # We need to create/truncate the dst partition file.
      new_part_file_mode = 'w+b'

    # Apply operations.
    with open(new_part_file_name, new_part_file_mode) as new_part_file:
      old_part_file = (open(old_part_file_name, 'r+b')
                       if old_part_file_name else None)
      try:
        self._ApplyOperations(operations, base_name, old_part_file,
                              new_part_file, new_part_info.size)
      finally:
        if old_part_file:
          old_part_file.close()

      # Truncate the result, if so instructed.
      if self.truncate_to_expected_size:
        new_part_file.seek(0, 2)
        if new_part_file.tell() > new_part_info.size:
          new_part_file.seek(new_part_info.size)
          new_part_file.truncate()

    # Verify the resulting partition.
    with open(new_part_file_name, 'rb') as new_part_file:
      _VerifySha256(new_part_file, new_part_info.hash,
                    'new ' + part_name, length=new_part_info.size)

  def Run(self, new_parts, old_parts=None):
    """Applier entry point, invoking all update operations.

    Args:
      new_parts: map of partition name to dest partition file
      old_parts: map of partition name to source partition file (optional)

    Raises:
      PayloadError if payload application failed.
    """
    self.payload.ResetFile()

    # TODO(tbrindus): make payload applying work for major version 2 partitions
    new_kernel_part = new_parts[common.KERNEL]
    new_rootfs_part = new_parts[common.ROOTFS]
    old_kernel_part = old_parts.get(common.KERNEL, None) if old_parts else None
    old_rootfs_part = old_parts.get(common.ROOTFS, None) if old_parts else None

    # Make sure the arguments are sane and match the payload.
    if not (new_kernel_part and new_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (old_kernel_part or old_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif old_kernel_part and old_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', new_rootfs_part,
        self.payload.manifest.new_rootfs_info, old_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', new_kernel_part,
        self.payload.manifest.new_kernel_info, old_kernel_part,
        self.payload.manifest.old_kernel_info)