# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applying a Chrome OS update payload.

This module is used internally by the main Payload class for applying an update
payload. The interface for invoking the applier is as follows:

  applier = PayloadApplier(payload)
  applier.Run(...)

"""
import array
import binascii
import bz2
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile

import common
from error import PayloadError
26
27
#
# Helper functions.
#
31def _VerifySha256(file_obj, expected_hash, name, max_length=-1):
32 """Verifies the SHA256 hash of a file.
33
34 Args:
35 file_obj: file object to read
36 expected_hash: the hash digest we expect to be getting
37 name: name string of this hash, for error reporting
38 max_length: maximum length of data to read/hash (optional)
39 Raises:
40 PayloadError if file hash fails to verify.
41
42 """
43 # pylint: disable=E1101
44 hasher = hashlib.sha256()
45 block_length = 1024 * 1024
46 if max_length < 0:
47 max_length = sys.maxint
48
49 while max_length != 0:
50 read_length = min(max_length, block_length)
51 data = file_obj.read(read_length)
52 if not data:
53 break
54 max_length -= len(data)
55 hasher.update(data)
56
57 actual_hash = hasher.digest()
58 if actual_hash != expected_hash:
59 raise PayloadError('%s hash (%s) not as expected (%s)' %
60 (name, actual_hash.encode('hex'),
61 expected_hash.encode('hex')))
62
63
64def _ReadExtents(file_obj, extents, block_size, max_length=-1):
65 """Reads data from file as defined by extent sequence.
66
67 This tries to be efficient by not copying data as it is read in chunks.
68
69 Args:
70 file_obj: file object
71 extents: sequence of block extents (offset and length)
72 block_size: size of each block
73 max_length: maximum length to read (optional)
74 Returns:
75 A character array containing the concatenated read data.
76
77 """
78 data = array.array('c')
79 for ex in extents:
80 if max_length == 0:
81 break
82 file_obj.seek(ex.start_block * block_size)
83 read_length = ex.num_blocks * block_size
84 if max_length > 0:
85 read_length = min(max_length, read_length)
86 max_length -= read_length
87 data.fromfile(file_obj, read_length)
88 return data
89
90
def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copying data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent block for error reporting
  Raises:
    PayloadError when things don't add up.

  """
  data_offset = 0
  data_length = len(data)
  # A memoryview lets us write slices without copying; unlike the buffer()
  # builtin it exists on both Python 2.7 and Python 3.
  data_view = memoryview(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if data_offset == data_length:
      raise PayloadError('%s: more write extents than data' % ex_name)
    write_length = min(data_length - data_offset, ex.num_blocks * block_size)
    file_obj.seek(ex.start_block * block_size)
    file_obj.write(data_view[data_offset:data_offset + write_length])
    data_offset += write_length

  if data_offset < data_length:
    raise PayloadError('%s: more data than write extents' % base_name)
119
120
#
# Payload application.
#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.

  """

  def __init__(self, payload):
    # The payload must already be initialized (parsed and sanity-checked).
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition
    Raises:
      PayloadError if something goes wrong.

    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write; up to one block of
        # padding is tolerated (handled below).
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % ex_name)

        # Pad with zeros if necessary; only the last extent may run past the
        # data length, and by less than a full block.
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      # Pseudo extents still consume their share of the data blob.
      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _ApplyMoveOperation(self, op, op_name, part_file):
    """Applies a MOVE operation.

    Reads all source extents first, then writes them out, so overlapping
    src/dst extents are handled correctly.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object
    Raises:
      PayloadError if something goes wrong.

    """
    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplyBsdiffOperation(self, op, op_name, patch_data, part_file):
    """Applies a BSDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      part_file: the partition file object
    Raises:
      PayloadError if something goes wrong.

    """
    block_size = self.block_size

    # Track temporary files as they are created, so they get removed even if
    # bspatch or a later step fails.
    temp_file_names = []
    try:
      # Gather input raw data and write to a temp file.
      in_data = _ReadExtents(part_file, op.src_extents, block_size,
                             max_length=op.src_length)
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        temp_file_names.append(in_file_name)
        in_file.write(in_data)

      # Dump patch data to file.
      with tempfile.NamedTemporaryFile(delete=False) as patch_file:
        patch_file_name = patch_file.name
        temp_file_names.append(patch_file_name)
        patch_file.write(patch_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name
        temp_file_names.append(out_file_name)

      # Invoke bspatch.
      bspatch_cmd = ['bspatch', in_file_name, out_file_name, patch_file_name]
      subprocess.check_call(bspatch_cmd)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding to a whole block.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)
    finally:
      # Delete all temporary files.
      for temp_file_name in temp_file_names:
        os.remove(temp_file_name)

  def _ApplyOperations(self, operations, base_name, part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes an in-place update semantics, namely all reads are performed
    first, then the data is processed and written back to the same file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      part_file: the partition file object, open for reading/writing
      part_size: the partition size
    Raises:
      PayloadError if anything goes wrong while processing the payload.

    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

      # Dispatch per operation type.
      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
        self._ApplyReplaceOperation(op, op_name, data, part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, part_file)
      elif op.type == common.OpType.BSDIFF:
        self._ApplyBsdiffOperation(op, op_name, data, part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        dst_part_file_name, dst_part_info,
                        src_part_file_name=None, src_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      dst_part_file_name: file name to write partition data to
      dst_part_info: size and expected hash of dest partition
      src_part_file_name: file name of source partition (optional)
      src_part_info: size and expected hash of source partition (optional)
    Raises:
      PayloadError if anything goes wrong with the update.

    """
    # Do we have a source partition?
    if src_part_file_name:
      # Verify the source partition.
      with open(src_part_file_name, 'rb') as src_part_file:
        _VerifySha256(src_part_file, src_part_info.hash, part_name)

      # Copy the src partition to the dst one; operations are then applied
      # in-place on the copy.
      shutil.copyfile(src_part_file_name, dst_part_file_name)
    else:
      # Preallocate the dst partition file.
      subprocess.check_call(
          ['fallocate', '-l', str(dst_part_info.size), dst_part_file_name])

    # Apply operations.
    with open(dst_part_file_name, 'r+b') as dst_part_file:
      self._ApplyOperations(operations, base_name, dst_part_file,
                            dst_part_info.size)

    # Verify the resulting partition.
    with open(dst_part_file_name, 'rb') as dst_part_file:
      _VerifySha256(dst_part_file, dst_part_info.hash, part_name)

  def Run(self, dst_kernel_part, dst_rootfs_part, src_kernel_part=None,
          src_rootfs_part=None):
    """Applier entry point, invoking all update operations.

    Args:
      dst_kernel_part: name of dest kernel partition file
      dst_rootfs_part: name of dest rootfs partition file
      src_kernel_part: name of source kernel partition file (optional)
      src_rootfs_part: name of source rootfs partition file (optional)
    Raises:
      PayloadError if payload application failed.

    """
    self.payload.ResetFile()

    # Make sure the arguments are sane and match the payload.
    if not (dst_kernel_part and dst_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (src_kernel_part or src_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif src_kernel_part and src_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', dst_rootfs_part,
        self.payload.manifest.new_rootfs_info, src_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', dst_kernel_part,
        self.payload.manifest.new_kernel_info, src_kernel_part,
        self.payload.manifest.old_kernel_info)