# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applying a Chrome OS update payload.

This module is used internally by the main Payload class for applying an update
payload. The interface for invoking the applier is as follows:

  applier = PayloadApplier(payload)
  applier.Run(...)

"""

import array
import bz2
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile

import common
from error import PayloadError


#
# Helper functions.
#
def _VerifySha256(file_obj, expected_hash, name, length=-1):
  """Verifies the SHA256 hash of a file.

  Args:
    file_obj: file object to read
    expected_hash: the hash digest we expect to be getting
    name: name string of this hash, for error reporting
    length: precise length of data to verify (optional)
  Raises:
    PayloadError if the computed hash doesn't match the expected one, or if it
    fails to read the specified length of data.

  """
  # pylint: disable=E1101
  hasher = hashlib.sha256()
  block_length = 1024 * 1024
  max_length = length if length >= 0 else sys.maxint

  while max_length > 0:
    read_length = min(max_length, block_length)
    data = file_obj.read(read_length)
    if not data:
      break
    max_length -= len(data)
    hasher.update(data)

  if length >= 0 and max_length > 0:
    raise PayloadError(
        'insufficient data (%d instead of %d) when verifying %s' %
        (length - max_length, length, name))

  actual_hash = hasher.digest()
  if actual_hash != expected_hash:
    raise PayloadError('%s hash (%s) not as expected (%s)' %
                       (name, actual_hash.encode('hex'),
                        expected_hash.encode('hex')))
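
# A typical invocation (a sketch, mirroring the partition verification done
# in _ApplyToPartition below; part_info stands for a manifest partition info
# object carrying 'size' and 'hash'):
#
#   with open(part_file_name, 'rb') as part_file:
#     _VerifySha256(part_file, part_info.hash, 'rootfs',
#                   length=part_info.size)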


def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  This tries to be efficient by not copying data as it is read in chunks.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional)
  Returns:
    A character array containing the concatenated read data.

  """
  data = array.array('c')
  for ex in extents:
    if max_length == 0:
      break
    file_obj.seek(ex.start_block * block_size)
    read_length = ex.num_blocks * block_size
    if max_length > 0:
      read_length = min(max_length, read_length)
      max_length -= read_length
    data.fromfile(file_obj, read_length)
  return data
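
# Note on extents: an extent denotes (start_block, num_blocks) within a
# partition; e.g. with a 4096-byte block size, an extent starting at block 4
# with 2 blocks covers bytes 16384 through 24575 of the partition file.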


def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copying data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent block for error reporting
  Raises:
    PayloadError when things don't add up.

  """
  data_offset = 0
  data_length = len(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if data_offset == data_length:
      raise PayloadError('%s: more write extents than data' % ex_name)
    write_length = min(data_length - data_offset, ex.num_blocks * block_size)
    file_obj.seek(ex.start_block * block_size)
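    # buffer() yields a read-only view into the data at the given offset,
    # avoiding a copy of the slice (Python 2 idiom).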
    data_view = buffer(data, data_offset, write_length)
    file_obj.write(data_view)
    data_offset += write_length

  if data_offset < data_length:
    raise PayloadError('%s: more data than write extents' % base_name)


#
# Payload application.
#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.

  """

  def __init__(self, payload):
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition
    Raises:
      PayloadError if something goes wrong.

    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)' % ex_name)

        # Pad with zeros if necessary.
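        # (e.g. with 4096-byte blocks, 5000 bytes of data destined for a
        # two-block extent get 8192 - 5000 = 3192 zero bytes appended)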
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _ApplyMoveOperation(self, op, op_name, part_file):
    """Applies a MOVE operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object
    Raises:
      PayloadError if something goes wrong.

    """
    block_size = self.block_size

    # Gather input raw data from src extents.
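    # Reading everything before any writing keeps a possible overlap between
    # src and dst extents from corrupting the data being moved.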
    in_data = _ReadExtents(part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplyBsdiffOperation(self, op, op_name, patch_data, part_file):
    """Applies a BSDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      part_file: the partition file object
    Raises:
      PayloadError if something goes wrong.

    """
    block_size = self.block_size

    # Gather input raw data and write to a temp file.
    in_data = _ReadExtents(part_file, op.src_extents, block_size,
                           max_length=op.src_length)
    with tempfile.NamedTemporaryFile(delete=False) as in_file:
      in_file_name = in_file.name
      in_file.write(in_data)

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

    # Allocate temporary output file.
    with tempfile.NamedTemporaryFile(delete=False) as out_file:
      out_file_name = out_file.name

    # Invoke bspatch.
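    # (assumes a bspatch binary is available on PATH; check_call raises
    # CalledProcessError on a non-zero exit status)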
    bspatch_cmd = ['bspatch', in_file_name, out_file_name, patch_file_name]
    subprocess.check_call(bspatch_cmd)

    # Read output.
    with open(out_file_name, 'rb') as out_file:
      out_data = out_file.read()
      if len(out_data) != op.dst_length:
        raise PayloadError(
            '%s: actual patched data length (%d) not as expected (%d)' %
            (op_name, len(out_data), op.dst_length))

    # Write output back to partition, with padding.
    unaligned_out_len = len(out_data) % block_size
    if unaligned_out_len:
      out_data += '\0' * (block_size - unaligned_out_len)
    _WriteExtents(part_file, out_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

    # Delete all temporary files.
    os.remove(in_file_name)
    os.remove(out_file_name)
    os.remove(patch_file_name)

  def _ApplyOperations(self, operations, base_name, part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes in-place update semantics, namely that all reads are
    performed first, then the data is processed and written back to the same
    file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      part_file: the partition file object, open for reading/writing
      part_size: the partition size
    Raises:
      PayloadError if anything goes wrong while processing the payload.

    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
        self._ApplyReplaceOperation(op, op_name, data, part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, part_file)
      elif op.type == common.OpType.BSDIFF:
        self._ApplyBsdiffOperation(op, op_name, data, part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        dst_part_file_name, dst_part_info,
                        src_part_file_name=None, src_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      dst_part_file_name: file name to write partition data to
      dst_part_info: size and expected hash of dest partition
      src_part_file_name: file name of source partition (optional)
      src_part_info: size and expected hash of source partition (optional)
    Raises:
      PayloadError if anything goes wrong with the update.

    """
    # Do we have a source partition?
    if src_part_file_name:
      # Verify the source partition.
      with open(src_part_file_name, 'rb') as src_part_file:
        _VerifySha256(src_part_file, src_part_info.hash, part_name,
                      length=src_part_info.size)

      # Copy the src partition to the dst one.
      shutil.copyfile(src_part_file_name, dst_part_file_name)
    else:
      # Preallocate the dst partition file.
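      # ('fallocate -l' creates the file if needed and extends it to the
      # requested length; assumes the fallocate utility is available.)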
      subprocess.check_call(
          ['fallocate', '-l', str(dst_part_info.size), dst_part_file_name])

    # Apply operations.
    with open(dst_part_file_name, 'r+b') as dst_part_file:
      self._ApplyOperations(operations, base_name, dst_part_file,
                            dst_part_info.size)

    # Verify the resulting partition.
    with open(dst_part_file_name, 'rb') as dst_part_file:
      _VerifySha256(dst_part_file, dst_part_info.hash, part_name,
                    length=dst_part_info.size)
Gilad Arnold553b0ec2013-01-26 01:00:39 -0800346
347 def Run(self, dst_kernel_part, dst_rootfs_part, src_kernel_part=None,
348 src_rootfs_part=None):
349 """Applier entry point, invoking all update operations.
350
351 Args:
352 dst_kernel_part: name of dest kernel partition file
353 dst_rootfs_part: name of dest rootfs partition file
354 src_kernel_part: name of source kernel partition file (optional)
355 src_rootfs_part: name of source rootfs partition file (optional)
356 Raises:
357 PayloadError if payload application failed.
358
359 """
    self.payload.ResetFile()

    # Make sure the arguments are sane and match the payload.
    if not (dst_kernel_part and dst_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (src_kernel_part or src_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif src_kernel_part and src_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', dst_rootfs_part,
        self.payload.manifest.new_rootfs_info, src_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', dst_kernel_part,
        self.payload.manifest.new_kernel_info, src_kernel_part,
        self.payload.manifest.old_kernel_info)