blob: b2920a0cefce6c62ea4bb39bb3331d646afdfcef [file] [log] [blame]
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
4
"""Verifying the integrity of a Chrome OS update payload.

This module is used internally by the main Payload class for verifying the
integrity of an update payload. The interface for invoking the checks is as
follows:

  checker = PayloadChecker(payload)
  checker.Run(...)

"""
15
16import array
17import base64
18import hashlib
19import subprocess
20
21import common
22from error import PayloadError
23import format_utils
24import histogram
25import update_metadata_pb2
26
27
#
# Constants / helper functions.
#
# ASN.1 DER header of a PKCS#1 v1.5 SHA-256 DigestInfo structure. An RSA
# signature over a payload hash, once recovered with the public key, must
# consist of this fixed prefix followed by the raw 32-byte SHA-256 digest.
_SIG_ASN1_HEADER = (
    '\x30\x31\x30\x0d\x06\x09\x60\x86'
    '\x48\x01\x65\x03\x04\x02\x01\x05'
    '\x00\x04\x20'
)

# Payload types: a 'full' payload rewrites whole partitions, a 'delta'
# payload patches existing (old) partitions into the target (new) ones.
_TYPE_FULL = 'full'
_TYPE_DELTA = 'delta'

# Expected update block size in bytes; enforced against the manifest.
_DEFAULT_BLOCK_SIZE = 4096
41
42
43#
44# Helper functions.
45#
46def _IsPowerOfTwo(val):
47 """Returns True iff val is a power of two."""
48 return val > 0 and (val & (val - 1)) == 0
49
50
51def _AddFormat(format_func, value):
52 """Adds a custom formatted representation to ordinary string representation.
53
54 Args:
55 format_func: a value formatter
56 value: value to be formatted and returned
57 Returns:
58 A string 'x (y)' where x = str(value) and y = format_func(value).
59
60 """
61 return '%s (%s)' % (value, format_func(value))
62
63
def _AddHumanReadableSize(size):
  """Returns a byte size followed by its human readable rendition."""
  human_readable = format_utils.BytesToHumanReadable(size)
  return '%s (%s)' % (size, human_readable)
67
68
69#
70# Payload report generator.
71#
class _PayloadReport(object):
  """A payload report generator.

  A report is essentially a sequence of nodes, which represent data points. It
  is initialized to have a "global", untitled section. A node may be a
  sub-report itself.

  """

  # Report nodes: field, sub-report, section.
  class Node(object):
    """A report node interface."""

    @staticmethod
    def _Indent(indent, line):
      """Indents a line by a given indentation amount.

      Args:
        indent: the indentation amount
        line: the line content (string)
      Returns:
        The properly indented line (string).

      """
      return '%*s%s' % (indent, '', line)

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Generates the report lines for this node.

      Args:
        base_indent: base indentation for each line
        sub_indent: additional indentation for sub-nodes
        curr_section: the current report section object
      Returns:
        A pair consisting of a list of properly indented report lines and a new
        current section object.

      """
      raise NotImplementedError()

  class FieldNode(Node):
    """A field report node, representing a (name, value) pair."""

    def __init__(self, name, value, linebreak, indent):
      super(_PayloadReport.FieldNode, self).__init__()
      self.name = name  # Field name; may be None/empty for bare values.
      self.value = value  # Arbitrary value; rendered via str().
      self.linebreak = linebreak  # Whether the value starts on its own line.
      self.indent = indent  # Extra indentation applied to value lines.

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Generates a properly formatted 'name : value' entry."""
      report_output = ''
      if self.name:
        # Left-justify the name to the widest field name in the current
        # section so all ':' separators line up.
        report_output += self.name.ljust(curr_section.max_field_name_len) + ' :'
      value_lines = str(self.value).splitlines()
      if self.linebreak and self.name:
        # Print the value on its own line(s), each indented by self.indent.
        report_output += '\n' + '\n'.join(
            ['%*s%s' % (self.indent, '', line) for line in value_lines])
      else:
        if self.name:
          report_output += ' '
        report_output += '%*s' % (self.indent, '')
        # Align continuation lines of a multi-line value under the first line.
        cont_line_indent = len(report_output)
        indented_value_lines = [value_lines[0]]
        indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line)
                                     for line in value_lines[1:]])
        report_output += '\n'.join(indented_value_lines)

      report_lines = [self._Indent(base_indent, line + '\n')
                      for line in report_output.split('\n')]
      return report_lines, curr_section

  class SubReportNode(Node):
    """A sub-report node, representing a nested report."""

    def __init__(self, title, report):
      super(_PayloadReport.SubReportNode, self).__init__()
      self.title = title
      self.report = report  # A nested _PayloadReport instance.

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Recurse with indentation."""
      report_lines = [self._Indent(base_indent, self.title + ' =>\n')]
      report_lines.extend(self.report.GenerateLines(base_indent + sub_indent,
                                                    sub_indent))
      # A sub-report does not change the enclosing report's current section.
      return report_lines, curr_section

  class SectionNode(Node):
    """A section header node."""

    def __init__(self, title=None):
      super(_PayloadReport.SectionNode, self).__init__()
      self.title = title
      # Longest field name added under this section; used by FieldNode to
      # align values.
      self.max_field_name_len = 0

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Dump a title line, return self as the (new) current section."""
      report_lines = []
      if self.title:
        report_lines.append(self._Indent(base_indent,
                                         '=== %s ===\n' % self.title))
      return report_lines, self

  def __init__(self):
    self.report = []  # Flat sequence of nodes, in insertion order.
    # The global (untitled) section doubles as the initial current section.
    self.last_section = self.global_section = self.SectionNode()
    self.is_finalized = False

  def GenerateLines(self, base_indent, sub_indent):
    """Generates the lines in the report, properly indented.

    Args:
      base_indent: the indentation used for root-level report lines
      sub_indent: the indentation offset used for sub-reports
    Returns:
      A list of indented report lines.

    """
    report_lines = []
    curr_section = self.global_section
    for node in self.report:
      # A node may replace the current section (SectionNode returns itself).
      node_report_lines, curr_section = node.GenerateLines(
          base_indent, sub_indent, curr_section)
      report_lines.extend(node_report_lines)

    return report_lines

  def Dump(self, out_file, base_indent=0, sub_indent=2):
    """Dumps the report to a file.

    Args:
      out_file: file object to output the content to
      base_indent: base indentation for report lines
      sub_indent: added indentation for sub-reports

    """

    report_lines = self.GenerateLines(base_indent, sub_indent)
    if report_lines and not self.is_finalized:
      # Flag reports emitted before Finalize() was called.
      report_lines.append('(incomplete report)\n')

    for line in report_lines:
      out_file.write(line)

  def AddField(self, name, value, linebreak=False, indent=0):
    """Adds a field/value pair to the payload report.

    Args:
      name: the field's name
      value: the field's value
      linebreak: whether the value should be printed on a new line
      indent: amount of extra indent for each line of the value

    """
    assert not self.is_finalized
    # Track the widest name per section for value alignment.
    if name and self.last_section.max_field_name_len < len(name):
      self.last_section.max_field_name_len = len(name)
    self.report.append(self.FieldNode(name, value, linebreak, indent))

  def AddSubReport(self, title):
    """Adds and returns a sub-report with a title."""
    assert not self.is_finalized
    sub_report = self.SubReportNode(title, type(self)())
    self.report.append(sub_report)
    return sub_report.report

  def AddSection(self, title):
    """Adds a new section title."""
    assert not self.is_finalized
    # Subsequent AddField calls align against this new section.
    self.last_section = self.SectionNode(title)
    self.report.append(self.last_section)

  def Finalize(self):
    """Seals the report, marking it as complete."""
    self.is_finalized = True
248
249
250#
251# Payload verification.
252#
253class PayloadChecker(object):
254 """Checking the integrity of an update payload.
255
256 This is a short-lived object whose purpose is to isolate the logic used for
257 verifying the integrity of an update payload.
258
259 """
260
261 def __init__(self, payload):
262 assert payload.is_init, 'uninitialized update payload'
263 self.payload = payload
264
265 # Reset state; these will be assigned when the manifest is checked.
266 self.block_size = _DEFAULT_BLOCK_SIZE
267 self.sigs_offset = 0
268 self.sigs_size = 0
269 self.old_rootfs_size = 0
270 self.old_kernel_size = 0
271 self.new_rootfs_size = 0
272 self.new_kernel_size = 0
273 self.payload_type = None
274
275 @staticmethod
276 def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
277 msg_name=None, linebreak=False, indent=0):
278 """Adds an element from a protobuf message to the payload report.
279
280 Checks to see whether a message contains a given element, and if so adds
281 the element value to the provided report. A missing mandatory element
282 causes an exception to be raised.
283
284 Args:
285 msg: the message containing the element
286 name: the name of the element
287 report: a report object to add the element name/value to
288 is_mandatory: whether or not this element must be present
289 is_submsg: whether this element is itself a message
290 convert: a function for converting the element value for reporting
291 msg_name: the name of the message object (for error reporting)
292 linebreak: whether the value report should induce a line break
293 indent: amount of indent used for reporting the value
294 Returns:
295 A pair consisting of the element value and the generated sub-report for
296 it (if the element is a sub-message, None otherwise). If the element is
297 missing, returns (None, None).
298 Raises:
299 PayloadError if a mandatory element is missing.
300
301 """
302 if not msg.HasField(name):
303 if is_mandatory:
304 raise PayloadError("%smissing mandatory %s '%s'" %
305 (msg_name + ' ' if msg_name else '',
306 'sub-message' if is_submsg else 'field',
307 name))
308 return (None, None)
309
310 value = getattr(msg, name)
311 if is_submsg:
312 return (value, report and report.AddSubReport(name))
313 else:
314 if report:
315 report.AddField(name, convert(value), linebreak=linebreak,
316 indent=indent)
317 return (value, None)
318
319 @staticmethod
320 def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
321 linebreak=False, indent=0):
322 """Adds a mandatory field; returning first component from _CheckElem."""
323 return PayloadChecker._CheckElem(msg, field_name, report, True, False,
324 convert=convert, msg_name=msg_name,
325 linebreak=linebreak, indent=indent)[0]
326
327 @staticmethod
328 def _CheckOptionalField(msg, field_name, report, convert=str,
329 linebreak=False, indent=0):
330 """Adds an optional field; returning first component from _CheckElem."""
331 return PayloadChecker._CheckElem(msg, field_name, report, False, False,
332 convert=convert, linebreak=linebreak,
333 indent=indent)[0]
334
335 @staticmethod
336 def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name):
337 """Adds a mandatory sub-message; wrapper for _CheckElem."""
338 return PayloadChecker._CheckElem(msg, submsg_name, report, True, True,
339 msg_name)
340
341 @staticmethod
342 def _CheckOptionalSubMsg(msg, submsg_name, report):
343 """Adds an optional sub-message; wrapper for _CheckElem."""
344 return PayloadChecker._CheckElem(msg, submsg_name, report, False, True)
345
346 @staticmethod
347 def _CheckPresentIff(val1, val2, name1, name2, obj_name):
348 """Checks that val1 is None iff val2 is None.
349
350 Args:
351 val1: first value to be compared
352 val2: second value to be compared
353 name1: name of object holding the first value
354 name2: name of object holding the second value
355 obj_name: name of the object containing these values
356 Raises:
357 PayloadError if assertion does not hold.
358
359 """
360 if None in (val1, val2) and val1 is not val2:
361 present, missing = (name1, name2) if val2 is None else (name2, name1)
362 raise PayloadError("'%s' present without '%s'%s" %
363 (present, missing,
364 ' in ' + obj_name if obj_name else ''))
365
366 @staticmethod
367 def _Run(cmd, send_data=None):
368 """Runs a subprocess, returns its output.
369
370 Args:
371 cmd: list of command-line argument for invoking the subprocess
372 send_data: data to feed to the process via its stdin
373 Returns:
374 A tuple containing the stdout and stderr output of the process.
375
376 """
377 run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
378 stdout=subprocess.PIPE)
379 return run_process.communicate(input=send_data)
380
  @staticmethod
  def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
    """Verifies an actual hash against a signed one.

    Args:
      sig_data: the raw signature data (expected: RSA-2048 encrypted blob)
      pubkey_file_name: public key used for verifying signature
      actual_hash: the actual hash digest (raw 32-byte SHA-256)
      sig_name: signature name for error reporting
    Raises:
      PayloadError if signature could not be verified.

    """
    # An RSA-2048 signature is exactly 256 bytes long.
    if len(sig_data) != 256:
      raise PayloadError('%s: signature size (%d) not as expected (256)' %
                         (sig_name, len(sig_data)))
    # Recover the signed blob with the public key; for a valid signature this
    # yields a PKCS#1 v1.5 DigestInfo: fixed ASN.1 header + 32-byte digest.
    signed_data, _ = PayloadChecker._Run(
        ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
        send_data=sig_data)

    if len(signed_data) != len(_SIG_ASN1_HEADER) + 32:
      raise PayloadError('%s: unexpected signed data length (%d)' %
                         (sig_name, len(signed_data)))

    if not signed_data.startswith(_SIG_ASN1_HEADER):
      raise PayloadError('%s: not containing standard ASN.1 prefix' % sig_name)

    # Compare the digest carried in the signature with the actual one.
    # NOTE: str.encode('hex') is Python 2 only; a Python 3 port would need
    # binascii.hexlify here.
    signed_hash = signed_data[len(_SIG_ASN1_HEADER):]
    if signed_hash != actual_hash:
      raise PayloadError('%s: signed hash (%s) different from actual (%s)' %
                         (sig_name, signed_hash.encode('hex'),
                          actual_hash.encode('hex')))
413
414 @staticmethod
415 def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
416 block_name=None):
417 """Checks that a given length fits given block space.
418
419 This ensures that the number of blocks allocated is appropriate for the
420 length of the data residing in these blocks.
421
422 Args:
423 length: the actual length of the data
424 num_blocks: the number of blocks allocated for it
425 block_size: the size of each block in bytes
426 length_name: name of length (used for error reporting)
427 block_name: name of block (used for error reporting)
428 Raises:
429 PayloadError if the aforementioned invariant is not satisfied.
430
431 """
432 # Check: length <= num_blocks * block_size.
433 if not length <= num_blocks * block_size:
434 raise PayloadError(
435 '%s (%d) > num %sblocks (%d) * block_size (%d)' %
436 (length_name, length, block_name or '', num_blocks, block_size))
437
438 # Check: length > (num_blocks - 1) * block_size.
439 if not length > (num_blocks - 1) * block_size:
440 raise PayloadError(
441 '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d)' %
442 (length_name, length, block_name or '', num_blocks - 1, block_size))
443
  def _CheckManifest(self, report):
    """Checks the payload manifest.

    Results (block size, signature offset/size, partition sizes, payload
    type) are recorded on self.

    NOTE(review): the original docstring claimed a tuple is returned, but the
    method returns None and stores its findings on self -- confirm intended
    contract.

    Args:
      report: a report object to add to
    Raises:
      PayloadError if any of the checks fail.

    """
    manifest = self.payload.manifest
    report.AddSection('manifest')

    # Check: block_size must exist and match the expected value.
    actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
                                                  report, 'manifest')
    if actual_block_size != self.block_size:
      raise PayloadError('block_size (%d) not as expected (%d)' %
                         (actual_block_size, self.block_size))

    # Check: signatures_offset <==> signatures_size.
    self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
                                                report)
    self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size',
                                              report)
    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                          'signatures_offset', 'signatures_size', 'manifest')

    # Check: old_kernel_info <==> old_rootfs_info. Presence of old partition
    # info is what distinguishes a delta payload from a full one.
    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_kernel_info', report)
    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_rootfs_info', report)
    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
                          'old_rootfs_info', 'manifest')
    if oki_msg:  # equivalently, ori_msg
      # Assert/mark delta payload.
      if self.payload_type == _TYPE_FULL:
        raise PayloadError(
            'apparent full payload contains old_{kernel,rootfs}_info')
      self.payload_type = _TYPE_DELTA

      # Check: {size, hash} present in old_{kernel,rootfs}_info.
      self.old_kernel_size = self._CheckMandatoryField(
          oki_msg, 'size', oki_report, 'old_kernel_info')
      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
                                convert=common.FormatSha256)
      self.old_rootfs_size = self._CheckMandatoryField(
          ori_msg, 'size', ori_report, 'old_rootfs_info')
      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
                                convert=common.FormatSha256)
    else:
      # Assert/mark full payload.
      if self.payload_type == _TYPE_DELTA:
        raise PayloadError(
            'apparent delta payload missing old_{kernel,rootfs}_info')
      self.payload_type = _TYPE_FULL

    # Check: new_kernel_info present; contains {size, hash}.
    nki_msg, nki_report = self._CheckMandatorySubMsg(
        manifest, 'new_kernel_info', report, 'manifest')
    self.new_kernel_size = self._CheckMandatoryField(
        nki_msg, 'size', nki_report, 'new_kernel_info')
    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
                              convert=common.FormatSha256)

    # Check: new_rootfs_info present; contains {size, hash}.
    nri_msg, nri_report = self._CheckMandatorySubMsg(
        manifest, 'new_rootfs_info', report, 'manifest')
    self.new_rootfs_size = self._CheckMandatoryField(
        nri_msg, 'size', nri_report, 'new_rootfs_info')
    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
                              convert=common.FormatSha256)

    # Check: payload must contain at least one operation.
    if not(len(manifest.install_operations) or
           len(manifest.kernel_install_operations)):
      raise PayloadError('payload contains no operations')
524
525 def _CheckLength(self, length, total_blocks, op_name, length_name):
526 """Checks whether a length matches the space designated in extents.
527
528 Args:
529 length: the total length of the data
530 total_blocks: the total number of blocks in extents
531 op_name: operation name (for error reporting)
532 length_name: length name (for error reporting)
533 Raises:
534 PayloadError is there a problem with the length.
535
536 """
537 # Check: length is non-zero.
538 if length == 0:
539 raise PayloadError('%s: %s is zero' % (op_name, length_name))
540
541 # Check that length matches number of blocks.
542 self._CheckBlocksFitLength(length, total_blocks, self.block_size,
543 '%s: %s' % (op_name, length_name))
544
  def _CheckExtents(self, extents, part_size, block_counters, name,
                    allow_pseudo=False, allow_signature=False):
    """Checks a sequence of extents.

    Args:
      extents: the sequence of extents to check
      part_size: the total size of the partition to which the extents apply
      block_counters: an array of counters corresponding to the number of blocks
      name: the name of the extent block
      allow_pseudo: whether or not pseudo block numbers are allowed
      allow_signature: whether or not the extents are used for a signature
    Returns:
      The total number of blocks in the extents.
    Raises:
      PayloadError if any of the entailed checks fails.

    """
    total_num_blocks = 0
    num_extents = 0
    for ex, ex_name in common.ExtentIter(extents, name):
      num_extents += 1

      # Check: mandatory fields.
      start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
                                                        None, ex_name)
      num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
                                                       ex_name)
      end_block = start_block + num_blocks

      # Check: num_blocks > 0.
      if num_blocks == 0:
        raise PayloadError('%s: extent length is zero' % ex_name)

      if start_block != common.PSEUDO_EXTENT_MARKER:
        # Check: make sure we're within the partition limit. part_size may be
        # falsy (unknown), in which case the bound is not enforced.
        if part_size and (end_block - 1) * self.block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, self.block_size), part_size))

        # Record block usage.
        for i in range(start_block, end_block):
          block_counters[i] += 1
      elif not (allow_pseudo or
                (allow_signature and
                 (num_extents == len(extents) and num_blocks == 1))):
        # A pseudo-extent is only tolerated when explicitly allowed, or as
        # the single-block final extent of a signature operation.
        raise PayloadError('%s: unexpected pseudo-extent' % ex_name)

      total_num_blocks += num_blocks

    return total_num_blocks
596
597 def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
598 """Specific checks for REPLACE/REPLACE_BZ operations.
599
600 Args:
601 op: the operation object from the manifest
602 data_length: the length of the data blob associated with the operation
603 total_dst_blocks: total number of blocks in dst_extents
604 op_name: operation name for error reporting
605 Raises:
606 PayloadError if any check fails.
607
608 """
609 if op.src_extents:
610 raise PayloadError('%s: contains src_extents' % op_name)
611
612 if op.type == common.OpType.REPLACE:
613 PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
614 self.block_size,
615 op_name + '.data_length', 'dst')
616 else:
617 # Check: data_length must be smaller than the alotted dst blocks.
618 if data_length >= total_dst_blocks * self.block_size:
619 raise PayloadError(
620 '%s: data_length (%d) must be less than allotted dst block '
621 'space (%d * %d)' %
622 (op_name, data_length, total_dst_blocks, self.block_size))
623
  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
                          total_dst_blocks, op_name):
    """Specific checks for MOVE operations.

    Args:
      op: the operation object from the manifest
      data_offset: the offset of a data blob for the operation
      total_src_blocks: total number of blocks in src_extents
      total_dst_blocks: total number of blocks in dst_extents
      op_name: operation name for error reporting
    Raises:
      PayloadError if any check fails.

    """
    # Check: no data_{offset,length}. MOVE carries no data blob.
    if data_offset is not None:
      raise PayloadError('%s: contains data_{offset,length}' % op_name)

    # Check: total src blocks == total dst blocks.
    if total_src_blocks != total_dst_blocks:
      raise PayloadError(
          '%s: total src blocks (%d) != total dst blocks (%d)' %
          (op_name, total_src_blocks, total_dst_blocks))

    # Check: for all i, i-th src block index != i-th dst block index.
    # Walks both extent lists in lockstep, advancing both by the overlap of
    # the current src/dst runs. Comparing only the runs' leading indexes is
    # sufficient: both indexes advance by the same amount, so if they differ
    # at the start of an aligned run they differ throughout it.
    i = 0
    src_extent_iter = iter(op.src_extents)
    dst_extent_iter = iter(op.dst_extents)
    src_extent = dst_extent = None
    src_idx = src_num = dst_idx = dst_num = 0
    while i < total_src_blocks:
      # Get the next source extent, if needed.
      if not src_extent:
        try:
          # NOTE: iterator.next() is Python 2 syntax (next(it) in Python 3).
          src_extent = src_extent_iter.next()
        except StopIteration:
          raise PayloadError('%s: ran out of src extents (%d/%d)' %
                             (op_name, i, total_src_blocks))
        src_idx = src_extent.start_block
        src_num = src_extent.num_blocks

      # Get the next dest extent, if needed.
      if not dst_extent:
        try:
          dst_extent = dst_extent_iter.next()
        except StopIteration:
          raise PayloadError('%s: ran out of dst extents (%d/%d)' %
                             (op_name, i, total_dst_blocks))
        dst_idx = dst_extent.start_block
        dst_num = dst_extent.num_blocks

      if src_idx == dst_idx:
        raise PayloadError('%s: src/dst blocks %d are the same (%d)' %
                           (op_name, i, src_idx))

      # Advance past the aligned run; drop whichever extent is exhausted.
      advance = min(src_num, dst_num)
      i += advance

      src_idx += advance
      src_num -= advance
      if src_num == 0:
        src_extent = None

      dst_idx += advance
      dst_num -= advance
      if dst_num == 0:
        dst_extent = None
691
692 def _CheckBsdiffOperation(self, data_length, total_dst_blocks, op_name):
693 """Specific checks for BSDIFF operations.
694
695 Args:
696 data_length: the length of the data blob associated with the operation
697 total_dst_blocks: total number of blocks in dst_extents
698 op_name: operation name for error reporting
699 Raises:
700 PayloadError if any check fails.
701
702 """
703 # Check: data_length is strictly smaller than the alotted dst blocks.
704 if data_length >= total_dst_blocks * self.block_size:
705 raise PayloadError(
706 '%s: data_length (%d) must be smaller than num dst blocks (%d) * '
707 'block_size (%d)' %
708 (op_name, data_length, total_dst_blocks, self.block_size))
709
  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
                      new_block_counters, old_part_size, new_part_size,
                      prev_data_offset, allow_signature, allow_unhashed,
                      blob_hash_counts):
    """Checks a single update operation.

    Args:
      op: the operation object
      op_name: operation name string for error reporting
      is_last: whether this is the last operation in the sequence
      old_block_counters: arrays of block read counters
      new_block_counters: arrays of block write counters
      old_part_size: the source partition size in bytes
      new_part_size: the target partition size in bytes
      prev_data_offset: offset of last used data bytes
      allow_signature: whether this may be a signature operation
      allow_unhashed: allow operations with unhashed data blobs
      blob_hash_counts: counters for hashed/unhashed blobs
    Returns:
      The amount of data blob associated with the operation.
    Raises:
      PayloadError if any check has failed.

    """
    # Check extents.
    total_src_blocks = self._CheckExtents(
        op.src_extents, old_part_size, old_block_counters,
        op_name + '.src_extents', allow_pseudo=True)
    # A signature blob may only ride on the final REPLACE operation.
    allow_signature_in_extents = (allow_signature and is_last and
                                  op.type == common.OpType.REPLACE)
    total_dst_blocks = self._CheckExtents(
        op.dst_extents, new_part_size, new_block_counters,
        op_name + '.dst_extents', allow_signature=allow_signature_in_extents)

    # Check: data_offset present <==> data_length present.
    data_offset = self._CheckOptionalField(op, 'data_offset', None)
    data_length = self._CheckOptionalField(op, 'data_length', None)
    self._CheckPresentIff(data_offset, data_length, 'data_offset',
                          'data_length', op_name)

    # Check: at least one dst_extent.
    if not op.dst_extents:
      raise PayloadError('%s: dst_extents is empty' % op_name)

    # Check {src,dst}_length, if present.
    if op.HasField('src_length'):
      self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length')
    if op.HasField('dst_length'):
      self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length')

    if op.HasField('data_sha256_hash'):
      blob_hash_counts['hashed'] += 1

      # Check: operation carries data.
      if data_offset is None:
        raise PayloadError(
            '%s: data_sha256_hash present but no data_{offset,length}' %
            op_name)

      # Check: hash verifies correctly.
      # pylint: disable=E1101
      actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
                                                             data_length))
      if op.data_sha256_hash != actual_hash.digest():
        raise PayloadError(
            '%s: data_sha256_hash (%s) does not match actual hash (%s)' %
            (op_name, op.data_sha256_hash.encode('hex'),
             actual_hash.hexdigest()))
    elif data_offset is not None:
      # Data present but unhashed: only acceptable for the signature blob or
      # when unhashed blobs were explicitly allowed.
      if allow_signature_in_extents:
        blob_hash_counts['signature'] += 1
      elif allow_unhashed:
        blob_hash_counts['unhashed'] += 1
      else:
        raise PayloadError('%s: unhashed operation not allowed' % op_name)

    if data_offset is not None:
      # Check: contiguous use of data section.
      if data_offset != prev_data_offset:
        raise PayloadError(
            '%s: data offset (%d) not matching amount used so far (%d)' %
            (op_name, data_offset, prev_data_offset))

    # Type-specific checks. Note that any non-REPLACE type in a full payload
    # is rejected before dispatching to the MOVE/BSDIFF checks.
    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif self.payload_type == _TYPE_FULL:
      raise PayloadError('%s: non-REPLACE operation in a full payload' %
                         op_name)
    elif op.type == common.OpType.MOVE:
      self._CheckMoveOperation(op, data_offset, total_src_blocks,
                               total_dst_blocks, op_name)
    elif op.type == common.OpType.BSDIFF:
      self._CheckBsdiffOperation(data_length, total_dst_blocks, op_name)
    else:
      assert False, 'cannot get here'

    return data_length if data_length is not None else 0
808
809 def _AllocBlockCounterss(self, part_size):
810 """Returns a freshly initialized array of block counters.
811
812 Args:
813 part_size: the size of the partition
814 Returns:
815 An array of unsigned char elements initialized to zero, one for each of
816 the blocks necessary for containing the partition.
817
818 """
819 num_blocks = (part_size + self.block_size - 1) / self.block_size
820 return array.array('B', [0] * num_blocks)
821
  def _CheckOperations(self, operations, report, base_name, old_part_size,
                       new_part_size, prev_data_offset, allow_unhashed,
                       allow_signature):
    """Checks a sequence of update operations.

    NOTE(review): the original docstring claimed a pair is returned, but the
    method returns only the total data blob size used -- confirm the intended
    contract.

    Args:
      operations: the sequence of operations to check
      report: the report object to add to
      base_name: the name of the operation block
      old_part_size: the old partition size in bytes
      new_part_size: the new partition size in bytes
      prev_data_offset: offset of last used data bytes
      allow_unhashed: allow operations with unhashed data blobs
      allow_signature: whether this sequence may contain signature operations
    Returns:
      The total data blob size used.
    Raises:
      PayloadError if any of the checks fails.

    """
    # The total size of data blobs used by operations scanned thus far.
    total_data_used = 0
    # Counts of specific operation types.
    op_counts = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.MOVE: 0,
        common.OpType.BSDIFF: 0,
    }
    # Total blob sizes for each operation type.
    op_blob_totals = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        # MOVE operations don't have blobs
        common.OpType.BSDIFF: 0,
    }
    # Counts of hashed vs unhashed operations.
    blob_hash_counts = {
        'hashed': 0,
        'unhashed': 0,
    }
    if allow_signature:
      blob_hash_counts['signature'] = 0

    # Allocate old and new block counters. The old counters are only needed
    # for delta payloads, where a source partition size is known.
    old_block_counters = (self._AllocBlockCounterss(old_part_size)
                          if old_part_size else None)
    new_block_counters = self._AllocBlockCounterss(new_part_size)

    # Process and verify each operation.
    op_num = 0
    for op, op_name in common.OperationIter(operations, base_name):
      op_num += 1

      # Check: type is valid.
      if op.type not in op_counts.keys():
        raise PayloadError('%s: invalid type (%d)' % (op_name, op.type))
      op_counts[op.type] += 1

      is_last = op_num == len(operations)
      # Data blobs must be laid out contiguously, hence the running offset
      # (prev_data_offset + total_data_used) handed to each operation check.
      curr_data_used = self._CheckOperation(
          op, op_name, is_last, old_block_counters, new_block_counters,
          old_part_size, new_part_size, prev_data_offset + total_data_used,
          allow_signature, allow_unhashed, blob_hash_counts)
      if curr_data_used:
        op_blob_totals[op.type] += curr_data_used
        total_data_used += curr_data_used

    # Report totals and breakdown statistics.
    report.AddField('total operations', op_num)
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_counts,
                                          key_names=common.OpType.NAMES),
        indent=1)
    report.AddField('total blobs', sum(blob_hash_counts.values()))
    report.AddField(None,
                    histogram.Histogram.FromCountDict(blob_hash_counts),
                    indent=1)
    report.AddField('total blob size', _AddHumanReadableSize(total_data_used))
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_blob_totals,
                                          formatter=_AddHumanReadableSize,
                                          key_names=common.OpType.NAMES),
        indent=1)

    # Report read/write histograms.
    if old_block_counters:
      report.AddField('block read hist',
                      histogram.Histogram.FromKeyList(old_block_counters),
                      linebreak=True, indent=1)

    new_write_hist = histogram.Histogram.FromKeyList(new_block_counters)
    # Check: full update must write each dst block once.
    if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
      raise PayloadError(
          '%s: not all blocks written exactly once during full update' %
          base_name)

    report.AddField('block write hist', new_write_hist, linebreak=True,
                    indent=1)

    return total_data_used
927
928 def _CheckSignatures(self, report, pubkey_file_name):
929 """Checks a payload's signature block."""
930 sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size)
931 sigs = update_metadata_pb2.Signatures()
932 sigs.ParseFromString(sigs_raw)
933 report.AddSection('signatures')
934
935 # Check: at least one signature present.
936 # pylint: disable=E1101
937 if not sigs.signatures:
938 raise PayloadError('signature block is empty')
939
940 # Check: signatures_{offset,size} must match the last (fake) operation.
941 last_ops_section = (self.payload.manifest.kernel_install_operations or
942 self.payload.manifest.install_operations)
943 fake_sig_op = last_ops_section[-1]
944 if not (self.sigs_offset == fake_sig_op.data_offset and
945 self.sigs_size == fake_sig_op.data_length):
946 raise PayloadError(
947 'signatures_{offset,size} (%d+%d) does not match last operation '
948 '(%d+%d)' %
949 (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
950 fake_sig_op.data_length))
951
952 # Compute the checksum of all data up to signature blob.
953 # TODO(garnold) we're re-reading the whole data section into a string
954 # just to compute the checksum; instead, we could do it incrementally as
955 # we read the blobs one-by-one, under the assumption that we're reading
956 # them in order (which currently holds). This should be reconsidered.
957 payload_hasher = self.payload.manifest_hasher.copy()
958 common.Read(self.payload.payload_file, self.sigs_offset,
959 offset=self.payload.data_offset, hasher=payload_hasher)
960
961 for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
962 sig_report = report.AddSubReport(sig_name)
963
964 # Check: signature contains mandatory fields.
965 self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
966 self._CheckMandatoryField(sig, 'data', None, sig_name)
967 sig_report.AddField('data len', len(sig.data))
968
969 # Check: signatures pertains to actual payload hash.
970 if sig.version == 1:
971 self._CheckSha256Signature(sig.data, pubkey_file_name,
972 payload_hasher.digest(), sig_name)
973 else:
974 raise PayloadError('unknown signature version (%d)' % sig.version)
975
976 def Run(self, pubkey_file_name=None, metadata_sig_file=None,
977 report_out_file=None, assert_type=None, block_size=0,
978 allow_unhashed=False):
979 """Checker entry point, invoking all checks.
980
981 Args:
982 pubkey_file_name: public key used for signature verification
983 metadata_sig_file: metadata signature, if verification is desired
984 report_out_file: file object to dump the report to
985 assert_type: assert that payload is either 'full' or 'delta' (optional)
986 block_size: expected filesystem / payload block size
987 allow_unhashed: allow operations with unhashed data blobs
988 Raises:
989 PayloadError if payload verification failed.
990
991 """
992 report = _PayloadReport()
993
994 if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
995 raise PayloadError("invalid assert_type value (`%s')" % assert_type)
996 self.payload_type = assert_type
997
998 if block_size:
999 self.block_size = block_size
1000 if not _IsPowerOfTwo(self.block_size):
1001 raise PayloadError('expected block (%d) size is not a power of two' %
1002 self.block_size)
1003
1004 # Get payload file size.
1005 self.payload.payload_file.seek(0, 2)
1006 payload_file_size = self.payload.payload_file.tell()
1007 self.payload.ResetFile()
1008
1009 try:
1010 # Check metadata signature (if provided).
1011 if metadata_sig_file:
1012 if not pubkey_file_name:
1013 raise PayloadError(
1014 'no public key provided, cannot verify metadata signature')
1015 metadata_sig = base64.b64decode(metadata_sig_file.read())
1016 self._CheckSha256Signature(metadata_sig, pubkey_file_name,
1017 self.payload.manifest_hasher.digest(),
1018 'metadata signature')
1019
1020 # Part 1: check the file header.
1021 report.AddSection('header')
1022 # Check: payload version is valid.
1023 if self.payload.header.version != 1:
1024 raise PayloadError('unknown payload version (%d)' %
1025 self.payload.header.version)
1026 report.AddField('version', self.payload.header.version)
1027 report.AddField('manifest len', self.payload.header.manifest_len)
1028
1029 # Part 2: check the manifest.
1030 self._CheckManifest(report)
1031 assert self.payload_type, 'payload type should be known by now'
1032
1033 # Part 3: examine rootfs operations.
1034 report.AddSection('rootfs operations')
1035 total_blob_size = self._CheckOperations(
1036 self.payload.manifest.install_operations, report,
1037 'install_operations', self.old_rootfs_size,
1038 self.new_rootfs_size, 0, allow_unhashed, False)
1039
1040 # Part 4: examine kernel operations.
1041 report.AddSection('kernel operations')
1042 total_blob_size += self._CheckOperations(
1043 self.payload.manifest.kernel_install_operations, report,
1044 'kernel_install_operations', self.old_kernel_size,
1045 self.new_kernel_size, total_blob_size, allow_unhashed, True)
1046
1047 # Check: operations data reach the end of the payload file.
1048 used_payload_size = self.payload.data_offset + total_blob_size
1049 if used_payload_size != payload_file_size:
1050 raise PayloadError(
1051 'used payload size (%d) different from actual file size (%d)' %
1052 (used_payload_size, payload_file_size))
1053
1054 # Part 5: handle payload signatures message.
1055 if self.sigs_size:
1056 if not pubkey_file_name:
1057 raise PayloadError(
1058 'no public key provided, cannot verify payload signature')
1059 self._CheckSignatures(report, pubkey_file_name)
1060
1061 # Part 6: summary.
1062 report.AddSection('summary')
1063 report.AddField('update type', self.payload_type)
1064
1065 report.Finalize()
1066 finally:
1067 if report_out_file:
1068 report.Dump(report_out_file)