paycheck: support for in-place BSDIFF operations
When applying BSDIFF operations in payloads, we used to extract the
source block extents from the old partition and serialize them into a
temporary file, and conversely deserialize bspatch's output from a
temporary file back into the new partition's extents. This worked, but
it did not exercise bspatch the way the update engine actually uses it.
This CL allows paycheck to invoke bspatch so that it reads/writes block
extents directly from/to the partition file, the same way the update
engine uses it. Since performance is the same, this is the new default
behavior; the old temp-file behavior remains available via a
command-line flag (-x or --extract-bsdiff).
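To illustrate (the offsets/lengths below are made up), the in-place
mode invokes bspatch directly on the partition file descriptor, passing
byte extents as "offset:length,..." arguments:

  bspatch /dev/fd/4 /dev/fd/4 <patch-file> 4096:8192,409600:4096 0:12288

whereas the extract mode still runs the old three-argument form,
bspatch <tmp-in> <tmp-out> <patch-file>, on temporary files built from
and written back to the extents.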
BUG=chromium:229705
TEST=bspatch invoked differently depending on the -x flag
TEST=Passes unit/integration tests
Change-Id: I8821754e1163b357617ece6befa42d1c2e575930
Reviewed-on: https://gerrit.chromium.org/gerrit/50486
Tested-by: Gilad Arnold <garnold@chromium.org>
Reviewed-by: Darin Petkov <petkov@chromium.org>
Commit-Queue: Gilad Arnold <garnold@chromium.org>
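For reference, a minimal sketch of the new library-level switch (the
Payload class, its constructor and Init() call are assumed from the
surrounding update_payload code; file names are placeholders; only the
bsdiff_in_place keyword in Apply() is introduced by this CL):

  import update_payload

  # Hypothetical usage sketch: apply a delta payload with in-place BSDIFF.
  payload = update_payload.Payload(open('delta_payload.bin', 'rb'))
  payload.Init()
  payload.Apply('new_kern.part', 'new_root.part',
                old_kernel_part='old_kern.part',
                old_rootfs_part='old_root.part',
                bsdiff_in_place=True)  # False restores the temp-file path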
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index 42fade6..9689af4 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -55,7 +55,7 @@
default_key = os.path.join(lib_dir,
'update_payload/update-payload-key.pub.pem')
- check_opts = optparse.OptionGroup(parser, 'Payload integrity checking')
+ check_opts = optparse.OptionGroup(parser, 'Checking payload integrity')
check_opts.add_option('-c', '--check', action='store_true', default=False,
help=('force payload integrity check (e.g. before '
'applying)'))
@@ -87,9 +87,15 @@
default=_DEFAULT_KERNEL_PART_SIZE, type='int',
help=('override default (%default) kernel partition '
'size'))
-
parser.add_option_group(check_opts)
+ apply_opts = optparse.OptionGroup(parser, 'Applying payload')
+ apply_opts.add_option('-x', '--extract-bsdiff', action='store_true',
+ default=False,
+ help=('use temp input/output files with BSDIFF '
+ 'operations (not in-place)'))
+ parser.add_option_group(apply_opts)
+
trace_opts = optparse.OptionGroup(parser, 'Block tracing')
trace_opts.add_option('-b', '--root-block', metavar='BLOCK', type='int',
help='trace the origin for a rootfs block')
@@ -142,6 +148,8 @@
# integrity check.
if not do_block_trace:
opts.check = True
+ if opts.extract_bsdiff:
+ parser.error('--extract-bsdiff can only be used when applying payloads')
else:
parser.error('unexpected number of arguments')
@@ -203,14 +211,12 @@
# Apply payload.
if extra_args:
- if options.assert_type == _TYPE_FULL:
- payload.Apply(extra_args[0], extra_args[1])
- elif options.assert_type == _TYPE_DELTA:
- payload.Apply(extra_args[0], extra_args[1],
- old_kernel_part=extra_args[2],
- old_rootfs_part=extra_args[3])
- else:
- assert False, 'cannot get here'
+ dargs = {'bsdiff_in_place': not options.extract_bsdiff}
+ if options.assert_type == _TYPE_DELTA:
+ dargs['old_kernel_part'] = extra_args[2]
+ dargs['old_rootfs_part'] = extra_args[3]
+
+ payload.Apply(extra_args[0], extra_args[1], **dargs)
except update_payload.PayloadError, e:
sys.stderr.write('Error: %s\n' % e)
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 0e4c2a8..9a1b509 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -81,20 +81,20 @@
"""
data = array.array('c')
+ if max_length < 0:
+ max_length = sys.maxint
for ex in extents:
if max_length == 0:
break
file_obj.seek(ex.start_block * block_size)
- read_length = ex.num_blocks * block_size
- if max_length > 0:
- read_length = min(max_length, read_length)
- max_length -= read_length
+ read_length = min(max_length, ex.num_blocks * block_size)
data.fromfile(file_obj, read_length)
+ max_length -= read_length
return data
def _WriteExtents(file_obj, data, extents, block_size, base_name):
- """Write data to file as defined by extent sequence.
+ """Writes data to file as defined by extent sequence.
This tries to be efficient by not copying data as it is written in chunks.
@@ -103,7 +103,7 @@
data: data to write
extents: sequence of block extents (offset and length)
block_size: size of each block
- base_name: name string of extent block for error reporting
+ base_name: name string of extent sequence for error reporting
Raises:
PayloadError when things don't add up.
@@ -111,18 +111,58 @@
data_offset = 0
data_length = len(data)
for ex, ex_name in common.ExtentIter(extents, base_name):
- if data_offset == data_length:
+ if not data_length:
raise PayloadError('%s: more write extents than data' % ex_name)
- write_length = min(data_length - data_offset, ex.num_blocks * block_size)
+ write_length = min(data_length, ex.num_blocks * block_size)
file_obj.seek(ex.start_block * block_size)
data_view = buffer(data, data_offset, write_length)
file_obj.write(data_view)
data_offset += write_length
+ data_length -= write_length
- if data_offset < data_length:
+ if data_length:
raise PayloadError('%s: more data than write extents' % base_name)
+def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
+ """Translates an extent sequence into a bspatch-compatible string argument.
+
+ Args:
+ extents: sequence of block extents (offset and length)
+ block_size: size of each block
+ base_name: name string of extent sequence for error reporting
+ data_length: the actual total length of the data in bytes (optional)
+ Returns:
+ A tuple consisting of (i) a string of the form
+ "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
+ for filling the last extent, (iii) the length of the padding (zero means no
+ padding is needed and the extents cover the full length of data).
+ Raises:
+ PayloadError if data_length is too short or too long.
+
+ """
+ arg = ''
+ pad_off = pad_len = 0
+ if data_length < 0:
+ data_length = sys.maxint
+ for ex, ex_name in common.ExtentIter(extents, base_name):
+ if not data_length:
+ raise PayloadError('%s: more extents than total data length' % ex_name)
+ start_byte = ex.start_block * block_size
+ num_bytes = ex.num_blocks * block_size
+ if data_length < num_bytes:
+ pad_off = start_byte + data_length
+ pad_len = num_bytes - data_length
+ num_bytes = data_length
+ arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
+ data_length -= num_bytes
+
+ if data_length:
+ raise PayloadError('%s: extents not covering full data length' % base_name)
+
+ return arg, pad_off, pad_len
+
+
#
# Payload application.
#
@@ -134,10 +174,18 @@
"""
- def __init__(self, payload):
+ def __init__(self, payload, bsdiff_in_place=True):
+ """Initialize the applier.
+
+ Args:
+ payload: the payload object to apply
+ bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
+
+ """
assert payload.is_init, 'uninitialized update payload'
self.payload = payload
self.block_size = payload.manifest.block_size
+ self.bsdiff_in_place = bsdiff_in_place
def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
"""Applies a REPLACE{,_BZ} operation.
@@ -234,44 +282,66 @@
"""
block_size = self.block_size
- # Gather input raw data and write to a temp file.
- in_data = _ReadExtents(part_file, op.src_extents, block_size,
- max_length=op.src_length)
- with tempfile.NamedTemporaryFile(delete=False) as in_file:
- in_file_name = in_file.name
- in_file.write(in_data)
-
# Dump patch data to file.
with tempfile.NamedTemporaryFile(delete=False) as patch_file:
patch_file_name = patch_file.name
patch_file.write(patch_data)
- # Allocate tepmorary output file.
- with tempfile.NamedTemporaryFile(delete=False) as out_file:
- out_file_name = out_file.name
+ if self.bsdiff_in_place and hasattr(part_file, 'fileno'):
+ # Construct input and output extents argument for bspatch.
+ in_extents_arg, _, _ = _ExtentsToBspatchArg(
+ op.src_extents, block_size, '%s.src_extents' % op_name,
+ data_length=op.src_length)
+ out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
+ op.dst_extents, block_size, '%s.dst_extents' % op_name,
+ data_length=op.dst_length)
- # Invoke bspatch.
- bspatch_cmd = ['bspatch', in_file_name, out_file_name, patch_file_name]
- subprocess.check_call(bspatch_cmd)
+ # Invoke bspatch on partition file with extents args.
+ file_name = '/dev/fd/%d' % part_file.fileno()
+ bspatch_cmd = ['bspatch', file_name, file_name, patch_file_name,
+ in_extents_arg, out_extents_arg]
+ subprocess.check_call(bspatch_cmd)
- # Read output.
- with open(out_file_name, 'rb') as out_file:
- out_data = out_file.read()
- if len(out_data) != op.dst_length:
- raise PayloadError(
- '%s: actual patched data length (%d) not as expected (%d)' %
- (op_name, len(out_data), op.dst_length))
+ # Pad with zeros past the total output length.
+ if pad_len:
+ part_file.seek(pad_off)
+ part_file.write('\0' * pad_len)
+ else:
+ # Gather input raw data and write to a temp file.
+ in_data = _ReadExtents(part_file, op.src_extents, block_size,
+ max_length=op.src_length)
+ with tempfile.NamedTemporaryFile(delete=False) as in_file:
+ in_file_name = in_file.name
+ in_file.write(in_data)
- # Write output back to partition, with padding.
- unaligned_out_len = len(out_data) % block_size
- if unaligned_out_len:
- out_data += '\0' * (block_size - unaligned_out_len)
- _WriteExtents(part_file, out_data, op.dst_extents, block_size,
- '%s.dst_extents' % op_name)
+ # Allocate temporary output file.
+ with tempfile.NamedTemporaryFile(delete=False) as out_file:
+ out_file_name = out_file.name
- # Delete all temporary files.
- os.remove(in_file_name)
- os.remove(out_file_name)
+ # Invoke bspatch.
+ bspatch_cmd = ['bspatch', in_file_name, out_file_name, patch_file_name]
+ subprocess.check_call(bspatch_cmd)
+
+ # Read output.
+ with open(out_file_name, 'rb') as out_file:
+ out_data = out_file.read()
+ if len(out_data) != op.dst_length:
+ raise PayloadError(
+ '%s: actual patched data length (%d) not as expected (%d)' %
+ (op_name, len(out_data), op.dst_length))
+
+ # Write output back to partition, with padding.
+ unaligned_out_len = len(out_data) % block_size
+ if unaligned_out_len:
+ out_data += '\0' * (block_size - unaligned_out_len)
+ _WriteExtents(part_file, out_data, op.dst_extents, block_size,
+ '%s.dst_extents' % op_name)
+
+ # Delete input/output files.
+ os.remove(in_file_name)
+ os.remove(out_file_name)
+
+ # Delete patch file.
os.remove(patch_file_name)
def _ApplyOperations(self, operations, base_name, part_file, part_size):
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 13a0518..7631cb0 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -263,7 +263,7 @@
def __init__(self, payload, assert_type=None, block_size=0,
allow_unhashed=False, disabled_tests=()):
- """Initialize the checker object.
+ """Initialize the checker.
Args:
payload: the payload object to check
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 34ebaae..fefc7e4 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -206,7 +206,7 @@
report_out_file=report_out_file)
def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
- old_rootfs_part=None):
+ old_rootfs_part=None, bsdiff_in_place=True):
"""Applies the update payload.
Args:
@@ -214,6 +214,7 @@
new_rootfs_part: name of dest rootfs partition file
old_kernel_part: name of source kernel partition file (optional)
old_rootfs_part: name of source rootfs partition file (optional)
+ bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
Raises:
PayloadError if payload application failed.
@@ -221,7 +222,7 @@
self._AssertInit()
# Create a short-lived payload applier object and run it.
- helper = applier.PayloadApplier(self)
+ helper = applier.PayloadApplier(self, bsdiff_in_place=bsdiff_in_place)
helper.Run(new_kernel_part, new_rootfs_part,
old_kernel_part=old_kernel_part,
old_rootfs_part=old_rootfs_part)