Merge "Create symbolic boot vdex files for different ISAs"
diff --git a/core/Makefile b/core/Makefile
index 0d14c85..d020335 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -123,6 +123,24 @@
endif
# -----------------------------------------------------------------
+# FINAL_VENDOR_DEFAULT_PROPERTIES will be installed in vendor/default.prop if
+# property_overrides_split_enabled is true. Otherwise it will be installed in
+# ROOT/default.prop.
+ifdef BOARD_VNDK_VERSION
+ ifeq ($(BOARD_VNDK_VERSION),current)
+ FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(PLATFORM_VNDK_VERSION)
+ else
+ FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(BOARD_VNDK_VERSION)
+ endif
+else
+ FINAL_VENDOR_DEFAULT_PROPERTIES :=
+endif
+FINAL_VENDOR_DEFAULT_PROPERTIES += \
+ $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+FINAL_VENDOR_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+ $(FINAL_VENDOR_DEFAULT_PROPERTIES),=)
+
+# -----------------------------------------------------------------
# prop.default
ifdef property_overrides_split_enabled
INSTALLED_DEFAULT_PROP_TARGET := $(TARGET_OUT)/etc/prop.default
@@ -139,7 +157,7 @@
$(call collapse-pairs, $(PRODUCT_SYSTEM_DEFAULT_PROPERTIES))
ifndef property_overrides_split_enabled
FINAL_DEFAULT_PROPERTIES += \
- $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+ $(call collapse-pairs, $(FINAL_VENDOR_DEFAULT_PROPERTIES))
endif
FINAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
$(FINAL_DEFAULT_PROPERTIES),=)
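
The dedup call above keeps only the first value seen for each property key, which is why `ro.vndk.version` is assigned before `PRODUCT_DEFAULT_PROPERTY_OVERRIDES` is appended: a product override cannot clobber the computed VNDK version. A minimal Python sketch of that first-wins behavior (the make macro's real implementation lives elsewhere in the build system; this function is only illustrative):

```python
def uniq_pairs_by_first_component(pairs, sep='='):
    """Keeps only the first KEY=VALUE entry seen for each KEY."""
    seen = set()
    result = []
    for pair in pairs:
        key = pair.split(sep, 1)[0]
        if key not in seen:
            seen.add(key)
            result.append(pair)
    return result

props = ['ro.vndk.version=current', 'ro.oem.key1=x', 'ro.vndk.version=27']
assert uniq_pairs_by_first_component(props) == [
    'ro.vndk.version=current', 'ro.oem.key1=x']
```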
@@ -174,20 +192,6 @@
INSTALLED_VENDOR_DEFAULT_PROP_TARGET := $(TARGET_OUT_VENDOR)/default.prop
ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_VENDOR_DEFAULT_PROP_TARGET)
-ifdef BOARD_VNDK_VERSION
- ifeq ($(BOARD_VNDK_VERSION),current)
- FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(PLATFORM_VNDK_VERSION)
- else
- FINAL_VENDOR_DEFAULT_PROPERTIES := ro.vndk.version=$(BOARD_VNDK_VERSION)
- endif
-else
- FINAL_VENDOR_DEFAULT_PROPERTIES :=
-endif
-FINAL_VENDOR_DEFAULT_PROPERTIES += \
- $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
-FINAL_VENDOR_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
- $(FINAL_VENDOR_DEFAULT_PROPERTIES),=)
-
$(INSTALLED_VENDOR_DEFAULT_PROP_TARGET): $(INSTALLED_DEFAULT_PROP_TARGET)
@echo Target buildinfo: $@
@mkdir -p $(dir $@)
diff --git a/core/config.mk b/core/config.mk
index e9b5d4c..64743e0 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -797,6 +797,14 @@
requirements :=
+# BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED can be true only if early mounting
+# of partitions is supported. Since early mount is required for full-Treble
+# products, BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED defaults to true for
+# them.
+ifeq ($(PRODUCT_FULL_TREBLE),true)
+ BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED ?= true
+endif
+
# If PRODUCT_USE_VNDK is true and BOARD_VNDK_VERSION is not defined yet,
# BOARD_VNDK_VERSION will be set to "current" as default.
# PRODUCT_USE_VNDK will be true in Android-P or later launching devices.
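
GNU make's `?=` assigns only when the variable is still undefined, so a board that explicitly sets BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED (to either value) keeps its own setting. A rough Python analogue of that defaulting, treating the board config as a dict (illustrative only):

```python
board = {'PRODUCT_FULL_TREBLE': 'true'}

# Like '?=': assign only when the key is absent, so an explicit board
# setting (even 'false') wins over this default.
if board.get('PRODUCT_FULL_TREBLE') == 'true':
    board.setdefault('BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED', 'true')

assert board['BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED'] == 'true'
```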
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 639b019..a084f79 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -133,6 +133,8 @@
$(call add_json_list, NamespacesToExport, $(PRODUCT_SOONG_NAMESPACES))
+$(call add_json_list, PgoAdditionalProfileDirs, $(PGO_ADDITIONAL_PROFILE_DIRS))
+
_contents := $(subst $(comma)$(newline)__SV_END,$(newline)}$(newline),$(_contents)__SV_END)
$(file >$(SOONG_VARIABLES).tmp,$(_contents))
diff --git a/target/product/base.mk b/target/product/base.mk
index 750d3fa..14ff1c2 100644
--- a/target/product/base.mk
+++ b/target/product/base.mk
@@ -31,6 +31,7 @@
bit \
blkid \
bmgr \
+ bpfloader \
bugreport \
bugreportz \
cameraserver \
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index 18eeb40..3f1d6df 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -51,6 +51,7 @@
libbinder \
libc \
libc_malloc_debug \
+ libc_malloc_hooks \
libcutils \
libdl \
libgui \
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index fe37b39..24c5b2d 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -15,7 +15,6 @@
from __future__ import print_function
import array
-import common
import copy
import functools
import heapq
@@ -27,9 +26,10 @@
import subprocess
import sys
import threading
-
from collections import deque, OrderedDict
from hashlib import sha1
+
+import common
from rangelib import RangeSet
@@ -264,9 +264,6 @@
reasons. The stats are only meaningful when imgdiff is not disabled by the
caller of BlockImageDiff. In addition, only files with supported types
(BlockImageDiff.FileTypeSupportedByImgdiff()) are allowed to be logged.
-
- TODO: The info could be inaccurate due to the unconditional fallback from
- imgdiff to bsdiff on errors. The fallbacks will be removed.
"""
USED_IMGDIFF = "APK files diff'd with imgdiff"
@@ -275,6 +272,8 @@
# Reasons for not applying imgdiff on APKs.
SKIPPED_TRIMMED = "Not used imgdiff due to trimmed RangeSet"
SKIPPED_NONMONOTONIC = "Not used imgdiff due to having non-monotonic ranges"
+ SKIPPED_SHARED_BLOCKS = "Not used imgdiff due to using shared blocks"
+ SKIPPED_INCOMPLETE = "Not used imgdiff due to incomplete RangeSet"
# The list of valid reasons, which will also be the dumped order in a report.
REASONS = (
@@ -282,6 +281,8 @@
USED_IMGDIFF_LARGE_APK,
SKIPPED_TRIMMED,
SKIPPED_NONMONOTONIC,
+ SKIPPED_SHARED_BLOCKS,
+ SKIPPED_INCOMPLETE,
)
def __init__(self):
@@ -415,6 +416,8 @@
- The file type is supported by imgdiff;
- The source and target blocks are monotonic (i.e. the data is stored with
blocks in increasing order);
+ - Neither file contains shared blocks;
+ - Both files have complete block lists;
- We haven't removed any blocks from the source set.
If all these conditions are satisfied, concatenating all the blocks in the
@@ -430,13 +433,22 @@
Returns:
A boolean result.
"""
- if (self.disable_imgdiff or not self.FileTypeSupportedByImgdiff(name)):
+ if self.disable_imgdiff or not self.FileTypeSupportedByImgdiff(name):
return False
if not tgt_ranges.monotonic or not src_ranges.monotonic:
self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_NONMONOTONIC)
return False
+ if (tgt_ranges.extra.get('uses_shared_blocks') or
+ src_ranges.extra.get('uses_shared_blocks')):
+ self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_SHARED_BLOCKS)
+ return False
+
+ if tgt_ranges.extra.get('incomplete') or src_ranges.extra.get('incomplete'):
+ self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_INCOMPLETE)
+ return False
+
if tgt_ranges.extra.get('trimmed') or src_ranges.extra.get('trimmed'):
self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_TRIMMED)
return False
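
Each new check in CanUseImgdiff follows the same shape: inspect a tag in RangeSet.extra on either side, log the skip reason, and return False so the transfer falls back to bsdiff. A condensed sketch of that tag-gating order (FakeRanges and the callable logger are stand-ins for rangelib.RangeSet and ImgdiffStats.Log):

```python
class FakeRanges(object):
    """Stand-in for rangelib.RangeSet; only the 'extra' dict matters here."""
    def __init__(self, **extra):
        self.extra = extra

def can_use_imgdiff(name, tgt_ranges, src_ranges, log_skip):
    """Returns False (logging the reason) on the first disqualifying tag."""
    for tag, reason in (('uses_shared_blocks', 'SKIPPED_SHARED_BLOCKS'),
                        ('incomplete', 'SKIPPED_INCOMPLETE'),
                        ('trimmed', 'SKIPPED_TRIMMED')):
        if tgt_ranges.extra.get(tag) or src_ranges.extra.get(tag):
            log_skip(name, reason)
            return False
    return True

skips = []
assert not can_use_imgdiff('app.apk', FakeRanges(incomplete=True),
                           FakeRanges(), lambda n, r: skips.append(r))
assert skips == ['SKIPPED_INCOMPLETE']
```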
@@ -535,7 +547,7 @@
# <# blocks> - <stash refs...>
size = xf.src_ranges.size()
- src_str = [str(size)]
+ src_str_buffer = [str(size)]
unstashed_src_ranges = xf.src_ranges
mapped_stashes = []
@@ -545,7 +557,7 @@
sr = xf.src_ranges.map_within(sr)
mapped_stashes.append(sr)
assert sh in stashes
- src_str.append("%s:%s" % (sh, sr.to_string_raw()))
+ src_str_buffer.append("%s:%s" % (sh, sr.to_string_raw()))
stashes[sh] -= 1
if stashes[sh] == 0:
free_string.append("free %s\n" % (sh,))
@@ -553,17 +565,17 @@
stashes.pop(sh)
if unstashed_src_ranges:
- src_str.insert(1, unstashed_src_ranges.to_string_raw())
+ src_str_buffer.insert(1, unstashed_src_ranges.to_string_raw())
if xf.use_stash:
mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
- src_str.insert(2, mapped_unstashed.to_string_raw())
+ src_str_buffer.insert(2, mapped_unstashed.to_string_raw())
mapped_stashes.append(mapped_unstashed)
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
else:
- src_str.insert(1, "-")
+ src_str_buffer.insert(1, "-")
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
- src_str = " ".join(src_str)
+ src_str = " ".join(src_str_buffer)
# version 3+:
# zero <rangeset>
@@ -684,11 +696,11 @@
max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
print("max stashed blocks: %d (%d bytes), "
"limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, self._max_stashed_size, max_allowed,
- self._max_stashed_size * 100.0 / max_allowed))
+ max_stashed_blocks, self._max_stashed_size, max_allowed,
+ self._max_stashed_size * 100.0 / max_allowed))
else:
print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
- max_stashed_blocks, self._max_stashed_size))
+ max_stashed_blocks, self._max_stashed_size))
def ReviseStashSize(self):
print("Revising stash size...")
@@ -851,10 +863,6 @@
diff_total = len(diff_queue)
patches = [None] * diff_total
error_messages = []
- warning_messages = []
- if sys.stdout.isatty():
- global diff_done
- diff_done = 0
# Using multiprocessing doesn't give additional benefits, due to the
# pattern of the code. The diffing work is done by subprocess.call, which
@@ -870,8 +878,15 @@
if not diff_queue:
return
xf_index, imgdiff, patch_index = diff_queue.pop()
+ xf = self.transfers[xf_index]
- xf = self.transfers[xf_index]
+ if sys.stdout.isatty():
+ diff_left = len(diff_queue)
+ progress = (diff_total - diff_left) * 100 / diff_total
+ # '\033[K' is to clear to EOL.
+ print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
+ sys.stdout.flush()
+
patch = xf.patch
if not patch:
src_ranges = xf.src_ranges
@@ -891,40 +906,16 @@
except ValueError as e:
message.append(
"Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
- "imgdiff" if imgdiff else "bsdiff",
- xf.tgt_name if xf.tgt_name == xf.src_name else
+ "imgdiff" if imgdiff else "bsdiff",
+ xf.tgt_name if xf.tgt_name == xf.src_name else
xf.tgt_name + " (from " + xf.src_name + ")",
- xf.tgt_ranges, xf.src_ranges, e.message))
- # TODO(b/68016761): Better handle the holes in mke2fs created
- # images.
- if imgdiff:
- try:
- patch = compute_patch(src_file, tgt_file, imgdiff=False)
- message.append(
- "Fell back and generated with bsdiff instead for %s" % (
- xf.tgt_name,))
- xf.style = "bsdiff"
- with lock:
- warning_messages.extend(message)
- del message[:]
- except ValueError as e:
- message.append(
- "Also failed to generate with bsdiff for %s:\n%s" % (
- xf.tgt_name, e.message))
-
+ xf.tgt_ranges, xf.src_ranges, e.message))
if message:
with lock:
error_messages.extend(message)
with lock:
patches[patch_index] = (xf_index, patch)
- if sys.stdout.isatty():
- global diff_done
- diff_done += 1
- progress = diff_done * 100 / diff_total
- # '\033[K' is to clear to EOL.
- print(' [%d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
- sys.stdout.flush()
threads = [threading.Thread(target=diff_worker)
for _ in range(self.threads)]
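
The progress line is now printed while the queue lock is held, right after an item is popped, replacing the separate diff_done counter that was updated after each diff finished. A self-contained sketch of that worker-pool pattern (the work function is a placeholder for the expensive diffing call):

```python
import sys
import threading

def run_with_progress(queue, num_threads, work):
    """Pops items under a lock, printing progress; runs work() unlocked."""
    total = len(queue)
    lock = threading.Lock()

    def worker():
        while True:
            with lock:
                if not queue:
                    return
                item = queue.pop()
                done = total - len(queue)
                # '\033[K' clears to end of line; '\r' rewinds the cursor.
                print(' [%3d%%] %s\033[K' % (done * 100 // total, item),
                      end='\r')
                sys.stdout.flush()
            work(item)  # the expensive call runs outside the lock

    threads = [threading.Thread(target=worker) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('')

run_with_progress(list('abcdef'), 2, lambda item: None)
```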
@@ -936,11 +927,6 @@
if sys.stdout.isatty():
print('\n')
- if warning_messages:
- print('WARNING:')
- print('\n'.join(warning_messages))
- print('\n\n\n')
-
if error_messages:
print('ERROR:')
print('\n'.join(error_messages))
@@ -961,11 +947,11 @@
if common.OPTIONS.verbose:
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
- xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
- xf.style,
- xf.tgt_name if xf.tgt_name == xf.src_name else (
- xf.tgt_name + " (from " + xf.src_name + ")"),
- xf.tgt_ranges, xf.src_ranges))
+ xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
+ xf.style,
+ xf.tgt_name if xf.tgt_name == xf.src_name else (
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ xf.tgt_ranges, xf.src_ranges))
def AssertSha1Good(self):
"""Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -1198,7 +1184,8 @@
while sinks:
new_sinks = OrderedDict()
for u in sinks:
- if u not in G: continue
+ if u not in G:
+ continue
s2.appendleft(u)
del G[u]
for iu in u.incoming:
@@ -1211,7 +1198,8 @@
while sources:
new_sources = OrderedDict()
for u in sources:
- if u not in G: continue
+ if u not in G:
+ continue
s1.append(u)
del G[u]
for iu in u.outgoing:
@@ -1220,7 +1208,8 @@
new_sources[iu] = None
sources = new_sources
- if not G: break
+ if not G:
+ break
# Find the "best" vertex to put next. "Best" is the one that
# maximizes the net difference in source blocks saved we get by
@@ -1277,14 +1266,16 @@
intersections = OrderedDict()
for s, e in a.tgt_ranges:
for i in range(s, e):
- if i >= len(source_ranges): break
+ if i >= len(source_ranges):
+ break
# Add all the Transfers in source_ranges[i] to the (ordered) set.
if source_ranges[i] is not None:
for j in source_ranges[i]:
intersections[j] = None
for b in intersections:
- if a is b: continue
+ if a is b:
+ continue
# If the blocks written by A are read by B, then B needs to go before A.
i = a.tgt_ranges.intersect(b.src_ranges)
@@ -1421,8 +1412,9 @@
if tgt_changed < tgt_size * crop_threshold:
assert tgt_changed + tgt_skipped.size() == tgt_size
- print('%10d %10d (%6.2f%%) %s' % (tgt_skipped.size(), tgt_size,
- tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+ print('%10d %10d (%6.2f%%) %s' % (
+ tgt_skipped.size(), tgt_size,
+ tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
AddSplitTransfers(
"%s-skipped" % (tgt_name,),
"%s-skipped" % (src_name,),
@@ -1515,11 +1507,6 @@
be valid because the block ranges of src-X & tgt-X will always stay the
same afterwards; but there's a chance we don't use the patch if we
convert the "diff" command into "new" or "move" later.
-
- The split will be attempted by calling imgdiff, which expects the input
- files to be valid zip archives. If imgdiff fails for some reason (i.e.
- holes in the APK file), we will fall back to split the failed APKs into
- fixed size chunks.
"""
while True:
@@ -1541,16 +1528,11 @@
"--block-limit={}".format(max_blocks_per_transfer),
"--split-info=" + patch_info_file,
src_file, tgt_file, patch_file]
- p = common.Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- if p.returncode != 0:
- print("Failed to create patch between {} and {},"
- " falling back to bsdiff".format(src_name, tgt_name))
- with transfer_lock:
- AddSplitTransfersWithFixedSizeChunks(tgt_name, src_name,
- tgt_ranges, src_ranges,
- "diff", self.transfers)
- continue
+ p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ imgdiff_output, _ = p.communicate()
+ assert p.returncode == 0, \
+ "Failed to create imgdiff patch between {} and {}:\n{}".format(
+ src_name, tgt_name, imgdiff_output)
with open(patch_info_file) as patch_info:
lines = patch_info.readlines()
@@ -1560,7 +1542,7 @@
tgt_ranges, src_ranges,
lines)
for index, (patch_start, patch_length, split_tgt_ranges,
- split_src_ranges) in enumerate(split_info_list):
+ split_src_ranges) in enumerate(split_info_list):
with open(patch_file) as f:
f.seek(patch_start)
patch_content = f.read(patch_length)
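
With the bsdiff fallback removed, an imgdiff failure is now fatal, and stderr is merged into stdout so the assertion message carries the tool's full output. A generic sketch of that subprocess pattern (common.Run is essentially a subprocess.Popen wrapper; `echo` is just a placeholder command):

```python
import subprocess

def run_checked(cmd):
    """Runs cmd; on failure, surfaces the combined stdout/stderr."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    assert proc.returncode == 0, \
        'Failed to run {}:\n{}'.format(cmd, output)
    return output

run_checked(['echo', 'ok'])
```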
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 16600ed..370710e 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -625,7 +625,7 @@
return tmp, zipfile.ZipFile(filename, "r")
-def GetSparseImage(which, tmpdir, input_zip):
+def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
"""Returns a SparseImage object suitable for passing to BlockImageDiff.
This function loads the specified sparse image from the given path, and
@@ -637,6 +637,7 @@
which: The partition name, which must be "system" or "vendor".
tmpdir: The directory that contains the prebuilt image and block map file.
input_zip: The target-files ZIP archive.
+ allow_shared_blocks: Whether having shared blocks is allowed.
Returns:
A SparseImage object, with file_map info loaded.
@@ -655,7 +656,8 @@
# unconditionally. Note that they are still part of care_map. (Bug: 20939131)
clobbered_blocks = "0"
- image = sparse_img.SparseImage(path, mappath, clobbered_blocks)
+ image = sparse_img.SparseImage(path, mappath, clobbered_blocks,
+ allow_shared_blocks=allow_shared_blocks)
# block.map may contain fewer blocks, because mke2fs may skip allocating blocks
# if they contain all zeros. We can't reconstruct such a file from its block
@@ -669,6 +671,13 @@
info = input_zip.getinfo(arcname)
ranges = image.file_map[entry]
+
+ # If a RangeSet has been tagged as using shared blocks while loading the
+ # image, its block list is already incomplete for that reason. Don't also
+ # tag it 'incomplete', which would skew the imgdiff stats.
+ if ranges.extra.get('uses_shared_blocks'):
+ continue
+
if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
ranges.extra['incomplete'] = True
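
The 'incomplete' tag comes from a size comparison: if the file's byte size, rounded up to the 4096-byte block size, exceeds what the block map accounts for, some blocks were skipped (mke2fs omits all-zero blocks). A sketch of that check (RoundUpTo4K exists in common.py; this reimplementation is only for illustration):

```python
BLOCK_SIZE = 4096

def round_up_to_4k(value):
    """Rounds value up to the next multiple of 4096."""
    return (value + BLOCK_SIZE - 1) // BLOCK_SIZE * BLOCK_SIZE

def is_incomplete(file_size, num_mapped_blocks):
    """True if the block map accounts for fewer blocks than the file needs."""
    return round_up_to_4k(file_size) > num_mapped_blocks * BLOCK_SIZE

assert is_incomplete(file_size=8193, num_mapped_blocks=2)      # needs 3 blocks
assert not is_incomplete(file_size=8192, num_mapped_blocks=2)  # exactly 2
```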
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index cd497b2..a22145a 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -386,11 +386,17 @@
SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
- def __init__(self):
+ def __init__(self, secondary=False):
+ """Initializes a Payload instance.
+
+ Args:
+ secondary: Whether it's generating a secondary payload (default: False).
+ """
# The place where the output from the subprocess should go.
self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
self.payload_file = None
self.payload_properties = None
+ self.secondary = secondary
def Generate(self, target_file, source_file=None, additional_args=None):
"""Generates a payload from the given target-files zip(s).
@@ -470,6 +476,10 @@
p1.communicate()
assert p1.returncode == 0, "brillo_update_payload properties failed"
+ if self.secondary:
+ with open(properties_file, "a") as f:
+ f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+
if OPTIONS.wipe_user_data:
with open(properties_file, "a") as f:
f.write("POWERWASH=1\n")
@@ -477,18 +487,16 @@
self.payload_file = signed_payload_file
self.payload_properties = properties_file
- def WriteToZip(self, output_zip, secondary=False):
+ def WriteToZip(self, output_zip):
"""Writes the payload to the given zip.
Args:
output_zip: The output ZipFile instance.
- secondary: Whether the payload should be packed as secondary payload
- (default: False).
"""
assert self.payload_file is not None
assert self.payload_properties is not None
- if secondary:
+ if self.secondary:
payload_arcname = Payload.SECONDARY_PAYLOAD_BIN
payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT
else:
@@ -778,11 +786,15 @@
script.ShowProgress(system_progress, 0)
+ # See the notes in WriteBlockIncrementalOTAPackage().
+ allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"
+
# Full OTA is done as an "incremental" against an empty source image. This
# has the effect of writing new data from the package to the entire
# partition, but lets us reuse the updater code that writes incrementals to
# do it.
- system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip)
+ system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip,
+ allow_shared_blocks)
system_tgt.ResetFileMap()
system_diff = common.BlockDifference("system", system_tgt, src=None)
system_diff.WriteScript(script, output_zip)
@@ -793,7 +805,8 @@
if HasVendorPartition(input_zip):
script.ShowProgress(0.1, 0)
- vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip)
+ vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip,
+ allow_shared_blocks)
vendor_tgt.ResetFileMap()
vendor_diff = common.BlockDifference("vendor", vendor_tgt)
vendor_diff.WriteScript(script, output_zip)
@@ -970,8 +983,16 @@
target_recovery = common.GetBootableImage(
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
- system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip)
- system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip)
+ # When the target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may
+ # contain shared blocks (i.e. some blocks show up in multiple files' block
+ # lists). We can only allocate such shared blocks to the first "owner", and
+ # must disable imgdiff for all later occurrences.
+ allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
+ target_info.get('ext4_share_dup_blocks') == "true")
+ system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
+ allow_shared_blocks)
+ system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
+ allow_shared_blocks)
blockimgdiff_version = max(
int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
@@ -996,8 +1017,10 @@
if HasVendorPartition(target_zip):
if not HasVendorPartition(source_zip):
raise RuntimeError("can't generate incremental that adds /vendor")
- vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip)
- vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip)
+ vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
+ allow_shared_blocks)
+ vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
+ allow_shared_blocks)
# Check first block of vendor partition for remount R/W only if
# disk type is ext4
@@ -1319,10 +1342,10 @@
# We always include a full payload for the secondary slot, even when
# building an incremental OTA. See the comments for "--include_secondary".
secondary_target_file = GetTargetFilesZipForSecondaryImages(target_file)
- secondary_payload = Payload()
+ secondary_payload = Payload(secondary=True)
secondary_payload.Generate(secondary_target_file)
secondary_payload.Sign(payload_signer)
- secondary_payload.WriteToZip(output_zip, secondary=True)
+ secondary_payload.WriteToZip(output_zip)
# If dm-verity is supported for the device, copy contents of care_map
# into A/B OTA package.
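
Moving the `secondary` flag into the constructor lets every stage of the Payload lifecycle see it, which is what allows the properties step to append SWITCH_SLOT_ON_REBOOT=0 instead of special-casing WriteToZip. A small sketch of that property-appending step (the function name and tempfile demo are illustrative; the property strings come from this diff):

```python
import tempfile

def finalize_properties(properties_file, secondary=False, wipe_user_data=False):
    """Appends the optional payload property flags shown in this change."""
    with open(properties_file, 'a') as f:
        if secondary:
            # A secondary payload must not switch the active slot on reboot.
            f.write('SWITCH_SLOT_ON_REBOOT=0\n')
        if wipe_user_data:
            f.write('POWERWASH=1\n')

# Works on POSIX, where a NamedTemporaryFile can be reopened by name.
with tempfile.NamedTemporaryFile(mode='w+', suffix='.txt') as props:
    finalize_properties(props.name, secondary=True)
    props.seek(0)
    assert 'SWITCH_SLOT_ON_REBOOT=0' in props.read()
```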
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index c978be8..083da7a 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -33,7 +33,7 @@
"""
def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
- mode="rb", build_map=True):
+ mode="rb", build_map=True, allow_shared_blocks=False):
self.simg_f = f = open(simg_fn, mode)
header_bin = f.read(28)
@@ -129,7 +129,8 @@
self.extended = extended
if file_map_fn:
- self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
+ self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
+ allow_shared_blocks)
else:
self.file_map = {"__DATA": self.care_map}
@@ -209,7 +210,14 @@
yield fill_data * (this_read * (self.blocksize >> 2))
to_read -= this_read
- def LoadFileBlockMap(self, fn, clobbered_blocks):
+ def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
+ """Loads the given block map file.
+
+ Args:
+ fn: The filename of the block map file.
+ clobbered_blocks: A RangeSet instance for the clobbered blocks.
+ allow_shared_blocks: Whether having shared blocks is allowed.
+ """
remaining = self.care_map
self.file_map = out = {}
@@ -217,6 +225,18 @@
for line in f:
fn, ranges = line.split(None, 1)
ranges = rangelib.RangeSet.parse(ranges)
+
+ if allow_shared_blocks:
+ # Find the shared blocks that have been claimed by others.
+ shared_blocks = ranges.subtract(remaining)
+ if shared_blocks:
+ ranges = ranges.subtract(shared_blocks)
+ if not ranges:
+ continue
+
+ # Tag the entry so that we can skip applying imgdiff on this file.
+ ranges.extra['uses_shared_blocks'] = True
+
out[fn] = ranges
assert ranges.size() == ranges.intersect(remaining).size()
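
The claiming here is first-come-first-served: each file's ranges are checked against what is still unclaimed ('remaining'); anything already taken is treated as shared, subtracted away, and the entry is tagged so imgdiff skips it later. A sketch using plain sets in place of RangeSet (subtract/intersect mirror the RangeSet calls; names are illustrative):

```python
def load_file_block_map(entries, care_map, allow_shared_blocks):
    """entries: list of (name, set_of_blocks). Returns (claims, tagged names).

    Blocks are handed out first-come-first-served; later files that overlap
    an earlier claim lose the shared blocks and are tagged.
    """
    remaining = set(care_map)
    out = {}
    shared_users = set()
    for name, blocks in entries:
        if allow_shared_blocks:
            shared = blocks - remaining   # blocks someone already claimed
            if shared:
                blocks = blocks - shared
                if not blocks:
                    continue              # nothing exclusively ours remains
                shared_users.add(name)    # skip imgdiff for this file later
        out[name] = blocks
        # Mirrors the RangeSet assertion: every claimed block must still be
        # unowned at this point.
        assert blocks <= remaining
        remaining -= blocks
    return out, shared_users

entries = [('a.apk', {1, 2, 3}), ('b.apk', {3, 4})]
out, shared = load_file_block_map(entries, care_map=range(10),
                                  allow_shared_blocks=True)
assert out['b.apk'] == {4} and shared == {'b.apk'}
```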
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index a2552d6..ceada18 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -236,11 +236,19 @@
block_image_diff.CanUseImgdiff(
"/vendor/app/app3.apk", RangeSet("10-15"), src_ranges))
+ # At least one of the ranges is incomplete.
+ src_ranges = RangeSet("0-5")
+ src_ranges.extra['incomplete'] = True
+ self.assertFalse(
+ block_image_diff.CanUseImgdiff(
+ "/vendor/app/app4.apk", RangeSet("10-15"), src_ranges))
+
# The stats are correctly logged.
self.assertDictEqual(
{
ImgdiffStats.SKIPPED_NONMONOTONIC : {'/system/app/app2.apk'},
ImgdiffStats.SKIPPED_TRIMMED : {'/vendor/app/app3.apk'},
+ ImgdiffStats.SKIPPED_INCOMPLETE: {'/vendor/app/app4.apk'},
},
block_image_diff.imgdiff_stats.stats)
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 6edf80c..a4fa4f9 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -643,7 +643,7 @@
@staticmethod
def _create_payload_full(secondary=False):
target_file = construct_target_files(secondary)
- payload = Payload()
+ payload = Payload(secondary)
payload.Generate(target_file)
return payload
@@ -713,6 +713,13 @@
with open(payload.payload_properties) as properties_fp:
self.assertIn("POWERWASH=1", properties_fp.read())
+ def test_Sign_secondary(self):
+ payload = self._create_payload_full(secondary=True)
+ payload.Sign(PayloadSigner())
+
+ with open(payload.payload_properties) as properties_fp:
+ self.assertIn("SWITCH_SLOT_ON_REBOOT=0", properties_fp.read())
+
def test_Sign_badSigner(self):
"""Tests that signing failure can be captured."""
payload = self._create_payload_full()
@@ -762,7 +769,7 @@
output_file = common.MakeTempFile(suffix='.zip')
with zipfile.ZipFile(output_file, 'w') as output_zip:
- payload.WriteToZip(output_zip, secondary=True)
+ payload.WriteToZip(output_zip)
with zipfile.ZipFile(output_file) as verify_zip:
# First make sure we have the essential entries.