Merge "configstore: add default implementation of configstore@1.0 HAL"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 9d3fc23..e0c826c 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -400,6 +400,11 @@
$(call add-clean-step, rm -rf $(TARGET_OUT_ETC)/init)
+# Libraries are moved from {system|vendor}/lib to ./lib/framework, ./lib/vndk, etc.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor/lib*)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/core/Makefile b/core/Makefile
index 5a74763..b0a03b5 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -910,7 +910,15 @@
$(ALL_DEFAULT_INSTALLED_MODULES))
recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
-recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery
+recovery_sepolicy := \
+ $(TARGET_RECOVERY_ROOT_OUT)/sepolicy \
+ $(TARGET_RECOVERY_ROOT_OUT)/file_contexts.bin \
+ $(TARGET_RECOVERY_ROOT_OUT)/plat_property_contexts \
+ $(TARGET_RECOVERY_ROOT_OUT)/nonplat_property_contexts
+# Passed into rsync from non-recovery root to recovery root, to avoid overwriting recovery-specific
+# SELinux files
+IGNORE_RECOVERY_SEPOLICY := $(patsubst $(TARGET_RECOVERY_OUT)/%,--exclude=/%,$(recovery_sepolicy))
+
recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
recovery_build_prop := $(intermediate_system_build_prop)
@@ -1045,14 +1053,13 @@
$(hide) mkdir -p $(TARGET_RECOVERY_OUT)
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/sdcard $(TARGET_RECOVERY_ROOT_OUT)/tmp
@echo Copying baseline ramdisk...
- $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
+ # Use rsync because "cp -Rf" fails to overwrite broken symlinks on Mac.
+ $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_RECOVERY_SEPOLICY) $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
@echo Modifying ramdisk contents...
$(if $(BOARD_RECOVERY_KERNEL_MODULES), \
$(call build-image-kernel-modules,$(BOARD_RECOVERY_KERNEL_MODULES),$(TARGET_RECOVERY_ROOT_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_recovery)))
$(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
$(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
- $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
- $(hide) cp -f $(recovery_sepolicy) $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
$(hide) cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ || true # Ignore error when the src file doesn't exist.
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/res
$(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/*
@@ -2053,7 +2060,7 @@
$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "blockimgdiff_versions=1,2,3,4" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "blockimgdiff_versions=3,4" >> $(zip_root)/META/misc_info.txt
ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
# OTA scripts are only interested in fingerprint related properties
$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
diff --git a/core/base_rules.mk b/core/base_rules.mk
index c65d3ce..1fb516b 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -78,6 +78,8 @@
endif
endif
+my_module_is_soong := $(if $(filter $(OUT_DIR)/soong/%,$(LOCAL_MODULE_MAKEFILE)),true,false)
+
# Ninja has an implicit dependency on the command being run, and kati will
# regenerate the ninja manifest if any read makefile changes, so there is no
# need to have dependencies on makefiles.
@@ -163,6 +165,14 @@
endif
my_module_path := $(patsubst %/,%,$(my_module_path))
my_module_relative_path := $(strip $(LOCAL_MODULE_RELATIVE_PATH))
+
+# my_module_default_path is the path that is automatically chosen according to the attributes of
+# a module. It is used when the module does not explicitly specify install path using LOCAL_MODULE_PATH.
+# If LOCAL_MODULE_PATH is specified, it is always respected and my_module_default_path is
+# ignored. However, for shared libraries, such a conflict generates a warning so that the module
+# owner can place the library in the correct location (or stop using LOCAL_MODULE_PATH, or migrate
+# to Soong).
+my_module_default_path :=
ifdef LOCAL_IS_HOST_MODULE
partition_tag :=
else
@@ -180,20 +190,135 @@
partition_tag := $(if $(call should-install-to-system,$(my_module_tags)),,_DATA)
endif
endif
-ifeq ($(my_module_path),)
- install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
- ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
- install_path_var := $(install_path_var)_PRIVILEGED
- endif
+install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
+ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
+ install_path_var := $(install_path_var)_PRIVILEGED
+endif
- my_module_path := $($(install_path_var))
- ifeq ($(strip $(my_module_path)),)
- $(error $(LOCAL_PATH): unhandled install path "$(install_path_var) for $(LOCAL_MODULE)")
+my_module_default_path := $($(install_path_var))
+ifeq ($(strip $(my_module_path)$(my_module_default_path)),)
+ $(call pretty-error,internal error in base_rules.mk; $(install_path_var) is not defined.)
+endif
+
+# Determine lib_type and do some sanity checks.
+ifeq ($(LOCAL_IS_HOST_MODULE)$(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+ ifneq ($(filter $(LOCAL_MODULE),$(addprefix lib,$(NDK_PREBUILT_SHARED_LIBRARIES))),)
+ ifneq ($(partition_tag),)
+ $(call pretty-error,"NDK library must be installed at system partition, where other libraries will look for it. It cannot be moved.")
+ endif
+ lib_type := ndk
+ else ifneq ($(filter $(LOCAL_MODULE),$(VNDK_LIBRARIES) $(VNDK_INDIRECT_LIBRARIES)),)
+ ifneq ($(partition_tag),)
+ $(call pretty-error,"VNDK library must be installed at system partition. DO NOT modify VNDK_LIBRARIES or VNDK_INDIRECT_LIBRARIES. \
+If your library needs to be shared between system.img and vendor.img then define it as a VNDK-ext library. Use vndk_ext_library {...} \
+in Android.bp to do so.")
+ endif
+ lib_type := vndk
+ else ifneq ($(filter $(LOCAL_MODULE),$(BOARD_SAME_PROCESS_HAL_DEPS)),)
+ # List of libraries implementing same-process HALs (and their internal sub-libraries) is
+ # defined by vendors.
+ ifeq ($(partition_tag),)
+ $(call pretty-error,Sameprocess HAL must not be installed at system partition)
+ endif
+ lib_type := sameprocess_hal
+ else ifeq ($(LOCAL_IS_HOST_MODULE)$(partition_tag),)
+ lib_type := framework
+ else ifneq ($(partition_tag),_DATA)
+ # Here, vendor means vendor/oem/odm
+ lib_type := vendor_provided
+ else
+ # Test, samples lib falls into this. No lib_type required for them.
+ ifeq ($(filter tests samples,$(LOCAL_MODULE_TAGS)),)
+ $(call pretty-warning,Cannot determine the type of this library)
+ endif
+ lib_type :=
+ endif
+else
+ lib_type :=
+endif
+
+# This is the default path until N. From O, the default path is changed.
+# Let's save the old default path in case we need a symlink to it later.
+my_module_pre_o_default_path := $(my_module_default_path)
+
+# Special case for pre_o_default_path of Soong defined modules.
+# For those modules, we guess their pre_o_default_path by removing /ndk, /vndk, etc.
+# from their LOCAL_MODULE_PATH. This is because relative_install_path is already
+# embedded in my_module_path.
+ifeq ($(my_module_is_soong),true)
+ifndef LOCAL_IS_HOST_MODULE
+ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+ my_module_pre_o_default_path := $(my_module_path)
+ my_module_pre_o_default_path := $(subst /vndk-ext,,$(my_module_pre_o_default_path))
+ my_module_pre_o_default_path := $(subst /vndk,,$(my_module_pre_o_default_path))
+ my_module_pre_o_default_path := $(subst /ndk,,$(my_module_pre_o_default_path))
+ my_module_pre_o_default_path := $(subst /sameprocess,,$(my_module_pre_o_default_path))
+endif
+endif
+endif
+
+# Amend the default_path once again depending on lib_type. This is new from O.
+ifeq ($(lib_type),vndk)
+ my_module_default_path := $(my_module_default_path)/vndk
+ # TODO(b/35020246): before P, we should support installing two snapshots of VNDK
+ # libraries. One for framework libs and execs and the other for vendor libs and execs.
+else ifeq ($(lib_type),ndk)
+ my_module_default_path := $(my_module_default_path)/ndk
+else ifeq ($(lib_type),sameprocess_hal)
+ my_module_default_path := $(my_module_default_path)/sameprocess
+endif
+
+# Relative path is appended to path resolved so far
+ifneq ($(my_module_relative_path),)
+ my_module_default_path := $(my_module_default_path)/$(my_module_relative_path)
+ my_module_pre_o_default_path := $(my_module_pre_o_default_path)/$(my_module_relative_path)
+ ifneq ($(my_module_path),)
+ my_module_path := $(my_module_path)/$(my_module_relative_path)
endif
endif
-ifneq ($(my_module_relative_path),)
- my_module_path := $(my_module_path)/$(my_module_relative_path)
+
+_lib_moved :=
+ifeq ($(my_module_path),)
+ # If LOCAL_MODULE_PATH is not specified, use the automatically determined path.
+ my_module_path := $(my_module_default_path)
+
+ # Mark if the lib is installed to a different path than before. With this hint,
+ # a symlink is created if BOARD_SYMLINK_FOR_LIBS is true.
+ ifneq ($(my_module_path),$(my_module_pre_o_default_path))
+ _lib_moved := true
+ endif
+else
+ # If LOCAL_MODULE_PATH is specified, we respect it.
+ ifndef LOCAL_IS_HOST_MODULE
+ ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+ ifeq ($(filter $(TARGET_OUT_DATA)%,$(my_module_path)),)
+ # However, we are kind enough to warn if it seems to be wrong.
+ # Warn only for Android.mk defined shared libraries that will be installed
+ # to system or vendor partition. For other types of files - especially
+ # Soong-defined libs -, we don't warn because Soong always gives us correct
+ # paths.
+ ifeq ($(my_module_is_soong),false)
+ ifneq ($(my_module_path),$(my_module_default_path))
+ # TODO(b/35020635): s/warning/error/
+ $(call pretty-warning,$(lib_type) library must be installed to \
+$(subst $(PRODUCT_OUT)/,,$(my_module_default_path)) but requested to be installed at \
+$(subst $(PRODUCT_OUT)/,,$(my_module_path)). Please fix.)
+ endif
+ else
+ # For a Soong-defined module, a symlink is provided if the path has been amended
+ # ...except for vndk-ext libraries because there already is a symlink for the
+ # vndk (unmodified) version of the vndk-ext library.
+ ifneq ($(my_module_path),$(my_module_pre_o_default_path))
+ ifeq ($(filter vndk-ext,$(subst /,$(space),$(my_module_path))),)
+ _lib_moved := true
+ endif
+ endif
+ endif
+ endif
+ endif
+ endif
endif
+
endif # not LOCAL_UNINSTALLABLE_MODULE
ifneq ($(strip $(LOCAL_BUILT_MODULE)$(LOCAL_INSTALLED_MODULE)),)
@@ -384,8 +509,30 @@
# Rule to install the module's companion symlinks
my_installed_symlinks := $(addprefix $(my_module_path)/,$(LOCAL_MODULE_SYMLINKS) $(LOCAL_MODULE_SYMLINKS_$(my_32_64_bit_suffix)))
+
+# If this lib is installed to a different directory than before,
+# make a symlink from the old path to the new path.
+# This symlink is required because there are so many places that expect the old
+# path (e.g. the system property rild.libpath). Until those places are all fixed,
+# we keep this symlink.
+# TODO(b/34917183): remove symlinks after everything migrates to the new paths;
+# this should be done before the O launch; otherwise it will be a security hole,
+# since we can't restrict access to a certain set of libraries by using the
+# directory path.
+ifneq ($(BOARD_SYMLINK_FOR_LIBS),false)
+ifeq ($(_lib_moved),true)
+ my_installed_symlinks += $(my_module_pre_o_default_path)/$(my_installed_module_stem)
+endif
+else
+# Symlinks for ndk libs are permanent.
+ifeq ($(lib_type)$(_lib_moved),ndktrue)
+ my_installed_symlinks += $(my_module_pre_o_default_path)/$(my_installed_module_stem)
+endif
+endif
+
+# Make a symlink $(symlink) -> $(LOCAL_INSTALLED_MODULE)
$(foreach symlink,$(my_installed_symlinks),\
- $(call symlink-file,$(LOCAL_INSTALLED_MODULE),$(my_installed_module_stem),$(symlink)))
+ $(call symlink-file,$(LOCAL_INSTALLED_MODULE),$(LOCAL_INSTALLED_MODULE),$(symlink),true))
$(my_all_targets) : | $(my_installed_symlinks)
diff --git a/core/definitions.mk b/core/definitions.mk
index 1a7cc50..aae269b 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2845,8 +2845,10 @@
# Define a rule to create a symlink to a file.
# $(1): full path to source
-# $(2): source (may be relative)
-# $(3): full path to destination
+# $(2): target of the link
+# $(3): full path of the symlink
+# $(4): (optional) when set to true, $(2) is recognized as a path from the build root and
+# thus $(3) is linked to $(2) via a relative path. Off by default.
define symlink-file
$(eval $(_symlink-file))
endef
@@ -2858,7 +2860,9 @@
@echo "Symlink: $$@ -> $(2)"
@mkdir -p $(dir $$@)
@rm -rf $$@
- $(hide) ln -sf $(2) $$@
+ $(if $(filter true,$(4)),\
+ $(hide) python -c "import os.path; import os; os.symlink(os.path.relpath('$(2)','$(dir $(3))'), '$$@')",\
+ $(hide) ln -sf $(2) $$@)
endef
###########################################################
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 576c8ab..ff8a51d 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -69,7 +69,9 @@
echo ''; \
echo ' "ArtUseReadBarrier": $(if $(filter false,$(PRODUCT_ART_USE_READ_BARRIER)),false,true),'; \
echo ''; \
- echo ' "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)"'; \
+ echo ' "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)",'; \
+ echo ''; \
+ echo ' "SameProcessHalDeps": [$(if $(BOARD_SAME_PROCESS_HAL_DEPS),"$(subst $(space),"$(comma)",$(BOARD_SAME_PROCESS_HAL_DEPS))")]'; \
echo '}') > $(SOONG_VARIABLES_TMP); \
if ! cmp -s $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); then \
mv $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); \
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index d8f900e..1455a44 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -37,13 +37,14 @@
$(compatibility_zip): PRIVATE_TOOLS := $(test_tools)
$(compatibility_zip): PRIVATE_SUITE_NAME := $(test_suite_name)
$(compatibility_zip): PRIVATE_DYNAMIC_CONFIG := $(test_suite_dynamic_config)
-$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) | $(ADB) $(ACP)
+$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) $(SOONG_ZIP) | $(ADB) $(ACP)
# Make dir structure
$(hide) mkdir -p $(PRIVATE_OUT_DIR)/tools $(PRIVATE_OUT_DIR)/testcases
# Copy tools
$(hide) $(ACP) -fp $(PRIVATE_TOOLS) $(PRIVATE_OUT_DIR)/tools
$(if $(PRIVATE_DYNAMIC_CONFIG),$(hide) $(ACP) -fp $(PRIVATE_DYNAMIC_CONFIG) $(PRIVATE_OUT_DIR)/testcases/$(PRIVATE_SUITE_NAME).dynamic)
- $(hide) cd $(dir $@) && zip -rq $(notdir $@) $(PRIVATE_NAME)
+ $(hide) find $(dir $@)/$(PRIVATE_NAME) | sort >$@.list
+ $(hide) $(SOONG_ZIP) -d -o $@ -C $(dir $@) -l $@.list
# Reset all input variables
test_suite_name :=
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index 15438df..cd33693 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -86,13 +86,10 @@
# SELinux packages
PRODUCT_PACKAGES += \
- file_contexts.bin \
- nonplat_file_contexts \
nonplat_mac_permissions.xml \
nonplat_property_contexts \
nonplat_seapp_contexts \
nonplat_service_contexts \
- plat_file_contexts \
plat_mac_permissions.xml \
plat_property_contexts \
plat_seapp_contexts \
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 6c842dc..d8fcc41 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -294,7 +294,7 @@
self.touched_src_sha1 = None
self.disable_imgdiff = disable_imgdiff
- assert version in (1, 2, 3, 4)
+ assert version in (3, 4)
self.tgt = tgt
if src is None:
@@ -333,14 +333,11 @@
self.FindVertexSequence()
# Fix up the ordering dependencies that the sequence didn't
# satisfy.
- if self.version == 1:
- self.RemoveBackwardEdges()
- else:
- self.ReverseBackwardEdges()
- self.ImproveVertexSequence()
+ self.ReverseBackwardEdges()
+ self.ImproveVertexSequence()
# Ensure the runtime stash size is under the limit.
- if self.version >= 2 and common.OPTIONS.cache_size is not None:
+ if common.OPTIONS.cache_size is not None:
self.ReviseStashSize()
# Double-check our work.
@@ -369,13 +366,6 @@
out = []
total = 0
- # In BBOTA v2, 'stashes' records the map from 'stash_raw_id' to 'stash_id'
- # (aka 'sid', which is the stash slot id). The stash in a 'stash_id' will
- # be freed immediately after its use. So unlike 'stash_raw_id' (which
- # uniquely identifies each pair of stashed blocks), the same 'stash_id'
- # may be reused during the life cycle of an update (maintained by
- # 'free_stash_ids' heap and 'next_stash_id').
- #
# In BBOTA v3+, it uses the hash of the stashed blocks as the stash slot
# id. 'stashes' records the map from 'hash' to the ref count. The stash
# will be freed only if the count decrements to zero.
@@ -383,36 +373,17 @@
stashed_blocks = 0
max_stashed_blocks = 0
- if self.version == 2:
- free_stash_ids = []
- next_stash_id = 0
-
for xf in self.transfers:
- if self.version < 2:
- assert not xf.stash_before
- assert not xf.use_stash
-
- for stash_raw_id, sr in xf.stash_before:
- if self.version == 2:
- assert stash_raw_id not in stashes
- if free_stash_ids:
- sid = heapq.heappop(free_stash_ids)
- else:
- sid = next_stash_id
- next_stash_id += 1
- stashes[stash_raw_id] = sid
- stashed_blocks += sr.size()
- out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+ for _, sr in xf.stash_before:
+ sh = self.src.RangeSha1(sr)
+ if sh in stashes:
+ stashes[sh] += 1
else:
- sh = self.src.RangeSha1(sr)
- if sh in stashes:
- stashes[sh] += 1
- else:
- stashes[sh] = 1
- stashed_blocks += sr.size()
- self.touched_src_ranges = self.touched_src_ranges.union(sr)
- out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
+ stashes[sh] = 1
+ stashed_blocks += sr.size()
+ self.touched_src_ranges = self.touched_src_ranges.union(sr)
+ out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
if stashed_blocks > max_stashed_blocks:
max_stashed_blocks = stashed_blocks
@@ -420,75 +391,47 @@
free_string = []
free_size = 0
- if self.version == 1:
- src_str = xf.src_ranges.to_string_raw() if xf.src_ranges else ""
- elif self.version >= 2:
+ # <# blocks> <src ranges>
+ # OR
+ # <# blocks> <src ranges> <src locs> <stash refs...>
+ # OR
+ # <# blocks> - <stash refs...>
- # <# blocks> <src ranges>
- # OR
- # <# blocks> <src ranges> <src locs> <stash refs...>
- # OR
- # <# blocks> - <stash refs...>
+ size = xf.src_ranges.size()
+ src_str = [str(size)]
- size = xf.src_ranges.size()
- src_str = [str(size)]
+ unstashed_src_ranges = xf.src_ranges
+ mapped_stashes = []
+ for _, sr in xf.use_stash:
+ unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+ sh = self.src.RangeSha1(sr)
+ sr = xf.src_ranges.map_within(sr)
+ mapped_stashes.append(sr)
+ assert sh in stashes
+ src_str.append("%s:%s" % (sh, sr.to_string_raw()))
+ stashes[sh] -= 1
+ if stashes[sh] == 0:
+ free_string.append("free %s\n" % (sh,))
+ free_size += sr.size()
+ stashes.pop(sh)
- unstashed_src_ranges = xf.src_ranges
- mapped_stashes = []
- for stash_raw_id, sr in xf.use_stash:
- unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
- sh = self.src.RangeSha1(sr)
- sr = xf.src_ranges.map_within(sr)
- mapped_stashes.append(sr)
- if self.version == 2:
- sid = stashes.pop(stash_raw_id)
- src_str.append("%d:%s" % (sid, sr.to_string_raw()))
- # A stash will be used only once. We need to free the stash
- # immediately after the use, instead of waiting for the automatic
- # clean-up at the end. Because otherwise it may take up extra space
- # and lead to OTA failures.
- # Bug: 23119955
- free_string.append("free %d\n" % (sid,))
- free_size += sr.size()
- heapq.heappush(free_stash_ids, sid)
- else:
- assert sh in stashes
- src_str.append("%s:%s" % (sh, sr.to_string_raw()))
- stashes[sh] -= 1
- if stashes[sh] == 0:
- free_string.append("free %s\n" % (sh,))
- free_size += sr.size()
- stashes.pop(sh)
-
- if unstashed_src_ranges:
- src_str.insert(1, unstashed_src_ranges.to_string_raw())
- if xf.use_stash:
- mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
- src_str.insert(2, mapped_unstashed.to_string_raw())
- mapped_stashes.append(mapped_unstashed)
- self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
- else:
- src_str.insert(1, "-")
+ if unstashed_src_ranges:
+ src_str.insert(1, unstashed_src_ranges.to_string_raw())
+ if xf.use_stash:
+ mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
+ src_str.insert(2, mapped_unstashed.to_string_raw())
+ mapped_stashes.append(mapped_unstashed)
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+ else:
+ src_str.insert(1, "-")
+ self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
- src_str = " ".join(src_str)
+ src_str = " ".join(src_str)
- # all versions:
+ # version 3+:
# zero <rangeset>
# new <rangeset>
# erase <rangeset>
- #
- # version 1:
- # bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
- # imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
- # move <src rangeset> <tgt rangeset>
- #
- # version 2:
- # bsdiff patchstart patchlen <tgt rangeset> <src_str>
- # imgdiff patchstart patchlen <tgt rangeset> <src_str>
- # move <tgt rangeset> <src_str>
- #
- # version 3:
# bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
# imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
# move hash <tgt rangeset> <src_str>
@@ -503,41 +446,6 @@
assert xf.tgt_ranges
assert xf.src_ranges.size() == tgt_size
if xf.src_ranges != xf.tgt_ranges:
- if self.version == 1:
- out.append("%s %s %s\n" % (
- xf.style,
- xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
- elif self.version == 2:
- out.append("%s %s %s\n" % (
- xf.style,
- xf.tgt_ranges.to_string_raw(), src_str))
- elif self.version >= 3:
- # take into account automatic stashing of overlapping blocks
- if xf.src_ranges.overlaps(xf.tgt_ranges):
- temp_stash_usage = stashed_blocks + xf.src_ranges.size()
- if temp_stash_usage > max_stashed_blocks:
- max_stashed_blocks = temp_stash_usage
-
- self.touched_src_ranges = self.touched_src_ranges.union(
- xf.src_ranges)
-
- out.append("%s %s %s %s\n" % (
- xf.style,
- xf.tgt_sha1,
- xf.tgt_ranges.to_string_raw(), src_str))
- total += tgt_size
- elif xf.style in ("bsdiff", "imgdiff"):
- assert xf.tgt_ranges
- assert xf.src_ranges
- if self.version == 1:
- out.append("%s %d %d %s %s\n" % (
- xf.style, xf.patch_start, xf.patch_len,
- xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
- elif self.version == 2:
- out.append("%s %d %d %s %s\n" % (
- xf.style, xf.patch_start, xf.patch_len,
- xf.tgt_ranges.to_string_raw(), src_str))
- elif self.version >= 3:
# take into account automatic stashing of overlapping blocks
if xf.src_ranges.overlaps(xf.tgt_ranges):
temp_stash_usage = stashed_blocks + xf.src_ranges.size()
@@ -547,12 +455,28 @@
self.touched_src_ranges = self.touched_src_ranges.union(
xf.src_ranges)
- out.append("%s %d %d %s %s %s %s\n" % (
+ out.append("%s %s %s %s\n" % (
xf.style,
- xf.patch_start, xf.patch_len,
- xf.src_sha1,
xf.tgt_sha1,
xf.tgt_ranges.to_string_raw(), src_str))
+ total += tgt_size
+ elif xf.style in ("bsdiff", "imgdiff"):
+ assert xf.tgt_ranges
+ assert xf.src_ranges
+ # take into account automatic stashing of overlapping blocks
+ if xf.src_ranges.overlaps(xf.tgt_ranges):
+ temp_stash_usage = stashed_blocks + xf.src_ranges.size()
+ if temp_stash_usage > max_stashed_blocks:
+ max_stashed_blocks = temp_stash_usage
+
+ self.touched_src_ranges = self.touched_src_ranges.union(xf.src_ranges)
+
+ out.append("%s %d %d %s %s %s %s\n" % (
+ xf.style,
+ xf.patch_start, xf.patch_len,
+ xf.src_sha1,
+ xf.tgt_sha1,
+ xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style == "zero":
assert xf.tgt_ranges
@@ -566,7 +490,7 @@
out.append("".join(free_string))
stashed_blocks -= free_size
- if self.version >= 2 and common.OPTIONS.cache_size is not None:
+ if common.OPTIONS.cache_size is not None:
# Sanity check: abort if we're going to need more stash space than
# the allowed size (cache_size * threshold). There are two purposes
# of having a threshold here. a) Part of the cache may have been
@@ -581,8 +505,7 @@
self.tgt.blocksize, max_allowed, cache_size,
stash_threshold)
- if self.version >= 3:
- self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
+ self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
@@ -610,32 +533,25 @@
out.insert(0, "%d\n" % (self.version,)) # format version number
out.insert(1, "%d\n" % (total,))
- if self.version == 2:
- # v2 only: after the total block count, we give the number of stash slots
- # needed, and the maximum size needed (in blocks).
- out.insert(2, str(next_stash_id) + "\n")
- out.insert(3, str(max_stashed_blocks) + "\n")
- elif self.version >= 3:
- # v3+: the number of stash slots is unused.
- out.insert(2, "0\n")
- out.insert(3, str(max_stashed_blocks) + "\n")
+ # v3+: the number of stash slots is unused.
+ out.insert(2, "0\n")
+ out.insert(3, str(max_stashed_blocks) + "\n")
with open(prefix + ".transfer.list", "wb") as f:
for i in out:
f.write(i)
- if self.version >= 2:
- self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
- OPTIONS = common.OPTIONS
- if OPTIONS.cache_size is not None:
- max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
- print("max stashed blocks: %d (%d bytes), "
- "limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, self._max_stashed_size, max_allowed,
- self._max_stashed_size * 100.0 / max_allowed))
- else:
- print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
- max_stashed_blocks, self._max_stashed_size))
+ self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+ OPTIONS = common.OPTIONS
+ if OPTIONS.cache_size is not None:
+ max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
+ print("max stashed blocks: %d (%d bytes), "
+ "limit: %d bytes (%.2f%%)\n" % (
+ max_stashed_blocks, self._max_stashed_size, max_allowed,
+ self._max_stashed_size * 100.0 / max_allowed))
+ else:
+ print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
+ max_stashed_blocks, self._max_stashed_size))
def ReviseStashSize(self):
print("Revising stash size...")
@@ -663,10 +579,6 @@
stashed_blocks = 0
new_blocks = 0
- if self.version == 2:
- free_stash_ids = []
- next_stash_id = 0
-
# Now go through all the commands. Compute the required stash size on the
# fly. If a command requires excess stash than available, it deletes the
# stash by replacing the command that uses the stash with a "new" command
@@ -678,12 +590,9 @@
for stash_raw_id, sr in xf.stash_before:
# Check the post-command stashed_blocks.
stashed_blocks_after = stashed_blocks
- if self.version == 2:
+ sh = self.src.RangeSha1(sr)
+ if sh not in stashes:
stashed_blocks_after += sr.size()
- else:
- sh = self.src.RangeSha1(sr)
- if sh not in stashes:
- stashed_blocks_after += sr.size()
if stashed_blocks_after > max_allowed:
# We cannot stash this one for a later command. Find out the command
@@ -693,24 +602,15 @@
print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
else:
# Update the stashes map.
- if self.version == 2:
- assert stash_raw_id not in stashes
- if free_stash_ids:
- sid = heapq.heappop(free_stash_ids)
- else:
- sid = next_stash_id
- next_stash_id += 1
- stashes[stash_raw_id] = sid
+ if sh in stashes:
+ stashes[sh] += 1
else:
- if sh in stashes:
- stashes[sh] += 1
- else:
- stashes[sh] = 1
+ stashes[sh] = 1
stashed_blocks = stashed_blocks_after
# "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
# ComputePatches(), they both have the style of "diff".
- if xf.style == "diff" and self.version >= 3:
+ if xf.style == "diff":
assert xf.tgt_ranges and xf.src_ranges
if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed:
@@ -732,18 +632,13 @@
cmd.ConvertToNew()
# xf.use_stash may generate free commands.
- for stash_raw_id, sr in xf.use_stash:
- if self.version == 2:
- sid = stashes.pop(stash_raw_id)
+ for _, sr in xf.use_stash:
+ sh = self.src.RangeSha1(sr)
+ assert sh in stashes
+ stashes[sh] -= 1
+ if stashes[sh] == 0:
stashed_blocks -= sr.size()
- heapq.heappush(free_stash_ids, sid)
- else:
- sh = self.src.RangeSha1(sr)
- assert sh in stashes
- stashes[sh] -= 1
- if stashes[sh] == 0:
- stashed_blocks -= sr.size()
- stashes.pop(sh)
+ stashes.pop(sh)
num_of_bytes = new_blocks * self.tgt.blocksize
print(" Total %d blocks (%d bytes) are packed as new blocks due to "
@@ -823,6 +718,9 @@
diff_total = len(diff_queue)
patches = [None] * diff_total
+ if sys.stdout.isatty():
+ global diff_done
+ diff_done = 0
# Using multiprocessing doesn't give additional benefits, due to the
# pattern of the code. The diffing work is done by subprocess.call, which
@@ -863,7 +761,9 @@
with lock:
patches[patch_index] = (xf_index, patch)
if sys.stdout.isatty():
- progress = len(patches) * 100 / diff_total
+ global diff_done
+ diff_done += 1
+ progress = diff_done * 100 / diff_total
# '\033[K' is to clear to EOL.
print(' [%d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
sys.stdout.flush()
@@ -911,9 +811,8 @@
# Check that the input blocks for this transfer haven't yet been touched.
x = xf.src_ranges
- if self.version >= 2:
- for _, sr in xf.use_stash:
- x = x.subtract(sr)
+ for _, sr in xf.use_stash:
+ x = x.subtract(sr)
for s, e in x:
# Source image could be larger. Don't check the blocks that are in the
@@ -1364,7 +1263,7 @@
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
- "diff", self.transfers, self.version >= 3)
+ "diff", self.transfers, True)
continue
b = os.path.basename(tgt_fn)
@@ -1372,7 +1271,7 @@
# Look for an exact basename match in the source.
src_fn = self.src_basenames[b]
AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers, self.version >= 3)
+ "diff", self.transfers, True)
continue
b = re.sub("[0-9]+", "#", b)
@@ -1383,7 +1282,7 @@
# that get bumped.)
src_fn = self.src_numpatterns[b]
AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers, self.version >= 3)
+ "diff", self.transfers, True)
continue
AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 73cd07e..16c8018 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -25,7 +25,6 @@
import re
import subprocess
import sys
-import commands
import common
import shlex
import shutil
@@ -52,29 +51,24 @@
return (output, p.returncode)
def GetVerityFECSize(partition_size):
- cmd = "fec -s %d" % partition_size
- status, output = commands.getstatusoutput(cmd)
- if status:
- print output
+ cmd = ["fec", "-s", str(partition_size)]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
return False, 0
return True, int(output)
def GetVerityTreeSize(partition_size):
- cmd = "build_verity_tree -s %d"
- cmd %= partition_size
- status, output = commands.getstatusoutput(cmd)
- if status:
- print output
+ cmd = ["build_verity_tree", "-s", str(partition_size)]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
return False, 0
return True, int(output)
def GetVerityMetadataSize(partition_size):
- cmd = "system/extras/verity/build_verity_metadata.py size %d"
- cmd %= partition_size
-
- status, output = commands.getstatusoutput(cmd)
- if status:
- print output
+ cmd = ["system/extras/verity/build_verity_metadata.py", "size",
+ str(partition_size)]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
return False, 0
return True, int(output)
@@ -191,21 +185,19 @@
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
padding_size):
- cmd = "fec -e -p %d %s %s %s" % (padding_size, sparse_image_path,
- verity_path, verity_fec_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
+ verity_path, verity_fec_path]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "Could not build FEC data! Error: %s" % output
return False
return True
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
- cmd = "build_verity_tree -A %s %s %s" % (
- FIXED_SALT, sparse_image_path, verity_image_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
+ verity_image_path]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "Could not build verity tree! Error: %s" % output
return False
root, salt = output.split()
@@ -215,16 +207,13 @@
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key, signer_args):
- cmd_template = (
- "system/extras/verity/build_verity_metadata.py build " +
- "%s %s %s %s %s %s %s")
- cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
- block_device, signer_path, key)
+ cmd = ["system/extras/verity/build_verity_metadata.py", "build",
+ str(image_size), verity_metadata_path, root_hash, salt, block_device,
+ signer_path, key]
if signer_args:
- cmd += " --signer_args=\"%s\"" % (' '.join(signer_args),)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "Could not build verity metadata! Error: %s" % output
return False
return True
@@ -238,22 +227,19 @@
Returns:
True on success, False on failure.
"""
- cmd = "append2simg %s %s"
- cmd %= (sparse_image_path, unsparse_image_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd = ["append2simg", sparse_image_path, unsparse_image_path]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "%s: %s" % (error_message, output)
return False
return True
def Append(target, file_to_append, error_message):
- cmd = 'cat %s >> %s' % (file_to_append, target)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
- print "%s: %s" % (error_message, output)
- return False
+ print "appending %s to %s" % (file_to_append, target)
+ with open(target, "a") as out_file:
+ with open(file_to_append, "r") as input_file:
+ for line in input_file:
+ out_file.write(line)
return True
def BuildVerifiedImage(data_image_path, verity_image_path,
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 3224333..e200f9f 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -1340,6 +1340,7 @@
version = max(
int(i) for i in
OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+ assert version >= 3
self.version = version
b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
@@ -1404,7 +1405,7 @@
# incremental OTA
else:
- if touched_blocks_only and self.version >= 3:
+ if touched_blocks_only:
ranges = self.touched_src_ranges
expected_sha1 = self.touched_src_sha1
else:
@@ -1416,16 +1417,12 @@
return
ranges_str = ranges.to_string_raw()
- if self.version >= 3:
- script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
- 'block_image_verify("%s", '
- 'package_extract_file("%s.transfer.list"), '
- '"%s.new.dat", "%s.patch.dat")) then') % (
- self.device, ranges_str, expected_sha1,
- self.device, partition, partition, partition))
- else:
- script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
- self.device, ranges_str, self.src.TotalSha1()))
+ script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
+ 'block_image_verify("%s", '
+ 'package_extract_file("%s.transfer.list"), '
+ '"%s.new.dat", "%s.patch.dat")) then') % (
+ self.device, ranges_str, expected_sha1,
+ self.device, partition, partition, partition))
script.Print('Verified %s image...' % (partition,))
script.AppendExtra('else')