Merge "Fix modules.dep breakage with multiple module dependencies"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 9d3fc23..acd5273 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -400,6 +400,16 @@
$(call add-clean-step, rm -rf $(TARGET_OUT_ETC)/init)
+# Libraries are moved from {system|vendor}/lib to ./lib/framework, ./lib/vndk, etc.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor/lib*)
+
+# Revert that move
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor/lib*)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/buildspec.mk.default b/buildspec.mk.default
index a7ac7ec..b31578a 100644
--- a/buildspec.mk.default
+++ b/buildspec.mk.default
@@ -36,6 +36,12 @@
#TARGET_BUILD_VARIANT:=eng
endif
+# Choose a targeted release. If you don't pick one, the default is the
+# soonest future release.
+ifndef TARGET_PLATFORM_RELEASE
+#TARGET_PLATFORM_RELEASE:=OPR1
+endif
+
# Choose additional targets to always install, even when building
# minimal targets like "make droid". This takes simple target names
# like "Browser" or "MyApp", the names used by LOCAL_MODULE or
@@ -105,4 +111,4 @@
# variable will be changed. After you have modified this file with the new
# changes (see buildspec.mk.default), update this to the new value from
# buildspec.mk.default.
-BUILD_ENV_SEQUENCE_NUMBER := 12
+BUILD_ENV_SEQUENCE_NUMBER := 13
diff --git a/core/Makefile b/core/Makefile
index 7c394bc..7de340f 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -835,7 +835,7 @@
endif
endif
-SELINUX_FC := $(TARGET_ROOT_OUT)/file_contexts.bin
+SELINUX_FC := $(call intermediates-dir-for,ETC,file_contexts.bin)/file_contexts.bin
INTERNAL_USERIMAGES_DEPS += $(SELINUX_FC)
INTERNAL_USERIMAGES_DEPS += $(BLK_ALLOC_TO_BASE_FS)
@@ -910,7 +910,15 @@
$(ALL_DEFAULT_INSTALLED_MODULES))
recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
-recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery
+recovery_sepolicy := \
+ $(TARGET_RECOVERY_ROOT_OUT)/sepolicy \
+ $(TARGET_RECOVERY_ROOT_OUT)/file_contexts.bin \
+ $(TARGET_RECOVERY_ROOT_OUT)/plat_property_contexts \
+ $(TARGET_RECOVERY_ROOT_OUT)/nonplat_property_contexts
+# Passed to rsync when copying the non-recovery root into the recovery root,
+# to avoid overwriting recovery-specific SELinux files
+IGNORE_RECOVERY_SEPOLICY := $(patsubst $(TARGET_RECOVERY_OUT)/%,--exclude=/%,$(recovery_sepolicy))
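+# For example, since TARGET_RECOVERY_ROOT_OUT sits at $(TARGET_RECOVERY_OUT)/root
+# (per envsetup.mk), $(TARGET_RECOVERY_ROOT_OUT)/sepolicy maps to --exclude=/root/sepolicy.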
+
recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
recovery_build_prop := $(intermediate_system_build_prop)
@@ -1045,14 +1053,13 @@
$(hide) mkdir -p $(TARGET_RECOVERY_OUT)
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/sdcard $(TARGET_RECOVERY_ROOT_OUT)/tmp
@echo Copying baseline ramdisk...
- $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
+ # Use rsync because "cp -Rf" fails to overwrite broken symlinks on Mac.
+ $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_RECOVERY_SEPOLICY) $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
@echo Modifying ramdisk contents...
$(if $(BOARD_RECOVERY_KERNEL_MODULES), \
$(call build-image-kernel-modules,$(BOARD_RECOVERY_KERNEL_MODULES),$(TARGET_RECOVERY_ROOT_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_recovery)))
$(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
$(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
- $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
- $(hide) cp -f $(recovery_sepolicy) $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
$(hide) cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ || true # Ignore error when the src file doesn't exist.
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/res
$(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/*
@@ -1348,7 +1355,7 @@
$(hide) echo "PDK.DEXPREOPT.$(m).MULTILIB:=$(DEXPREOPT.$(m).MULTILIB)" >> $@$(newline)\
$(hide) echo "PDK.DEXPREOPT.$(m).DEX_PREOPT_FLAGS:=$(DEXPREOPT.$(m).DEX_PREOPT_FLAGS)" >> $@$(newline)\
$(hide) echo "PDK.DEXPREOPT.$(m).PRIVILEGED_MODULE:=$(DEXPREOPT.$(m).PRIVILEGED_MODULE)" >> $@$(newline)\
- $(hide) echo "PDK.DEXPREOPT.$(m).PROPRIETARY_MODULE:=$(DEXPREOPT.$(m).PROPRIETARY_MODULE)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).VENDOR_MODULE:=$(DEXPREOPT.$(m).VENDOR_MODULE)" >> $@$(newline)\
$(hide) echo "PDK.DEXPREOPT.$(m).TARGET_ARCH:=$(DEXPREOPT.$(m).TARGET_ARCH)" >> $@$(newline)\
$(hide) echo "PDK.DEXPREOPT.$(m).STRIPPED_SRC:=$(patsubst $(PRODUCT_OUT)/%,%,$(DEXPREOPT.$(m).INSTALLED_STRIPPED))" >> $@$(newline)\
)
@@ -1631,7 +1638,8 @@
@echo Installed file list: $@
@mkdir -p $(dir $@)
@rm -f $@
- $(hide) build/tools/fileslist.py $(TARGET_OUT_SYSTEM_OTHER) > $@
+ $(hide) build/tools/fileslist.py $(TARGET_OUT_SYSTEM_OTHER) > $(@:.txt=.json)
+ $(hide) build/tools/fileslist_util.py -c $(@:.txt=.json) > $@
systemotherimage_intermediates := \
$(call intermediates-dir-for,PACKAGING,system_other)
@@ -1701,8 +1709,8 @@
$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS) $(DEPMOD) $(BOARD_VENDOR_KERNEL_MODULES)
$(build-vendorimage-target)
-.PHONY: vendorimage-nodeps
-vendorimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS) $(DEPMOD)
+.PHONY: vendorimage-nodeps vnod
+vendorimage-nodeps vnod: | $(INTERNAL_USERIMAGES_DEPS) $(DEPMOD)
$(build-vendorimage-target)
else ifdef BOARD_PREBUILT_VENDORIMAGE
@@ -1884,14 +1892,18 @@
ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
# default to common dir for device vendor
-$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_DEVICE_DIR)/../common
+tool_extensions := $(TARGET_DEVICE_DIR)/../common
else
-$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
+tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
endif
+tool_extension := $(wildcard $(tool_extensions)/releasetools.py)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSIONS := $(tool_extensions)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSION := $(tool_extension)
ifeq ($(AB_OTA_UPDATER),true)
# Build zlib fingerprint if using the AB Updater.
updater_dep := $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint
+updater_dep += system/update_engine/update_engine.conf
else
# Build OTA tools if not using the AB Updater.
updater_dep := $(built_ota_tools)
@@ -1905,25 +1917,51 @@
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := RECOVERY
endif
+ifeq ($(AB_OTA_UPDATER),true)
+ ifdef BRILLO_VENDOR_PARTITIONS
+ $(BUILT_TARGET_FILES_PACKAGE): $(foreach p,$(BRILLO_VENDOR_PARTITIONS),\
+ $(call word-colon,1,$(p))/$(call word-colon,2,$(p)))
+ endif
+ ifdef OSRELEASED_DIRECTORY
+ $(BUILT_TARGET_FILES_PACKAGE): $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id
+ $(BUILT_TARGET_FILES_PACKAGE): $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version
+ $(BUILT_TARGET_FILES_PACKAGE): $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/system_version
+ endif
+endif
+
+# Run fs_config while creating the target files package
+# $1: root directory
+# $2: prefix to prepend to each emitted path
+define fs_config
+(cd $(1); find . -type d | sed 's,$$,/,'; find . \! -type d) | cut -c 3- | sort | sed 's,^,$(2),' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC)
+endef
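+# For example, with $(1)=$(zip_root)/SYSTEM and $(2)=system/, find emits
+# ./bin/sh, cut trims it to bin/sh, and sed prefixes it to system/bin/sh
+# before it is piped into fs_config; directories keep a trailing slash.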
+
# Depending on the various images guarantees that the underlying
# directories are up-to-date.
$(BUILT_TARGET_FILES_PACKAGE): \
$(INSTALLED_BOOTIMAGE_TARGET) \
$(INSTALLED_RADIOIMAGE_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_SYSTEMIMAGE) \
+ $(FULL_SYSTEMIMAGE_DEPS) \
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_CACHEIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
- $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) \
+ $(INTERNAL_SYSTEMOTHERIMAGE_FILES) \
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
+ $(INSTALLED_KERNEL_TARGET) \
+ $(INSTALLED_2NDBOOTLOADER_TARGET) \
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
$(SELINUX_FC) \
$(APKCERTS_FILE) \
$(SOONG_ZIP) \
$(HOST_OUT_EXECUTABLES)/fs_config \
- build/tools/releasetools/add_img_to_target_files \
+ $(HOST_OUT_EXECUTABLES)/imgdiff \
+ $(HOST_OUT_EXECUTABLES)/bsdiff \
+ $(BUILD_IMAGE_SRCS) \
| $(ACP)
@echo "Package target files: $@"
+ $(call create-system-vendor-symlink)
$(hide) rm -rf $@ $@.list $(zip_root)
$(hide) mkdir -p $(dir $@) $(zip_root)
ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
@@ -1932,11 +1970,10 @@
$(hide) $(call package_files-copy-root, \
$(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/$(PRIVATE_RECOVERY_OUT)/RAMDISK)
ifdef INSTALLED_KERNEL_TARGET
- $(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/kernel
+ $(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/kernel
endif
ifdef INSTALLED_2NDBOOTLOADER_TARGET
- $(hide) $(ACP) \
- $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/second
+ $(hide) cp $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/second
endif
ifdef INTERNAL_KERNEL_CMDLINE
$(hide) echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/cmdline
@@ -1961,11 +1998,10 @@
@# If we are using recovery as boot, this is already done when processing recovery.
ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
ifdef INSTALLED_KERNEL_TARGET
- $(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/kernel
+ $(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/kernel
endif
ifdef INSTALLED_2NDBOOTLOADER_TARGET
- $(hide) $(ACP) \
- $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
+ $(hide) cp $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
endif
ifdef INTERNAL_KERNEL_CMDLINE
$(hide) echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
@@ -1979,7 +2015,7 @@
endif # BOARD_USES_RECOVERY_AS_BOOT
$(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\
mkdir -p $(zip_root)/RADIO; \
- $(ACP) $(t) $(zip_root)/RADIO/$(notdir $(t));)
+ cp $(t) $(zip_root)/RADIO/$(notdir $(t));)
@# Contents of the system image
$(hide) $(call package_files-copy-root, \
$(SYSTEMIMAGE_SOURCE_DIR),$(zip_root)/SYSTEM)
@@ -1998,20 +2034,22 @@
endif
@# Extra contents of the OTA package
$(hide) mkdir -p $(zip_root)/OTA
- $(hide) $(ACP) $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
+ $(hide) cp $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
ifneq ($(AB_OTA_UPDATER),true)
ifneq ($(built_ota_tools),)
$(hide) mkdir -p $(zip_root)/OTA/bin
- $(hide) $(ACP) $(PRIVATE_OTA_TOOLS) $(zip_root)/OTA/bin/
+ $(hide) cp $(PRIVATE_OTA_TOOLS) $(zip_root)/OTA/bin/
endif
endif
@# Files that do not end up in any images, but are necessary to
@# build them.
$(hide) mkdir -p $(zip_root)/META
- $(hide) $(ACP) $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
- $(hide) if test -e $(tool_extensions)/releasetools.py; then $(ACP) $(tool_extensions)/releasetools.py $(zip_root)/META/; fi
+ $(hide) cp $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
+ifneq ($(tool_extension),)
+ $(hide) cp $(PRIVATE_TOOL_EXTENSION) $(zip_root)/META/
+endif
$(hide) echo "$(PRODUCT_OTA_PUBLIC_KEYS)" > $(zip_root)/META/otakeys.txt
- $(hide) $(ACP) $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
+ $(hide) cp $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
$(hide) echo "recovery_api_version=$(PRIVATE_RECOVERY_API_VERSION)" > $(zip_root)/META/misc_info.txt
$(hide) echo "fstab_version=$(PRIVATE_RECOVERY_FSTAB_VERSION)" >> $(zip_root)/META/misc_info.txt
ifdef BOARD_FLASH_BLOCK_SIZE
@@ -2038,27 +2076,25 @@
else
$(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
endif
- $(hide) echo "tool_extensions=$(tool_extensions)" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "tool_extensions=$(PRIVATE_TOOL_EXTENSIONS)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $(zip_root)/META/misc_info.txt
ifdef PRODUCT_EXTRA_RECOVERY_KEYS
$(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $(zip_root)/META/misc_info.txt
endif
$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
- $(hide) echo "use_set_metadata=1" >> $(zip_root)/META/misc_info.txt
$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "update_rename_support=1" >> $(zip_root)/META/misc_info.txt
- $(hide) echo "blockimgdiff_versions=1,2,3,4" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "blockimgdiff_versions=3,4" >> $(zip_root)/META/misc_info.txt
ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
# OTA scripts are only interested in fingerprint related properties
$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
endif
ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),)
- $(hide) $(ACP) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
+ $(hide) cp $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
$(zip_root)/META/$(notdir $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH))
endif
ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH),)
- $(hide) $(ACP) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
+ $(hide) cp $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
$(zip_root)/META/$(notdir $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH))
endif
ifneq ($(strip $(SANITIZE_TARGET)),)
@@ -2092,8 +2128,8 @@
endif
ifeq ($(AB_OTA_UPDATER),true)
@# When using the A/B updater, include the updater config files in the zip.
- $(hide) $(ACP) $(TOPDIR)system/update_engine/update_engine.conf $(zip_root)/META/update_engine_config.txt
- $(hide) $(ACP) $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint $(zip_root)/META/zlib_fingerprint.txt
+ $(hide) cp $(TOPDIR)system/update_engine/update_engine.conf $(zip_root)/META/update_engine_config.txt
+ $(hide) cp $(TARGET_OUT_COMMON_GEN)/zlib_fingerprint $(zip_root)/META/zlib_fingerprint.txt
$(hide) for part in $(AB_OTA_PARTITIONS); do \
echo "$${part}" >> $(zip_root)/META/ab_partitions.txt; \
done
@@ -2111,12 +2147,13 @@
src=$${pair1}/$${pair2}; \
dest=$(zip_root)/VENDOR_IMAGES/$${pair2}; \
mkdir -p $$(dirname "$${dest}"); \
- $(ACP) $${src} $${dest}; \
+ cp $${src} $${dest}; \
done;
endif
ifdef OSRELEASED_DIRECTORY
- $(hide) $(ACP) $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
- $(hide) $(ACP) $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+ $(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
+ $(hide) cp $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+ $(hide) cp $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/system_version $(zip_root)/META/system_version.txt
endif
endif
ifeq ($(BREAKPAD_GENERATE_SYMBOLS),true)
@@ -2127,28 +2164,29 @@
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_VENDORIMAGE_TARGET) $(zip_root)/IMAGES/
endif
+ @# Run fs_config on all the system, vendor, boot ramdisk,
+ @# and recovery ramdisk files in the staging directory, and save the output
+ $(hide) $(call fs_config,$(zip_root)/SYSTEM,system/) > $(zip_root)/META/filesystem_config.txt
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+ $(hide) $(call fs_config,$(zip_root)/VENDOR,vendor/) > $(zip_root)/META/vendor_filesystem_config.txt
+endif
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+ $(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
+endif
+ $(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+ $(hide) $(call fs_config,$(zip_root)/RECOVERY/RAMDISK,) > $(zip_root)/META/recovery_filesystem_config.txt
+endif
+ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
+ $(hide) $(call fs_config,$(zip_root)/SYSTEM_OTHER,system/) > $(zip_root)/META/system_other_filesystem_config.txt
+endif
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+ ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $(zip_root)
@# Zip everything up, preserving symlinks and placing META/ files first to
@# help early validation of the .zip file while uploading it.
$(hide) find $(zip_root)/META | sort >$@.list
- $(hide) find $(zip_root) | grep -v "^$(zip_root)/META/" | sort >>$@.list
+ $(hide) find $(zip_root) -path $(zip_root)/META -prune -o -print | sort >>$@.list
$(hide) $(SOONG_ZIP) -d -o $@ -C $(zip_root) -l $@.list
- @# Run fs_config on all the system, vendor, boot ramdisk,
- @# and recovery ramdisk files in the zip, and save the output
- $(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/filesystem_config.txt
- $(hide) zipinfo -1 $@ | awk 'BEGIN { FS="VENDOR/" } /^VENDOR\// {print "vendor/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/vendor_filesystem_config.txt
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
- $(hide) zipinfo -1 $@ | awk 'BEGIN { FS="ROOT/" } /^ROOT\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/root_filesystem_config.txt
-endif
- $(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
-ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
- $(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
-endif
-ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
- $(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM_OTHER/" } /^SYSTEM_OTHER\// { print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/system_other_filesystem_config.txt
-endif
- $(hide) (cd $(zip_root) && zip -qX ../$(notdir $@) META/*filesystem_config.txt)
- $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
- ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $@
.PHONY: target-files-package
target-files-package: $(BUILT_TARGET_FILES_PACKAGE)
@@ -2186,6 +2224,7 @@
$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
./build/tools/releasetools/ota_from_target_files -v \
--block \
+ --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
-p $(HOST_OUT) \
-k $(KEY_CERT_PAIR) \
$(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \
diff --git a/core/base_rules.mk b/core/base_rules.mk
index c65d3ce..92e69bb 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -65,6 +65,16 @@
my_host_cross :=
endif
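+# Keep LOCAL_PROPRIETARY_MODULE and LOCAL_VENDOR_MODULE in sync: default each
+# from the other, then reject the case where both are set but disagree.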
+ifndef LOCAL_PROPRIETARY_MODULE
+ LOCAL_PROPRIETARY_MODULE := $(LOCAL_VENDOR_MODULE)
+endif
+ifndef LOCAL_VENDOR_MODULE
+ LOCAL_VENDOR_MODULE := $(LOCAL_PROPRIETARY_MODULE)
+endif
+ifneq ($(filter-out $(LOCAL_PROPRIETARY_MODULE),$(LOCAL_VENDOR_MODULE))$(filter-out $(LOCAL_VENDOR_MODULE),$(LOCAL_PROPRIETARY_MODULE)),)
+$(call pretty-error,Only one of LOCAL_PROPRIETARY_MODULE[$(LOCAL_PROPRIETARY_MODULE)] and LOCAL_VENDOR_MODULE[$(LOCAL_VENDOR_MODULE)] may be set, or they must be equal)
+endif
+
include $(BUILD_SYSTEM)/local_vndk.mk
my_module_tags := $(LOCAL_MODULE_TAGS)
@@ -166,7 +176,7 @@
ifdef LOCAL_IS_HOST_MODULE
partition_tag :=
else
-ifeq (true,$(LOCAL_PROPRIETARY_MODULE))
+ifeq (true,$(LOCAL_VENDOR_MODULE))
partition_tag := _VENDOR
else ifeq (true,$(LOCAL_OEM_MODULE))
partition_tag := _OEM
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index f30c4ba..063fe19 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -226,6 +226,7 @@
LOCAL_UNSTRIPPED_PATH:=
LOCAL_USE_AAPT2:=$(USE_AAPT2)
LOCAL_USE_VNDK:=
+LOCAL_VENDOR_MODULE:=
LOCAL_VTSC_FLAGS:=
LOCAL_VTS_INCLUDES:=
LOCAL_WARNINGS_ENABLE:=
diff --git a/core/config.mk b/core/config.mk
index 7709d3c..319a069 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -817,6 +817,7 @@
art/% \
bionic/% \
external/fio/% \
+ hardware/interfaces/% \
define find_warning_disallowed_projects
$(filter $(ANDROID_WARNING_DISALLOWED_PROJECTS),$(1)/)
@@ -851,7 +852,7 @@
userdataimage-nodeps userdatatarball-nodeps \
cacheimage-nodeps \
bptimage-nodeps \
- vendorimage-nodeps \
+ vnod vendorimage-nodeps \
systemotherimage-nodeps \
ramdisk-nodeps \
bootimage-nodeps \
diff --git a/core/definitions.mk b/core/definitions.mk
index 1a7cc50..3da48b5 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2513,6 +2513,13 @@
$(filter-out -classpath -bootclasspath "",$(subst :,$(space),$(1)))
endef
+# Takes an SDK version that might be PLATFORM_VERSION_CODENAME (for example P)
+# and returns a number greater than the highest existing SDK version if it is,
+# or the input unchanged if it is not.
+define codename-or-sdk-to-sdk
+$(if $(filter $(1),$(PLATFORM_VERSION_CODENAME)),10000,$(1))
+endef
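+# e.g. $(call codename-or-sdk-to-sdk,P) -> 10000 when the current codename
+# is P, while $(call codename-or-sdk-to-sdk,25) -> 25.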
+
define desugar-classes-jar
@echo Desugar: $@
@mkdir -p $(dir $@)
@@ -2520,7 +2527,7 @@
$(hide) java -jar $(DESUGAR) \
$(addprefix --bootclasspath_entry ,$(call desugar-bootclasspath,$(PRIVATE_BOOTCLASSPATH))) \
$(addprefix --classpath_entry ,$(PRIVATE_ALL_JAVA_LIBRARIES)) \
- --min_sdk_version $(PRIVATE_SDK_VERSION) \
+ --min_sdk_version $(call codename-or-sdk-to-sdk,$(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
--allow_empty_bootclasspath \
$(if $(filter --core-library,$(PRIVATE_DX_FLAGS)),--core_library) \
-i $< -o $@.tmp
@@ -2537,7 +2544,7 @@
$(hide) $(DX) \
-JXms16M -JXmx2048M \
--dex --output=$(dir $@) \
- --min-sdk-version=$(PRIVATE_SDK_VERSION) \
+ --min-sdk-version=$(call codename-or-sdk-to-sdk,$(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
$(if $(NO_OPTIMIZE_DX), \
--no-optimize) \
$(if $(GENERATE_DEX_DEBUG), \
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 269a61f..68c46f5 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -128,17 +128,17 @@
my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
$(built_odex): $(my_built_profile)
$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
-$(my_built_profile): PRIVATE_INSTALLED_MODULE := $(LOCAL_INSTALLED_MODULE)
+$(my_built_profile): PRIVATE_BUILT_MODULE := $(LOCAL_BUILT_MODULE)
$(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
$(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
$(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
$(my_built_profile): $(PROFMAN)
-$(my_built_profile): $(PRIVATE_INSTALLED_MODULE)
+$(my_built_profile): $(LOCAL_BUILT_MODULE)
$(my_built_profile):
$(hide) mkdir -p $(dir $@)
ANDROID_LOG_TAGS="*:e" $(PROFMAN) \
--create-profile-from=$(PRIVATE_SOURCE_CLASSES) \
- --apk=$(PRIVATE_INSTALLED_MODULE) \
+ --apk=$(PRIVATE_BUILT_MODULE) \
--dex-location=$(PRIVATE_DEX_LOCATION) \
--reference-profile-file=$@
else
@@ -169,7 +169,7 @@
DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
DEXPREOPT.$(LOCAL_MODULE).PRIVILEGED_MODULE := $(LOCAL_PRIVILEGED_MODULE)
-DEXPREOPT.$(LOCAL_MODULE).PROPRIETARY_MODULE := $(LOCAL_PROPRIETARY_MODULE)
+DEXPREOPT.$(LOCAL_MODULE).VENDOR_MODULE := $(LOCAL_VENDOR_MODULE)
DEXPREOPT.$(LOCAL_MODULE).TARGET_ARCH := $(LOCAL_MODULE_TARGET_ARCH)
DEXPREOPT.$(LOCAL_MODULE).INSTALLED := $(installed_odex)
DEXPREOPT.$(LOCAL_MODULE).INSTALLED_STRIPPED := $(LOCAL_INSTALLED_MODULE)
diff --git a/core/dumpvar.mk b/core/dumpvar.mk
index 74ea3ff..acae48e 100644
--- a/core/dumpvar.mk
+++ b/core/dumpvar.mk
@@ -6,6 +6,7 @@
TARGET_PRODUCT \
TARGET_BUILD_VARIANT \
TARGET_BUILD_TYPE \
+ TARGET_PLATFORM_VERSION \
TARGET_BUILD_APPS \
TARGET_ARCH \
TARGET_ARCH_VARIANT \
diff --git a/core/envsetup.mk b/core/envsetup.mk
index d17b7c9..199fe9b 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -20,7 +20,7 @@
# people who haven't re-run those will have to do so before they
# can build. Make sure to also update the corresponding value in
# buildspec.mk.default and envsetup.sh.
-CORRECT_BUILD_ENV_SEQUENCE_NUMBER := 12
+CORRECT_BUILD_ENV_SEQUENCE_NUMBER := 13
# ---------------------------------------------------------------
# The product defaults to generic on hardware
@@ -422,6 +422,7 @@
else
TARGET_OUT_VENDOR_SHARED_LIBRARIES := $(target_out_vendor_shared_libraries_base)/lib
endif
+TARGET_OUT_VENDOR_RENDERSCRIPT_BITCODE := $(TARGET_OUT_VENDOR_SHARED_LIBRARIES)
TARGET_OUT_VENDOR_JAVA_LIBRARIES := $(TARGET_OUT_VENDOR)/framework
TARGET_OUT_VENDOR_APPS := $(TARGET_OUT_VENDOR)/app
TARGET_OUT_VENDOR_APPS_PRIVILEGED := $(TARGET_OUT_VENDOR)/priv-app
@@ -433,6 +434,7 @@
else
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES := $(target_out_vendor_shared_libraries_base)/lib
endif
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_RENDERSCRIPT_BITCODE := $($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES)
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS := $(TARGET_OUT_VENDOR_APPS)
$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS_PRIVILEGED := $(TARGET_OUT_VENDOR_APPS_PRIVILEGED)
diff --git a/core/help.mk b/core/help.mk
index 6e0b2c0..c034e79 100644
--- a/core/help.mk
+++ b/core/help.mk
@@ -22,6 +22,7 @@
@echo "droid Default target"
@echo "clean (aka clobber) equivalent to rm -rf out/"
@echo "snod Quickly rebuild the system image from built packages"
+ @echo "vnod Quickly rebuild the vendor image from built packages"
@echo "offline-sdk-docs Generate the HTML for the developer SDK docs"
@echo "doc-comment-check-docs Check HTML doc links & validity, without generating HTML"
@echo "libandroid_runtime All the JNI framework stuff"
diff --git a/core/java.mk b/core/java.mk
index 1835983..c4dd84b 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -455,7 +455,7 @@
my_desugaring :=
ifndef LOCAL_JACK_ENABLED
-ifeq ($(LOCAL_JAVA_LANGUAGE_VERSION),1.8)
+ifndef LOCAL_IS_STATIC_JAVA_LIBRARY
my_desugaring := true
$(full_classes_desugar_jar): PRIVATE_DX_FLAGS := $(LOCAL_DX_FLAGS)
$(full_classes_desugar_jar): $(full_classes_compiled_jar) $(DESUGAR)
diff --git a/core/local_vndk.mk b/core/local_vndk.mk
index f7970f0..5ac5f26 100644
--- a/core/local_vndk.mk
+++ b/core/local_vndk.mk
@@ -3,7 +3,7 @@
# If LOCAL_SDK_VERSION is set, that's a more restrictive set, so they don't need LOCAL_USE_VNDK
ifndef LOCAL_IS_HOST_MODULE
ifndef LOCAL_SDK_VERSION
- ifneq (,$(filter true,$(LOCAL_PROPRIETARY_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE)))
+ ifneq (,$(filter true,$(LOCAL_VENDOR_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE)))
LOCAL_USE_VNDK:=true
else
ifneq (,$(filter $(TARGET_OUT_VENDOR)%,$(LOCAL_MODULE_PATH) $(LOCAL_MODULE_PATH_32) $(LOCAL_MODULE_PATH_64)))
diff --git a/core/main.mk b/core/main.mk
index 7433f90..41d903b 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -10,9 +10,6 @@
endif
ifndef KATI
-USE_SOONG_UI ?= true
-endif
-ifeq ($(USE_SOONG_UI),true)
host_prebuilts := linux-x86
ifeq ($(shell uname),Darwin)
@@ -27,7 +24,7 @@
$(sort $(MAKECMDGOALS)) : run_soong_ui
@#empty
-else # USE_SOONG_UI
+else # KATI
# Absolute path of the present working directory.
# This overrides the shell variable $PWD, which does not necessarily point to
@@ -47,23 +44,10 @@
.PHONY: droid_targets
droid_targets:
-# Targets that provide quick help on the build system.
-include $(BUILD_SYSTEM)/help.mk
-
# Set up various standard variables based on configuration
# and host information.
include $(BUILD_SYSTEM)/config.mk
-ifndef KATI
-ifdef USE_NINJA
-$(warning USE_NINJA is ignored. Ninja is always used.)
-endif
-
-# Mark this is a ninja build.
-$(shell mkdir -p $(OUT_DIR) && touch $(OUT_DIR)/ninja_build)
-include build/core/ninja.mk
-else # KATI
-
ifneq ($(filter $(dont_bother_goals), $(MAKECMDGOALS)),)
dont_bother := true
endif
@@ -1149,4 +1133,3 @@
all_link_types:
endif # KATI
-endif # USE_SOONG_UI
diff --git a/core/ninja.mk b/core/ninja.mk
deleted file mode 100644
index af2ede0..0000000
--- a/core/ninja.mk
+++ /dev/null
@@ -1,103 +0,0 @@
-include $(BUILD_SYSTEM)/soong.mk
-
-# Modifier goals we don't need to pass to Ninja.
-.PHONY : $(NINJA_EXCLUDE_GOALS)
-
-define replace_space_and_slash
-$(subst /,_,$(subst $(space),_,$(sort $1)))
-endef
-
-KATI_NINJA_SUFFIX := -$(TARGET_PRODUCT)
-ifneq ($(KATI_GOALS),)
-KATI_NINJA_SUFFIX := $(KATI_NINJA_SUFFIX)-$(call replace_space_and_slash,$(KATI_GOALS))
-endif
-ifneq ($(ONE_SHOT_MAKEFILE),)
-KATI_NINJA_SUFFIX := $(KATI_NINJA_SUFFIX)-mmm-$(call replace_space_and_slash,$(ONE_SHOT_MAKEFILE))
-endif
-
-my_checksum_suffix :=
-my_ninja_suffix_too_long := $(filter 1, $(shell v='$(KATI_NINJA_SUFFIX)' && echo $$(($${$(pound)v} > 64))))
-ifneq ($(my_ninja_suffix_too_long),)
-# Replace the suffix with a checksum if it gets too long.
-my_checksum_suffix := $(KATI_NINJA_SUFFIX)
-KATI_NINJA_SUFFIX := -$(word 1, $(shell echo $(my_checksum_suffix) | $(MD5SUM)))
-endif
-
-KATI_BUILD_NINJA := $(OUT_DIR)/build$(KATI_NINJA_SUFFIX).ninja
-KATI_ENV_SH := $(OUT_DIR)/env$(KATI_NINJA_SUFFIX).sh
-
-# Write out a file mapping checksum to the real suffix.
-ifneq ($(my_checksum_suffix),)
-my_ninja_suffix_file := $(basename $(KATI_BUILD_NINJA)).suf
-$(shell mkdir -p $(dir $(my_ninja_suffix_file)) && \
- echo $(my_checksum_suffix) > $(my_ninja_suffix_file))
-endif
-
-ifeq (,$(NINJA_STATUS))
-NINJA_STATUS := [%p %f/%t]$(space)
-endif
-
-NINJA_EXTRA_ARGS :=
-
-ifneq (,$(filter showcommands,$(ORIGINAL_MAKECMDGOALS)))
-NINJA_EXTRA_ARGS += "-v"
-endif
-
-# Make multiple rules to generate the same target an error instead of
-# proceeding with undefined behavior.
-NINJA_EXTRA_ARGS += -w dupbuild=err
-
-ifneq ($(filter-out false,$(USE_GOMA)),)
-KATI_MAKEPARALLEL := $(MAKEPARALLEL)
-# Ninja runs remote jobs (i.e., commands which contain gomacc) with
-# this parallelism. Note the parallelism of all other jobs is still
-# limited by the -j flag passed to GNU make.
-NINJA_REMOTE_NUM_JOBS ?= 500
-NINJA_EXTRA_ARGS += -j$(NINJA_REMOTE_NUM_JOBS)
-else
-NINJA_MAKEPARALLEL := $(MAKEPARALLEL) --ninja
-
-# We never want Kati to see MAKEFLAGS, as forcefully overriding variables is
-# terrible. The variables in MAKEFLAGS are still available in the environment,
-# so if part of the build wants input from the user, it should be explicitly
-# checking for an environment variable or using ?=
-#
-# makeparallel already clears MAKEFLAGS, so it's not necessary in the GOMA case
-KATI_MAKEPARALLEL := MAKEFLAGS=
-endif
-
-NINJA_ARGS += $(NINJA_EXTRA_ARGS)
-
-COMBINED_BUILD_NINJA := $(OUT_DIR)/combined$(KATI_NINJA_SUFFIX).ninja
-
-$(COMBINED_BUILD_NINJA): $(KATI_BUILD_NINJA) FORCE
- $(hide) echo "builddir = $(OUT_DIR)" > $(COMBINED_BUILD_NINJA)
- $(hide) echo "include $(KATI_BUILD_NINJA)" >> $(COMBINED_BUILD_NINJA)
- $(hide) echo "include $(SOONG_BUILD_NINJA)" >> $(COMBINED_BUILD_NINJA)
- $(hide) echo "build $(COMBINED_BUILD_NINJA): phony $(SOONG_BUILD_NINJA)" >> $(COMBINED_BUILD_NINJA)
-
-$(sort $(DEFAULT_GOAL) $(ANDROID_GOALS)) : ninja_wrapper
- @#empty
-
-.PHONY: ninja_wrapper
-ninja_wrapper: $(COMBINED_BUILD_NINJA) $(MAKEPARALLEL)
- @echo Starting build with ninja
- +$(hide) export NINJA_STATUS="$(NINJA_STATUS)" && source $(KATI_ENV_SH) && exec $(NINJA_MAKEPARALLEL) $(NINJA) -d keepdepfile $(NINJA_GOALS) -C $(TOP) -f $(COMBINED_BUILD_NINJA) $(NINJA_ARGS)
-
-# Dummy Android.mk and CleanSpec.mk files so that kati won't recurse into the
-# out directory
-DUMMY_OUT_MKS := $(OUT_DIR)/Android.mk $(OUT_DIR)/CleanSpec.mk
-$(DUMMY_OUT_MKS):
- @mkdir -p $(dir $@)
- $(hide) echo '# This file prevents findleaves.py from traversing this directory further' >$@
-
-KATI_FIND_EMULATOR := --use_find_emulator
-ifeq ($(KATI_EMULATE_FIND),false)
- KATI_FIND_EMULATOR :=
-endif
-$(KATI_BUILD_NINJA): $(CKATI) $(MAKEPARALLEL) $(DUMMY_OUT_MKS) run_soong FORCE
- @echo Running kati to generate build$(KATI_NINJA_SUFFIX).ninja...
- +$(hide) $(KATI_MAKEPARALLEL) $(CKATI) --ninja --ninja_dir=$(OUT_DIR) --ninja_suffix=$(KATI_NINJA_SUFFIX) --regen --ignore_optional_include=$(OUT_DIR)/%.P --detect_android_echo $(KATI_FIND_EMULATOR) -f build/core/main.mk $(KATI_GOALS) --gen_all_targets BUILDING_WITH_NINJA=true SOONG_ANDROID_MK=$(SOONG_ANDROID_MK) SOONG_MAKEVARS_MK=$(SOONG_MAKEVARS_MK)
-
-.PHONY: FORCE
-FORCE:
diff --git a/core/pdk_fusion_modules.mk b/core/pdk_fusion_modules.mk
index 49b30dc..0c03f37 100644
--- a/core/pdk_fusion_modules.mk
+++ b/core/pdk_fusion_modules.mk
@@ -23,7 +23,7 @@
LOCAL_BUILT_MODULE_STEM:=$(7)
LOCAL_MODULE_SUFFIX:=$(suffix $(7))
LOCAL_PRIVILEGED_MODULE:=$(8)
-LOCAL_PROPRIETARY_MODULE:=$(9)
+LOCAL_VENDOR_MODULE:=$(9)
LOCAL_MODULE_TARGET_ARCH:=$(10)
LOCAL_REPLACE_PREBUILT_APK_INSTALLED:=$(11)
LOCAL_CERTIFICATE:=PRESIGNED
@@ -72,7 +72,7 @@
$(PDK.DEXPREOPT.$(a).DEX_PREOPT_FLAGS),\
package.apk,\
$(PDK.DEXPREOPT.$(a).PRIVILEGED_MODULE),\
- $(PDK.DEXPREOPT.$(a).PROPRIETARY_MODULE),\
+ $(PDK.DEXPREOPT.$(a).VENDOR_MODULE),\
$(PDK.DEXPREOPT.$(a).TARGET_ARCH),\
$(_pdk_fusion_intermediates)/$(PDK.DEXPREOPT.$(a).STRIPPED_SRC),\
)))
diff --git a/core/soong.mk b/core/soong.mk
deleted file mode 100644
index 4a74f2e..0000000
--- a/core/soong.mk
+++ /dev/null
@@ -1,29 +0,0 @@
-# We need to rebootstrap soong if SOONG_OUT_DIR or the reverse path from
-# SOONG_OUT_DIR to TOP changes
-SOONG_NEEDS_REBOOTSTRAP :=
-ifneq ($(wildcard $(SOONG_BOOTSTRAP)),)
- ifneq ($(SOONG_OUT_DIR),$(strip $(shell source $(SOONG_BOOTSTRAP); echo $$BUILDDIR)))
- SOONG_NEEDS_REBOOTSTRAP := FORCE
- $(warning soong_out_dir changed)
- endif
- ifneq ($(strip $(shell build/soong/scripts/reverse_path.py $(SOONG_OUT_DIR))),$(strip $(shell source $(SOONG_BOOTSTRAP); echo $$SRCDIR_FROM_BUILDDIR)))
- SOONG_NEEDS_REBOOTSTRAP := FORCE
- $(warning reverse path changed)
- endif
-endif
-
-# Bootstrap soong.
-$(SOONG_BOOTSTRAP): bootstrap.bash $(SOONG_NEEDS_REBOOTSTRAP)
- $(hide) mkdir -p $(dir $@)
- $(hide) BUILDDIR=$(SOONG_OUT_DIR) ./bootstrap.bash
-
-# Tell soong that it is embedded in make
-$(SOONG_IN_MAKE):
- $(hide) mkdir -p $(dir $@)
- $(hide) touch $@
-
-# Run Soong, this implicitly create an Android.mk listing all soong outputs as
-# prebuilts.
-.PHONY: run_soong
-run_soong: $(SOONG_BOOTSTRAP) $(SOONG_VARIABLES) $(SOONG_IN_MAKE) FORCE
- $(hide) SKIP_NINJA=true $(SOONG)
diff --git a/core/tasks/device-tests.mk b/core/tasks/device-tests.mk
index a0662ca..731937f 100644
--- a/core/tasks/device-tests.mk
+++ b/core/tasks/device-tests.mk
@@ -14,4 +14,14 @@
.PHONY: device-tests
-device-tests: $(COMPATIBILITY.device-tests.FILES)
+
+device-tests-zip := $(PRODUCT_OUT)/device-tests.zip
+$(device-tests-zip): $(COMPATIBILITY.device-tests.FILES) $(SOONG_ZIP)
+ echo $(COMPATIBILITY.device-tests.FILES) > $@.list
+ sed -i -e 's/\s\+/\n/g' $@.list
+ grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+ grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+ $(hide) $(SOONG_ZIP) -d -o $@ -C $(HOST_OUT) -l $@-host.list -C $(PRODUCT_OUT) -l $@-target.list
+
+device-tests: $(device-tests-zip)
+$(call dist-for-goals, device-tests, $(device-tests-zip))
diff --git a/core/tasks/general-tests.mk b/core/tasks/general-tests.mk
index ddaede4..e02faa7 100644
--- a/core/tasks/general-tests.mk
+++ b/core/tasks/general-tests.mk
@@ -13,4 +13,14 @@
# limitations under the License.
.PHONY: general-tests
-device-tests: $(COMPATIBILITY.general-tests.FILES)
+
+general-tests-zip := $(PRODUCT_OUT)/general-tests.zip
+$(general-tests-zip): $(COMPATIBILITY.general-tests.FILES) $(SOONG_ZIP)
+ echo $(COMPATIBILITY.general-tests.FILES) > $@.list
+ sed -i -e 's/\s\+/\n/g' $@.list
+ grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+ grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+ $(hide) $(SOONG_ZIP) -d -o $@ -C $(HOST_OUT) -l $@-host.list -C $(PRODUCT_OUT) -l $@-target.list
+
+general-tests: $(general-tests-zip)
+$(call dist-for-goals, general-tests, $(general-tests-zip))
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index d8f900e..1455a44 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -37,13 +37,14 @@
$(compatibility_zip): PRIVATE_TOOLS := $(test_tools)
$(compatibility_zip): PRIVATE_SUITE_NAME := $(test_suite_name)
$(compatibility_zip): PRIVATE_DYNAMIC_CONFIG := $(test_suite_dynamic_config)
-$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) | $(ADB) $(ACP)
+$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) $(SOONG_ZIP) | $(ADB) $(ACP)
# Make dir structure
$(hide) mkdir -p $(PRIVATE_OUT_DIR)/tools $(PRIVATE_OUT_DIR)/testcases
# Copy tools
$(hide) $(ACP) -fp $(PRIVATE_TOOLS) $(PRIVATE_OUT_DIR)/tools
$(if $(PRIVATE_DYNAMIC_CONFIG),$(hide) $(ACP) -fp $(PRIVATE_DYNAMIC_CONFIG) $(PRIVATE_OUT_DIR)/testcases/$(PRIVATE_SUITE_NAME).dynamic)
- $(hide) cd $(dir $@) && zip -rq $(notdir $@) $(PRIVATE_NAME)
+ $(hide) find $(dir $@)/$(PRIVATE_NAME) | sort >$@.list
+ $(hide) $(SOONG_ZIP) -d -o $@ -C $(dir $@) -l $@.list
# Reset all input variables
test_suite_name :=
diff --git a/core/tasks/tools/package-modules.mk b/core/tasks/tools/package-modules.mk
index a643882..4dde9fd 100644
--- a/core/tasks/tools/package-modules.mk
+++ b/core/tasks/tools/package-modules.mk
@@ -48,26 +48,16 @@
$(eval my_copy_pairs += $(bui):$(my_staging_dir)/$(my_copy_dest)))\
))
-define copy-tests-in-batch
-$(hide) $(foreach p, $(1),\
- $(eval pair := $(subst :,$(space),$(p)))\
- mkdir -p $(dir $(word 2,$(pair)));\
- cp -Rf $(word 1,$(pair)) $(word 2,$(pair));)
-endef
-
my_package_zip := $(my_staging_dir)/$(my_package_name).zip
$(my_package_zip): PRIVATE_COPY_PAIRS := $(my_copy_pairs)
$(my_package_zip): PRIVATE_PICKUP_FILES := $(my_pickup_files)
$(my_package_zip) : $(my_built_modules)
@echo "Package $@"
@rm -rf $(dir $@) && mkdir -p $(dir $@)
- $(call copy-tests-in-batch,$(wordlist 1,200,$(PRIVATE_COPY_PAIRS)))
- $(call copy-tests-in-batch,$(wordlist 201,400,$(PRIVATE_COPY_PAIRS)))
- $(call copy-tests-in-batch,$(wordlist 401,600,$(PRIVATE_COPY_PAIRS)))
- $(call copy-tests-in-batch,$(wordlist 601,800,$(PRIVATE_COPY_PAIRS)))
- $(call copy-tests-in-batch,$(wordlist 801,1000,$(PRIVATE_COPY_PAIRS)))
- $(call copy-tests-in-batch,$(wordlist 1001,1200,$(PRIVATE_COPY_PAIRS)))
- $(call copy-tests-in-batch,$(wordlist 1201,9999,$(PRIVATE_COPY_PAIRS)))
+ $(foreach p, $(PRIVATE_COPY_PAIRS),\
+ $(eval pair := $(subst :,$(space),$(p)))\
+ mkdir -p $(dir $(word 2,$(pair))) && \
+ cp -Rf $(word 1,$(pair)) $(word 2,$(pair)) && ) true
$(hide) $(foreach f, $(PRIVATE_PICKUP_FILES),\
- cp -RfL $(f) $(dir $@);)
+ cp -RfL $(f) $(dir $@) && ) true
$(hide) cd $(dir $@) && zip -rqX $(notdir $@) *
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 2245aa2..62e5499 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -52,17 +52,24 @@
#$(warning $(call find_and_earlier,A B C,D))
define version-list
-$(1) $(1)DR1 $(1)DR2 $(1)MR1 $(1)MR2
+$(1)PR1 $(1)PD1 $(1)PD2 $(1)PM1 $(1)PM2
endef
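+# e.g. $(call version-list,O) now expands to "OPR1 OPD1 OPD2 OPM1 OPM2".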
ALL_VERSIONS := O P
ALL_VERSIONS := $(foreach v,$(ALL_VERSIONS),$(call version-list,$(v)))
+DEFAULT_PLATFORM_VERSION := OPR1
+
+# HACK: forward P to PPR1 until the build server config is updated
+ifeq (P,$(TARGET_PLATFORM_VERSION))
+ TARGET_PLATFORM_VERSION := PPR1
+endif
+
ifeq (,$(TARGET_PLATFORM_VERSION))
# Default targeted platform version
# TODO: PLATFORM_VERSION, PLATFORM_SDK_VERSION, etc. should be conditional
# on this
- TARGET_PLATFORM_VERSION := O
+ TARGET_PLATFORM_VERSION := $(DEFAULT_PLATFORM_VERSION)
endif
ifeq (,$(filter $(ALL_VERSIONS), $(TARGET_PLATFORM_VERSION)))
@@ -76,12 +83,24 @@
$(foreach v,$(ENABLED_VERSIONS), \
$(eval IS_AT_LEAST_$(v) := true))
+# Default versions for each TARGET_PLATFORM_VERSION
+
+# This is the canonical definition of the platform version,
+# which is the version that we reveal to the end user.
+# Update this value when the platform version changes (rather
+# than overriding it somewhere else). Can be an arbitrary string.
+PLATFORM_VERSION.OPR1 := O
+
+# This is the current development code-name, if the build is not a final
+# release build. If this is a final release build, it is simply "REL".
+PLATFORM_VERSION_CODENAME.OPR1 := O
+
ifndef PLATFORM_VERSION
- # This is the canonical definition of the platform version,
- # which is the version that we reveal to the end user.
- # Update this value when the platform version changes (rather
- # than overriding it somewhere else). Can be an arbitrary string.
- PLATFORM_VERSION := 7.1.1
+ PLATFORM_VERSION := $(PLATFORM_VERSION.$(TARGET_PLATFORM_VERSION))
+ ifndef PLATFORM_VERSION
+ # PLATFORM_VERSION falls back to TARGET_PLATFORM_VERSION
+ PLATFORM_VERSION := $(TARGET_PLATFORM_VERSION)
+ endif
endif
ifndef PLATFORM_SDK_VERSION
@@ -106,9 +125,11 @@
endif
ifndef PLATFORM_VERSION_CODENAME
- # This is the current development code-name, if the build is not a final
- # release build. If this is a final release build, it is simply "REL".
- PLATFORM_VERSION_CODENAME := REL
+ PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
+ ifndef PLATFORM_VERSION_CODENAME
+ # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
+ PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
+ endif
# This is all of the development codenames that are active. Should be either
# the same as PLATFORM_VERSION_CODENAME or a comma-separated list of additional
diff --git a/envsetup.sh b/envsetup.sh
index 9680780..03fdf89 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -28,9 +28,9 @@
Look at the source to view more functions. The complete list is:
EOF
- T=$(gettop)
- local A
- A=""
+ local T=$(gettop)
+ local A=""
+ local i
for i in `cat $T/build/envsetup.sh | sed -n "/^[[:blank:]]*function /s/function \([a-z_]*\).*/\1/p" | sort | uniq`; do
A="$A $i"
done
@@ -40,7 +40,7 @@
# Get all the build variables needed by this script in a single call to the build system.
function build_build_var_cache()
{
- T=$(gettop)
+ local T=$(gettop)
# Grep out the variable names from the script.
cached_vars=`cat $T/build/envsetup.sh | tr '()' ' ' | awk '{for(i=1;i<=NF;i++) if($i~/get_build_var/) print $(i+1)}' | sort -u | tr '\n' ' '`
cached_abs_vars=`cat $T/build/envsetup.sh | tr '()' ' ' | awk '{for(i=1;i<=NF;i++) if($i~/get_abs_build_var/) print $(i+1)}' | sort -u | tr '\n' ' '`
@@ -74,6 +74,7 @@
function destroy_build_var_cache()
{
unset BUILD_VAR_CACHE_READY
+ local v
for v in $cached_vars; do
unset var_cache_$v
done
@@ -93,7 +94,7 @@
return
fi
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
@@ -111,7 +112,7 @@
return
fi
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
@@ -123,7 +124,7 @@
# check to see if the supplied product is one we can build
function check_product()
{
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
@@ -141,6 +142,7 @@
# check to see if the supplied variant is valid
function check_variant()
{
+ local v
for v in ${VARIANT_CHOICES[@]}
do
if [ "$v" = "$1" ]
@@ -153,7 +155,7 @@
function setpaths()
{
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP."
return
@@ -184,18 +186,19 @@
fi
# and in with the new
- prebuiltdir=$(getprebuilt)
- gccprebuiltdir=$(get_abs_build_var ANDROID_GCC_PREBUILTS)
+ local prebuiltdir=$(getprebuilt)
+ local gccprebuiltdir=$(get_abs_build_var ANDROID_GCC_PREBUILTS)
# defined in core/config.mk
- targetgccversion=$(get_build_var TARGET_GCC_VERSION)
- targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION)
+ local targetgccversion=$(get_build_var TARGET_GCC_VERSION)
+ local targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION)
export TARGET_GCC_VERSION=$targetgccversion
# The gcc toolchain does not exist for windows/cygwin. In this case, do not reference it.
export ANDROID_TOOLCHAIN=
export ANDROID_TOOLCHAIN_2ND_ARCH=
local ARCH=$(get_build_var TARGET_ARCH)
+ local toolchaindir toolchaindir2=
case $ARCH in
x86) toolchaindir=x86/x86_64-linux-android-$targetgccversion/bin
;;
@@ -217,7 +220,7 @@
export ANDROID_TOOLCHAIN=$gccprebuiltdir/$toolchaindir
fi
- if [ -d "$gccprebuiltdir/$toolchaindir2" ]; then
+ if [ "$toolchaindir2" -a -d "$gccprebuiltdir/$toolchaindir2" ]; then
export ANDROID_TOOLCHAIN_2ND_ARCH=$gccprebuiltdir/$toolchaindir2
fi
@@ -273,7 +276,7 @@
function printconfig()
{
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
@@ -296,7 +299,7 @@
function set_sequence_number()
{
- export BUILD_ENV_SEQUENCE_NUMBER=12
+ export BUILD_ENV_SEQUENCE_NUMBER=13
}
function settitle()
@@ -399,6 +402,7 @@
#
function chooseproduct()
{
+ local default_value
if [ "x$TARGET_PRODUCT" != x ] ; then
default_value=$TARGET_PRODUCT
else
@@ -569,50 +573,42 @@
then
selection=${LUNCH_MENU_CHOICES[$(($answer-1))]}
fi
- elif (echo -n $answer | grep -q -e "^[^\-][^\-]*-[^\-][^\-]*$")
- then
+ else
selection=$answer
fi
- if [ -z "$selection" ]
- then
- echo
- echo "Invalid lunch combo: $answer"
- return 1
- fi
-
export TARGET_BUILD_APPS=
- local variant=$(echo -n $selection | sed -e "s/^[^\-]*-//")
- check_variant $variant
- if [ $? -ne 0 ]
- then
- echo
- echo "** Invalid variant: '$variant'"
- echo "** Must be one of ${VARIANT_CHOICES[@]}"
- variant=
+ local product variant_and_version variant version
+
+ product=${selection%%-*} # Trim everything after first dash
+ variant_and_version=${selection#*-} # Trim everything up to first dash
+ if [ "$variant_and_version" != "$selection" ]; then
+ variant=${variant_and_version%%-*}
+ if [ "$variant" != "$variant_and_version" ]; then
+ version=${variant_and_version#*-}
+ fi
fi
- local product=$(echo -n $selection | sed -e "s/-.*$//")
- TARGET_PRODUCT=$product \
- TARGET_BUILD_VARIANT=$variant \
- build_build_var_cache
- if [ $? -ne 0 ]
+ if [ -z "$product" ]
then
echo
- echo "** Don't have a product spec for: '$product'"
- echo "** Do you have the right repo manifest?"
- product=
- fi
-
- if [ -z "$product" -o -z "$variant" ]
- then
- echo
+ echo "Invalid lunch combo: $selection"
return 1
fi
- export TARGET_PRODUCT=$product
- export TARGET_BUILD_VARIANT=$variant
+ TARGET_PRODUCT=$product \
+ TARGET_BUILD_VARIANT=$variant \
+ TARGET_PLATFORM_VERSION=$version \
+ build_build_var_cache
+ if [ $? -ne 0 ]
+ then
+ return 1
+ fi
+
+ export TARGET_PRODUCT=$(get_build_var TARGET_PRODUCT)
+ export TARGET_BUILD_VARIANT=$(get_build_var TARGET_BUILD_VARIANT)
+ export TARGET_PLATFORM_VERSION=$(get_build_var TARGET_PLATFORM_VERSION)
export TARGET_BUILD_TYPE=release
echo
@@ -702,7 +698,7 @@
PWD= /bin/pwd
else
local HERE=$PWD
- T=
+ local T=
while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
\cd ..
T=`PWD= /bin/pwd -P`
@@ -750,9 +746,9 @@
function findmakefile()
{
- TOPFILE=build/core/envsetup.mk
+ local TOPFILE=build/core/envsetup.mk
local HERE=$PWD
- T=
+ local T=
while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
T=`PWD= /bin/pwd`
if [ -f "$T/Android.mk" -o -f "$T/Android.bp" ]; then
@@ -788,6 +784,7 @@
echo "Couldn't locate a makefile from the current directory."
return 1
else
+ local ARG
for ARG in $@; do
case $ARG in
GET-INSTALL-PATH) GET_INSTALL_PATH=$ARG;;
@@ -945,7 +942,7 @@
function croot()
{
- T=$(gettop)
+ local T=$(gettop)
if [ "$T" ]; then
if [ "$1" ]; then
\cd $(gettop)/$1
@@ -959,9 +956,9 @@
function cproj()
{
- TOPFILE=build/core/envsetup.mk
+ local TOPFILE=build/core/envsetup.mk
local HERE=$PWD
- T=
+ local T=
while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
T=$PWD
if [ -f "$T/Android.mk" ]; then
@@ -1212,6 +1209,7 @@
function resgrep()
{
+ local dir
for dir in `find . -name .repo -prune -o -name .git -prune -o -name out -prune -o -name res -type d`; do
find $dir -type f -name '*\.xml' -exec grep --color -n "$@" {} +
done
@@ -1273,7 +1271,7 @@
function tracedmdump()
{
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP."
return
@@ -1450,7 +1448,7 @@
echo "Couldn't locate output files. Try running 'lunch' first." >&2
return
fi
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
@@ -1467,7 +1465,7 @@
# simple shortcut to the runtest command
function runtest()
{
- T=$(gettop)
+ local T=$(gettop)
if [ ! "$T" ]; then
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
@@ -1480,7 +1478,8 @@
echo "Usage: godir <regex>"
return
fi
- T=$(gettop)
+ local T=$(gettop)
+ local FILELIST
if [ ! "$OUT_DIR" = "" ]; then
mkdir -p $OUT_DIR
FILELIST=$OUT_DIR/filelist
diff --git a/target/board/generic/sepolicy/logpersist.te b/target/board/generic/sepolicy/logpersist.te
index 0c52986..3fc0250 100644
--- a/target/board/generic/sepolicy/logpersist.te
+++ b/target/board/generic/sepolicy/logpersist.te
@@ -10,3 +10,4 @@
# Write to /dev/ttyS2 and /dev/ttyGF2.
allow logpersist serial_device:chr_file { write open };
+get_prop(logpersist, qemu_cmdline)
diff --git a/target/board/generic/sepolicy/property.te b/target/board/generic/sepolicy/property.te
index 22d580a..a486702 100644
--- a/target/board/generic/sepolicy/property.te
+++ b/target/board/generic/sepolicy/property.te
@@ -1,3 +1,4 @@
type qemu_prop, property_type;
+type qemu_cmdline, property_type;
type radio_noril_prop, property_type;
type opengles_prop, property_type;
diff --git a/target/board/generic/sepolicy/property_contexts b/target/board/generic/sepolicy/property_contexts
index 142b062..c66a85f 100644
--- a/target/board/generic/sepolicy/property_contexts
+++ b/target/board/generic/sepolicy/property_contexts
@@ -1,4 +1,5 @@
qemu. u:object_r:qemu_prop:s0
+qemu.cmdline u:object_r:qemu_cmdline:s0
ro.emu. u:object_r:qemu_prop:s0
ro.emulator. u:object_r:qemu_prop:s0
ro.radio.noril u:object_r:radio_noril_prop:s0
diff --git a/target/board/generic/sepolicy/qemu_props.te b/target/board/generic/sepolicy/qemu_props.te
index 6768ce7..95174d6 100644
--- a/target/board/generic/sepolicy/qemu_props.te
+++ b/target/board/generic/sepolicy/qemu_props.te
@@ -9,3 +9,4 @@
set_prop(qemu_props, dalvik_prop)
set_prop(qemu_props, config_prop)
set_prop(qemu_props, opengles_prop)
+set_prop(qemu_props, qemu_cmdline)
diff --git a/target/product/core_minimal.mk b/target/product/core_minimal.mk
index fe1a382..701a69c 100644
--- a/target/product/core_minimal.mk
+++ b/target/product/core_minimal.mk
@@ -125,6 +125,9 @@
PRODUCT_COPY_FILES += \
system/core/rootdir/etc/public.libraries.android.txt:system/etc/public.libraries.txt
+PRODUCT_COPY_FILES += \
+ system/core/rootdir/etc/ld.config.txt:system/etc/ld.config.txt
+
# Different dexopt types for different package update/install times.
# On eng builds, make "boot" reasons do pure JIT for faster turnaround.
ifeq (eng,$(TARGET_BUILD_VARIANT))
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index e26c628..a9075c9 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -20,6 +20,7 @@
PRODUCT_PACKAGES += \
adb \
adbd \
+ android.hardware.configstore@1.0-service \
android.hidl.allocator@1.0-service \
android.hidl.memory@1.0-impl \
atrace \
@@ -84,18 +85,15 @@
# SELinux packages
PRODUCT_PACKAGES += \
- file_contexts.bin \
- nonplat_file_contexts \
nonplat_mac_permissions.xml \
nonplat_property_contexts \
nonplat_seapp_contexts \
nonplat_service_contexts \
- plat_file_contexts \
plat_mac_permissions.xml \
plat_property_contexts \
plat_seapp_contexts \
plat_service_contexts \
- selinux_version
+ selinux_policy
# AID Generation for
# <pwd.h> and <grp.h>
diff --git a/tests/envsetup_tests.sh b/tests/envsetup_tests.sh
new file mode 100755
index 0000000..4aae255
--- /dev/null
+++ b/tests/envsetup_tests.sh
@@ -0,0 +1,36 @@
+#!/bin/bash -e
+
+source $(dirname $0)/../envsetup.sh
+
+unset TARGET_PRODUCT TARGET_BUILD_VARIANT TARGET_PLATFORM_VERSION
+
+function check_lunch
+(
+ echo lunch $1
+ set +e
+ lunch $1 > /dev/null 2> /dev/null
+ set -e
+ [ "$TARGET_PRODUCT" = "$2" ] || ( echo "lunch $1: expected TARGET_PRODUCT='$2', got '$TARGET_PRODUCT'" && exit 1 )
+ [ "$TARGET_BUILD_VARIANT" = "$3" ] || ( echo "lunch $1: expected TARGET_BUILD_VARIANT='$3', got '$TARGET_BUILD_VARIANT'" && exit 1 )
+ [ "$TARGET_PLATFORM_VERSION" = "$4" ] || ( echo "lunch $1: expected TARGET_PLATFORM_VERSION='$4', got '$TARGET_PLATFORM_VERSION'" && exit 1 )
+)
+
+default_version=$(get_build_var DEFAULT_PLATFORM_VERSION)
+valid_version=PPR1
+
+# lunch tests
+check_lunch "aosp_arm64" "aosp_arm64" "eng" "$default_version"
+check_lunch "aosp_arm64-userdebug" "aosp_arm64" "userdebug" "$default_version"
+check_lunch "aosp_arm64-userdebug-$valid_version" "aosp_arm64" "userdebug" "$valid_version"
+check_lunch "abc" "" "" ""
+check_lunch "aosp_arm64-abc" "" "" ""
+check_lunch "aosp_arm64-userdebug-abc" "" "" ""
+check_lunch "aosp_arm64-abc-$valid_version" "" "" ""
+check_lunch "abc-userdebug-$valid_version" "" "" ""
+check_lunch "-" "" "" ""
+check_lunch "--" "" "" ""
+check_lunch "-userdebug" "" "" ""
+check_lunch "-userdebug-" "" "" ""
+check_lunch "-userdebug-$valid_version" "" "" ""
+check_lunch "aosp_arm64-userdebug-$valid_version-" "" "" ""
+check_lunch "aosp_arm64-userdebug-$valid_version-abc" "" "" ""
diff --git a/tools/fs_config/README b/tools/fs_config/README
index d884e32..9919131 100644
--- a/tools/fs_config/README
+++ b/tools/fs_config/README
@@ -3,8 +3,7 @@
| _ <| __|| _ || | || \/ || __|
\__|\_/\_____/\__|__/|_____/\__ \__/\_____/
-
-Generating the android_filesystem_config.h
+Generating the android_filesystem_config.h:
To generate the android_filesystem_config.h file, one can choose from
one of two methods. The first method is to declare
@@ -140,3 +139,26 @@
To add new tests, simply add a test_<xxx> method to the test class. It will automatically
get picked up and added to the test suite.
+
+Using the android_filesystem_config.h:
+
+The tool fs_config_generate is built as a dependency of the fs_config_dirs
+and fs_config_files host targets. It #includes the supplied or generated
+android_filesystem_config.h file described above, and can be instructed to
+generate the binary data that lands in the device target locations
+/system/etc/fs_config_dirs and /system/etc/fs_config_files, and in the host's
+${OUT} locations ${OUT}/target/product/<device>/system/etc/fs_config_dirs and
+${OUT}/target/product/<device>/system/etc/fs_config_files. The binary files
+are interpreted by the libcutils fs_config() function, along with the built-in
+defaults, to serve as overrides that complete the results. The target files
+are used by filesystem and adb tools to ensure that file and directory
+properties are preserved during runtime operations. The host files in the
+${OUT} directory are used in the final stages of building the filesystem
+images to set file and directory properties.
+
+fs_config_generate --help reports:
+
+Generate binary content for fs_config_dirs (-D) and fs_config_files (-F)
+from device-specific android_filesystem_config.h override
+
+Usage: fs_config_generate -D|-F [-o output-file]
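+
+For example, a hypothetical invocation regenerating the files blob for a
+device (output path assumed from the locations above) would be:
+
+  fs_config_generate -F -o ${OUT}/target/product/<device>/system/etc/fs_config_files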
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 9403a77..2b7aee4 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -73,6 +73,25 @@
OPTIONS.replace_verity_private_key = False
OPTIONS.is_signing = False
+
+class OutputFile(object):
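+ """Sinks an image file either into output_zip (staged in a temp file and
+ added when Write() is called) or, with no zip, at its final location under
+ input_dir; Write() is then a no-op."""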
+ def __init__(self, output_zip, input_dir, prefix, name):
+ self._output_zip = output_zip
+ self.input_name = os.path.join(input_dir, prefix, name)
+
+ if self._output_zip:
+ self._zip_name = os.path.join(prefix, name)
+
+ root, suffix = os.path.splitext(name)
+ self.name = common.MakeTempFile(prefix=root + '-', suffix=suffix)
+ else:
+ self.name = self.input_name
+
+ def Write(self):
+ if self._output_zip:
+ common.ZipWrite(self._output_zip, self.name, self._zip_name)
+
+
def GetCareMap(which, imgname):
"""Generate care_map of system (or vendor) partition"""
@@ -98,10 +117,10 @@
"""Turn the contents of SYSTEM into a system image and store it in
output_zip. Returns the name of the system image file."""
- prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
- if os.path.exists(prebuilt_path):
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.img")
+ if os.path.exists(img.input_name):
print("system.img already exists in %s, no need to rebuild..." % (prefix,))
- return prebuilt_path
+ return img.input_name
def output_sink(fn, data):
ofile = open(os.path.join(OPTIONS.input_tmp, "SYSTEM", fn), "w")
@@ -113,74 +132,52 @@
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
- block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map")
- imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict,
- block_list=block_list)
+ block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.map")
+ CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
+ block_list=block_list)
- common.ZipWrite(output_zip, imgname, prefix + "system.img")
- common.ZipWrite(output_zip, block_list, prefix + "system.map")
- return imgname
-
-
-def BuildSystem(input_dir, info_dict, block_list=None):
- """Build the (sparse) system image and return the name of a temp
- file containing it."""
- return CreateImage(input_dir, info_dict, "system", block_list=block_list)
+ return img.name
def AddSystemOther(output_zip, prefix="IMAGES/"):
"""Turn the contents of SYSTEM_OTHER into a system_other image
and store it in output_zip."""
- prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system_other.img")
- if os.path.exists(prebuilt_path):
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system_other.img")
+ if os.path.exists(img.input_name):
print("system_other.img already exists in %s, no need to rebuild..." % (
prefix,))
return
- imgname = BuildSystemOther(OPTIONS.input_tmp, OPTIONS.info_dict)
- common.ZipWrite(output_zip, imgname, prefix + "system_other.img")
-
-def BuildSystemOther(input_dir, info_dict):
- """Build the (sparse) system_other image and return the name of a temp
- file containing it."""
- return CreateImage(input_dir, info_dict, "system_other", block_list=None)
+ CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
def AddVendor(output_zip, prefix="IMAGES/"):
"""Turn the contents of VENDOR into a vendor image and store in it
output_zip."""
- prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "vendor.img")
- if os.path.exists(prebuilt_path):
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.img")
+ if os.path.exists(img.input_name):
print("vendor.img already exists in %s, no need to rebuild..." % (prefix,))
- return prebuilt_path
+ return img.input_name
- block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
- imgname = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict,
- block_list=block_list)
- common.ZipWrite(output_zip, imgname, prefix + "vendor.img")
- common.ZipWrite(output_zip, block_list, prefix + "vendor.map")
- return imgname
+ block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.map")
+ CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
+ block_list=block_list)
+ return img.name
-def BuildVendor(input_dir, info_dict, block_list=None):
- """Build the (sparse) vendor image and return the name of a temp
- file containing it."""
- return CreateImage(input_dir, info_dict, "vendor", block_list=block_list)
-
-
-def CreateImage(input_dir, info_dict, what, block_list=None):
+def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
print("creating " + what + ".img...")
- img = common.MakeTempFile(prefix=what + "-", suffix=".img")
-
# The name of the directory it is making an image out of matters to
# mkyaffs2image. It wants "system" but we have a directory named
# "SYSTEM", so create a symlink.
+ temp_dir = tempfile.mkdtemp()
+ OPTIONS.tempfiles.append(temp_dir)
try:
os.symlink(os.path.join(input_dir, what.upper()),
- os.path.join(input_dir, what))
+ os.path.join(temp_dir, what))
except OSError as e:
# bogus error on my mac version?
# File "./build/tools/releasetools/img_from_target_files"
@@ -215,12 +212,16 @@
if fs_config:
image_props["fs_config"] = fs_config
if block_list:
- image_props["block_list"] = block_list
+ image_props["block_list"] = block_list.name
- succ = build_image.BuildImage(os.path.join(input_dir, what),
- image_props, img)
+ succ = build_image.BuildImage(os.path.join(temp_dir, what),
+ image_props, output_file.name)
assert succ, "build " + what + ".img image failed"
+ output_file.Write()
+ if block_list:
+ block_list.Write()
+
is_verity_partition = "verity_block_device" in image_props
verity_supported = image_props.get("verity") == "true"
if is_verity_partition and verity_supported:
@@ -229,8 +230,6 @@
adjusted_blocks_key = what + "_adjusted_partition_size"
info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
- return img
-
def AddUserdata(output_zip, prefix="IMAGES/"):
"""Create a userdata image and store it in output_zip.
@@ -241,8 +240,8 @@
in OPTIONS.info_dict.
"""
- prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata.img")
- if os.path.exists(prebuilt_path):
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "userdata.img")
+ if os.path.exists(img.input_name):
print("userdata.img already exists in %s, no need to rebuild..." % (
prefix,))
return
@@ -265,6 +264,7 @@
# empty dir named "data", or a symlink to the DATA dir,
# and build the image from that.
temp_dir = tempfile.mkdtemp()
+ OPTIONS.tempfiles.append(temp_dir)
user_dir = os.path.join(temp_dir, "data")
empty = (OPTIONS.info_dict.get("userdata_img_with_data") != "true")
if empty:
@@ -275,8 +275,6 @@
os.symlink(os.path.join(OPTIONS.input_tmp, "DATA"),
user_dir)
- img = tempfile.NamedTemporaryFile()
-
fstab = OPTIONS.info_dict["fstab"]
if fstab:
image_props["fs_type"] = fstab["/data"].fs_type
@@ -284,17 +282,15 @@
assert succ, "build userdata.img image failed"
common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
- common.ZipWrite(output_zip, img.name, prefix + "userdata.img")
- img.close()
- shutil.rmtree(temp_dir)
+ img.Write()
def AddVBMeta(output_zip, boot_img_path, system_img_path, prefix="IMAGES/"):
"""Create a VBMeta image and store it in output_zip."""
- _, img_file_name = tempfile.mkstemp()
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vbmeta.img")
avbtool = os.getenv('AVBTOOL') or "avbtool"
cmd = [avbtool, "make_vbmeta_image",
- "--output", img_file_name,
+ "--output", img.name,
"--include_descriptors_from_image", boot_img_path,
"--include_descriptors_from_image", system_img_path,
"--generate_dm_verity_cmdline_from_hashtree", system_img_path]
@@ -305,19 +301,19 @@
p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "avbtool make_vbmeta_image failed"
- common.ZipWrite(output_zip, img_file_name, prefix + "vbmeta.img")
+ img.Write()
def AddPartitionTable(output_zip, prefix="IMAGES/"):
"""Create a partition table image and store it in output_zip."""
- _, img_file_name = tempfile.mkstemp()
- _, bpt_file_name = tempfile.mkstemp()
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.img")
+ bpt = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.bpt")
# use BPTTOOL from environ, or "bpttool" if empty or not set.
bpttool = os.getenv("BPTTOOL") or "bpttool"
- cmd = [bpttool, "make_table", "--output_json", bpt_file_name,
- "--output_gpt", img_file_name]
+ cmd = [bpttool, "make_table", "--output_json", bpt.name,
+ "--output_gpt", img.name]
input_files_str = OPTIONS.info_dict["board_bpt_input_files"]
input_files = input_files_str.split(" ")
for i in input_files:
@@ -333,15 +329,15 @@
p.communicate()
assert p.returncode == 0, "bpttool make_table failed"
- common.ZipWrite(output_zip, img_file_name, prefix + "partition-table.img")
- common.ZipWrite(output_zip, bpt_file_name, prefix + "partition-table.bpt")
+ img.Write()
+ bpt.Write()
def AddCache(output_zip, prefix="IMAGES/"):
"""Create an empty cache image and store it in output_zip."""
- prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "cache.img")
- if os.path.exists(prebuilt_path):
+ img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "cache.img")
+ if os.path.exists(img.input_name):
print("cache.img already exists in %s, no need to rebuild..." % (prefix,))
return
@@ -362,9 +358,9 @@
# mkyaffs2image. So we create a temp dir, and within it we create an
# empty dir named "cache", and build the image from that.
temp_dir = tempfile.mkdtemp()
+ OPTIONS.tempfiles.append(temp_dir)
user_dir = os.path.join(temp_dir, "cache")
os.mkdir(user_dir)
- img = tempfile.NamedTemporaryFile()
fstab = OPTIONS.info_dict["fstab"]
if fstab:
@@ -373,38 +369,41 @@
assert succ, "build cache.img image failed"
common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
- common.ZipWrite(output_zip, img.name, prefix + "cache.img")
- img.close()
- os.rmdir(user_dir)
- os.rmdir(temp_dir)
+ img.Write()
def AddImagesToTargetFiles(filename):
- OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
+ if os.path.isdir(filename):
+ OPTIONS.input_tmp = os.path.abspath(filename)
+ input_zip = None
+ else:
+ OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
if not OPTIONS.add_missing:
- for n in input_zip.namelist():
- if n.startswith("IMAGES/"):
- print("target_files appears to already contain images.")
- sys.exit(1)
+ if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
+ print("target_files appears to already contain images.")
+ sys.exit(1)
- try:
- input_zip.getinfo("VENDOR/")
- has_vendor = True
- except KeyError:
- has_vendor = False
+ has_vendor = os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR"))
+ has_system_other = os.path.isdir(os.path.join(OPTIONS.input_tmp,
+ "SYSTEM_OTHER"))
- has_system_other = "SYSTEM_OTHER/" in input_zip.namelist()
+ if input_zip:
+ OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
- OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
-
- common.ZipClose(input_zip)
- output_zip = zipfile.ZipFile(filename, "a",
- compression=zipfile.ZIP_DEFLATED,
- allowZip64=True)
+ common.ZipClose(input_zip)
+ output_zip = zipfile.ZipFile(filename, "a",
+ compression=zipfile.ZIP_DEFLATED,
+ allowZip64=True)
+ else:
+ OPTIONS.info_dict = common.LoadInfoDict(filename, filename)
+ output_zip = None
+ images_dir = os.path.join(OPTIONS.input_tmp, "IMAGES")
+ if not os.path.isdir(images_dir):
+ os.makedirs(images_dir)
+ images_dir = None
has_recovery = (OPTIONS.info_dict.get("no_recovery") != "true")
- system_root_image = (OPTIONS.info_dict.get("system_root_image", None) == "true")
def banner(s):
print("\n\n++++ " + s + " ++++\n\n")
@@ -422,7 +421,10 @@
boot_image = common.GetBootableImage(
"IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
if boot_image:
- boot_image.AddToZip(output_zip)
+ if output_zip:
+ boot_image.AddToZip(output_zip)
+ else:
+ boot_image.WriteToDir(OPTIONS.input_tmp)
recovery_image = None
if has_recovery:
@@ -438,7 +440,10 @@
recovery_image = common.GetBootableImage(
"IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
if recovery_image:
- recovery_image.AddToZip(output_zip)
+ if output_zip:
+ recovery_image.AddToZip(output_zip)
+ else:
+ recovery_image.WriteToDir(OPTIONS.input_tmp)
banner("recovery (two-step image)")
# The special recovery.img for two-step package use.
@@ -446,7 +451,10 @@
"IMAGES/recovery-two-step.img", "recovery-two-step.img",
OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
if recovery_two_step_image:
- recovery_two_step_image.AddToZip(output_zip)
+ if output_zip:
+ recovery_two_step_image.AddToZip(output_zip)
+ else:
+ recovery_two_step_image.WriteToDir(OPTIONS.input_tmp)
banner("system")
system_img_path = AddSystem(
@@ -502,24 +510,39 @@
img_vendor_dir = os.path.join(
OPTIONS.input_tmp, "VENDOR_IMAGES")
if os.path.exists(img_radio_path):
- common.ZipWrite(output_zip, img_radio_path,
- os.path.join("IMAGES", img_name))
+ if output_zip:
+ common.ZipWrite(output_zip, img_radio_path,
+ os.path.join("IMAGES", img_name))
+ else:
+ shutil.copy(img_radio_path, prebuilt_path)
else:
for root, _, files in os.walk(img_vendor_dir):
if img_name in files:
- common.ZipWrite(output_zip, os.path.join(root, img_name),
- os.path.join("IMAGES", img_name))
+ if output_zip:
+ common.ZipWrite(output_zip, os.path.join(root, img_name),
+ os.path.join("IMAGES", img_name))
+ else:
+ shutil.copy(os.path.join(root, img_name), prebuilt_path)
break
- # Zip spec says: All slashes MUST be forward slashes.
- img_path = 'IMAGES/' + img_name
- assert img_path in output_zip.namelist(), "cannot find " + img_name
+ if output_zip:
+ # Zip spec says: All slashes MUST be forward slashes.
+ img_path = 'IMAGES/' + img_name
+ assert img_path in output_zip.namelist(), "cannot find " + img_name
+ else:
+ img_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
+ assert os.path.exists(img_path), "cannot find " + img_name
if care_map_list:
file_path = "META/care_map.txt"
- common.ZipWriteStr(output_zip, file_path, '\n'.join(care_map_list))
+ if output_zip:
+ common.ZipWriteStr(output_zip, file_path, '\n'.join(care_map_list))
+ else:
+ with open(os.path.join(OPTIONS.input_tmp, file_path), 'w') as fp:
+ fp.write('\n'.join(care_map_list))
- common.ZipClose(output_zip)
+ if output_zip:
+ common.ZipClose(output_zip)
def main(argv):
def option_handler(o, a):
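
The OutputFile helper added above carries the core of this change: when
output_zip is set, each image is built into a temp file and published by
Write(); when the input is a directory, name aliases the final IMAGES/ path,
the image is built in place, and Write() is a no-op. A standalone sketch of
that two-mode pattern, using plain tempfile/zipfile as stand-ins for
common.MakeTempFile/common.ZipWrite:

import os
import tempfile

def build_into(output_zip, input_dir, prefix, name, build_fn):
    # build_fn(path) writes the finished image at path, like BuildImage().
    final_path = os.path.join(input_dir, prefix, name)
    if output_zip is not None:
        fd, path = tempfile.mkstemp(suffix=os.path.splitext(name)[1])
        os.close(fd)
        build_fn(path)  # zip mode: build into a temp file...
        output_zip.write(path, os.path.join(prefix, name))  # ...then publish
    else:
        build_fn(final_path)  # directory mode: build straight into IMAGES/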
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index c204c90..e385866 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -24,8 +24,8 @@
import os.path
import re
import subprocess
+import sys
import threading
-import tempfile
from collections import deque, OrderedDict
from hashlib import sha1
@@ -35,69 +35,65 @@
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
-def compute_patch(src, tgt, imgdiff=False):
- srcfd, srcfile = tempfile.mkstemp(prefix="src-")
- tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-")
- patchfd, patchfile = tempfile.mkstemp(prefix="patch-")
- os.close(patchfd)
+def compute_patch(srcfile, tgtfile, imgdiff=False):
+ patchfile = common.MakeTempFile(prefix='patch-')
- try:
- with os.fdopen(srcfd, "wb") as f_src:
- for p in src:
- f_src.write(p)
+ cmd = ['imgdiff', '-z'] if imgdiff else ['bsdiff']
+ cmd.extend([srcfile, tgtfile, patchfile])
- with os.fdopen(tgtfd, "wb") as f_tgt:
- for p in tgt:
- f_tgt.write(p)
- try:
- os.unlink(patchfile)
- except OSError:
- pass
- if imgdiff:
- p = subprocess.call(["imgdiff", "-z", srcfile, tgtfile, patchfile],
- stdout=open("/dev/null", "a"),
- stderr=subprocess.STDOUT)
- else:
- p = subprocess.call(["bsdiff", srcfile, tgtfile, patchfile])
+ # Not using common.Run(), which would otherwise dump all the bsdiff/imgdiff
+ # commands when OPTIONS.verbose is True - not useful for the case here, since
+ # they contain temp filenames only.
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output, _ = p.communicate()
- if p:
- raise ValueError("diff failed: " + str(p))
+ if p.returncode != 0:
+ raise ValueError(output)
- with open(patchfile, "rb") as f:
- return f.read()
- finally:
- try:
- os.unlink(srcfile)
- os.unlink(tgtfile)
- os.unlink(patchfile)
- except OSError:
- pass
+ with open(patchfile, 'rb') as f:
+ return f.read()
class Image(object):
+ def RangeSha1(self, ranges):
+ raise NotImplementedError
+
def ReadRangeSet(self, ranges):
raise NotImplementedError
def TotalSha1(self, include_clobbered_blocks=False):
raise NotImplementedError
+ def WriteRangeDataToFd(self, ranges, fd):
+ raise NotImplementedError
+
class EmptyImage(Image):
"""A zero-length image."""
- blocksize = 4096
- care_map = RangeSet()
- clobbered_blocks = RangeSet()
- extended = RangeSet()
- total_blocks = 0
- file_map = {}
+
+ def __init__(self):
+ self.blocksize = 4096
+ self.care_map = RangeSet()
+ self.clobbered_blocks = RangeSet()
+ self.extended = RangeSet()
+ self.total_blocks = 0
+ self.file_map = {}
+
+ def RangeSha1(self, ranges):
+ return sha1().hexdigest()
+
def ReadRangeSet(self, ranges):
return ()
+
def TotalSha1(self, include_clobbered_blocks=False):
# EmptyImage always carries empty clobbered_blocks, so
# include_clobbered_blocks can be ignored.
assert self.clobbered_blocks.size() == 0
return sha1().hexdigest()
+ def WriteRangeDataToFd(self, ranges, fd):
+ raise ValueError("Can't write data from EmptyImage to file")
+
class DataImage(Image):
"""An image wrapped around a single string of data."""
@@ -160,23 +156,39 @@
if clobbered_blocks:
self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
+ def _GetRangeData(self, ranges):
+ for s, e in ranges:
+ yield self.data[s*self.blocksize:e*self.blocksize]
+
+ def RangeSha1(self, ranges):
+ h = sha1()
+ for data in self._GetRangeData(ranges):
+ h.update(data)
+ return h.hexdigest()
+
def ReadRangeSet(self, ranges):
- return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
+ return [self._GetRangeData(ranges)]
def TotalSha1(self, include_clobbered_blocks=False):
if not include_clobbered_blocks:
- ranges = self.care_map.subtract(self.clobbered_blocks)
- return sha1(self.ReadRangeSet(ranges)).hexdigest()
+ return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
else:
return sha1(self.data).hexdigest()
+ def WriteRangeDataToFd(self, ranges, fd):
+ for data in self._GetRangeData(ranges):
+ fd.write(data)
+
class Transfer(object):
- def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, style, by_id):
+ def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
+ src_sha1, style, by_id):
self.tgt_name = tgt_name
self.src_name = src_name
self.tgt_ranges = tgt_ranges
self.src_ranges = src_ranges
+ self.tgt_sha1 = tgt_sha1
+ self.src_sha1 = src_sha1
self.style = style
self.intact = (getattr(tgt_ranges, "monotonic", False) and
getattr(src_ranges, "monotonic", False))
@@ -251,6 +263,9 @@
# Implementations are free to break up the data into list/tuple
# elements in any way that is convenient.
#
+# RangeSha1(): a function that returns (as a hex string) the SHA-1
+# hash of all the data in the specified range.
+#
# TotalSha1(): a function that returns (as a hex string) the SHA-1
# hash of all the data in the image (ie, all the blocks in the
# care_map minus clobbered_blocks, or including the clobbered
@@ -277,7 +292,7 @@
self.touched_src_sha1 = None
self.disable_imgdiff = disable_imgdiff
- assert version in (1, 2, 3, 4)
+ assert version in (3, 4)
self.tgt = tgt
if src is None:
@@ -316,14 +331,11 @@
self.FindVertexSequence()
# Fix up the ordering dependencies that the sequence didn't
# satisfy.
- if self.version == 1:
- self.RemoveBackwardEdges()
- else:
- self.ReverseBackwardEdges()
- self.ImproveVertexSequence()
+ self.ReverseBackwardEdges()
+ self.ImproveVertexSequence()
# Ensure the runtime stash size is under the limit.
- if self.version >= 2 and common.OPTIONS.cache_size is not None:
+ if common.OPTIONS.cache_size is not None:
self.ReviseStashSize()
# Double-check our work.
@@ -332,15 +344,6 @@
self.ComputePatches(prefix)
self.WriteTransfers(prefix)
- def HashBlocks(self, source, ranges): # pylint: disable=no-self-use
- data = source.ReadRangeSet(ranges)
- ctx = sha1()
-
- for p in data:
- ctx.update(p)
-
- return ctx.hexdigest()
-
def WriteTransfers(self, prefix):
def WriteSplitTransfers(out, style, target_blocks):
"""Limit the size of operand in command 'new' and 'zero' to 1024 blocks.
@@ -361,13 +364,6 @@
out = []
total = 0
- # In BBOTA v2, 'stashes' records the map from 'stash_raw_id' to 'stash_id'
- # (aka 'sid', which is the stash slot id). The stash in a 'stash_id' will
- # be freed immediately after its use. So unlike 'stash_raw_id' (which
- # uniquely identifies each pair of stashed blocks), the same 'stash_id'
- # may be reused during the life cycle of an update (maintained by
- # 'free_stash_ids' heap and 'next_stash_id').
- #
# In BBOTA v3+, it uses the hash of the stashed blocks as the stash slot
# id. 'stashes' records the map from 'hash' to the ref count. The stash
# will be freed only if the count decrements to zero.
@@ -375,36 +371,17 @@
stashed_blocks = 0
max_stashed_blocks = 0
- if self.version == 2:
- free_stash_ids = []
- next_stash_id = 0
-
for xf in self.transfers:
- if self.version < 2:
- assert not xf.stash_before
- assert not xf.use_stash
-
- for stash_raw_id, sr in xf.stash_before:
- if self.version == 2:
- assert stash_raw_id not in stashes
- if free_stash_ids:
- sid = heapq.heappop(free_stash_ids)
- else:
- sid = next_stash_id
- next_stash_id += 1
- stashes[stash_raw_id] = sid
- stashed_blocks += sr.size()
- out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+ for _, sr in xf.stash_before:
+ sh = self.src.RangeSha1(sr)
+ if sh in stashes:
+ stashes[sh] += 1
else:
- sh = self.HashBlocks(self.src, sr)
- if sh in stashes:
- stashes[sh] += 1
- else:
- stashes[sh] = 1
- stashed_blocks += sr.size()
- self.touched_src_ranges = self.touched_src_ranges.union(sr)
- out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
+ stashes[sh] = 1
+ stashed_blocks += sr.size()
+ self.touched_src_ranges = self.touched_src_ranges.union(sr)
+ out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
if stashed_blocks > max_stashed_blocks:
max_stashed_blocks = stashed_blocks
@@ -412,75 +389,47 @@
free_string = []
free_size = 0
- if self.version == 1:
- src_str = xf.src_ranges.to_string_raw() if xf.src_ranges else ""
- elif self.version >= 2:
+ # <# blocks> <src ranges>
+ # OR
+ # <# blocks> <src ranges> <src locs> <stash refs...>
+ # OR
+ # <# blocks> - <stash refs...>
- # <# blocks> <src ranges>
- # OR
- # <# blocks> <src ranges> <src locs> <stash refs...>
- # OR
- # <# blocks> - <stash refs...>
+ size = xf.src_ranges.size()
+ src_str = [str(size)]
- size = xf.src_ranges.size()
- src_str = [str(size)]
+ unstashed_src_ranges = xf.src_ranges
+ mapped_stashes = []
+ for _, sr in xf.use_stash:
+ unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+ sh = self.src.RangeSha1(sr)
+ sr = xf.src_ranges.map_within(sr)
+ mapped_stashes.append(sr)
+ assert sh in stashes
+ src_str.append("%s:%s" % (sh, sr.to_string_raw()))
+ stashes[sh] -= 1
+ if stashes[sh] == 0:
+ free_string.append("free %s\n" % (sh,))
+ free_size += sr.size()
+ stashes.pop(sh)
- unstashed_src_ranges = xf.src_ranges
- mapped_stashes = []
- for stash_raw_id, sr in xf.use_stash:
- unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
- sh = self.HashBlocks(self.src, sr)
- sr = xf.src_ranges.map_within(sr)
- mapped_stashes.append(sr)
- if self.version == 2:
- sid = stashes.pop(stash_raw_id)
- src_str.append("%d:%s" % (sid, sr.to_string_raw()))
- # A stash will be used only once. We need to free the stash
- # immediately after the use, instead of waiting for the automatic
- # clean-up at the end. Because otherwise it may take up extra space
- # and lead to OTA failures.
- # Bug: 23119955
- free_string.append("free %d\n" % (sid,))
- free_size += sr.size()
- heapq.heappush(free_stash_ids, sid)
- else:
- assert sh in stashes
- src_str.append("%s:%s" % (sh, sr.to_string_raw()))
- stashes[sh] -= 1
- if stashes[sh] == 0:
- free_string.append("free %s\n" % (sh,))
- free_size += sr.size()
- stashes.pop(sh)
-
- if unstashed_src_ranges:
- src_str.insert(1, unstashed_src_ranges.to_string_raw())
- if xf.use_stash:
- mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
- src_str.insert(2, mapped_unstashed.to_string_raw())
- mapped_stashes.append(mapped_unstashed)
- self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
- else:
- src_str.insert(1, "-")
+ if unstashed_src_ranges:
+ src_str.insert(1, unstashed_src_ranges.to_string_raw())
+ if xf.use_stash:
+ mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
+ src_str.insert(2, mapped_unstashed.to_string_raw())
+ mapped_stashes.append(mapped_unstashed)
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+ else:
+ src_str.insert(1, "-")
+ self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
- src_str = " ".join(src_str)
+ src_str = " ".join(src_str)
- # all versions:
+ # version 3+:
# zero <rangeset>
# new <rangeset>
# erase <rangeset>
- #
- # version 1:
- # bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
- # imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
- # move <src rangeset> <tgt rangeset>
- #
- # version 2:
- # bsdiff patchstart patchlen <tgt rangeset> <src_str>
- # imgdiff patchstart patchlen <tgt rangeset> <src_str>
- # move <tgt rangeset> <src_str>
- #
- # version 3:
# bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
# imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
# move hash <tgt rangeset> <src_str>
@@ -495,41 +444,6 @@
assert xf.tgt_ranges
assert xf.src_ranges.size() == tgt_size
if xf.src_ranges != xf.tgt_ranges:
- if self.version == 1:
- out.append("%s %s %s\n" % (
- xf.style,
- xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
- elif self.version == 2:
- out.append("%s %s %s\n" % (
- xf.style,
- xf.tgt_ranges.to_string_raw(), src_str))
- elif self.version >= 3:
- # take into account automatic stashing of overlapping blocks
- if xf.src_ranges.overlaps(xf.tgt_ranges):
- temp_stash_usage = stashed_blocks + xf.src_ranges.size()
- if temp_stash_usage > max_stashed_blocks:
- max_stashed_blocks = temp_stash_usage
-
- self.touched_src_ranges = self.touched_src_ranges.union(
- xf.src_ranges)
-
- out.append("%s %s %s %s\n" % (
- xf.style,
- self.HashBlocks(self.tgt, xf.tgt_ranges),
- xf.tgt_ranges.to_string_raw(), src_str))
- total += tgt_size
- elif xf.style in ("bsdiff", "imgdiff"):
- assert xf.tgt_ranges
- assert xf.src_ranges
- if self.version == 1:
- out.append("%s %d %d %s %s\n" % (
- xf.style, xf.patch_start, xf.patch_len,
- xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
- elif self.version == 2:
- out.append("%s %d %d %s %s\n" % (
- xf.style, xf.patch_start, xf.patch_len,
- xf.tgt_ranges.to_string_raw(), src_str))
- elif self.version >= 3:
# take into account automatic stashing of overlapping blocks
if xf.src_ranges.overlaps(xf.tgt_ranges):
temp_stash_usage = stashed_blocks + xf.src_ranges.size()
@@ -539,12 +453,28 @@
self.touched_src_ranges = self.touched_src_ranges.union(
xf.src_ranges)
- out.append("%s %d %d %s %s %s %s\n" % (
+ out.append("%s %s %s %s\n" % (
xf.style,
- xf.patch_start, xf.patch_len,
- self.HashBlocks(self.src, xf.src_ranges),
- self.HashBlocks(self.tgt, xf.tgt_ranges),
+ xf.tgt_sha1,
xf.tgt_ranges.to_string_raw(), src_str))
+ total += tgt_size
+ elif xf.style in ("bsdiff", "imgdiff"):
+ assert xf.tgt_ranges
+ assert xf.src_ranges
+ # take into account automatic stashing of overlapping blocks
+ if xf.src_ranges.overlaps(xf.tgt_ranges):
+ temp_stash_usage = stashed_blocks + xf.src_ranges.size()
+ if temp_stash_usage > max_stashed_blocks:
+ max_stashed_blocks = temp_stash_usage
+
+ self.touched_src_ranges = self.touched_src_ranges.union(xf.src_ranges)
+
+ out.append("%s %d %d %s %s %s %s\n" % (
+ xf.style,
+ xf.patch_start, xf.patch_len,
+ xf.src_sha1,
+ xf.tgt_sha1,
+ xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style == "zero":
assert xf.tgt_ranges
@@ -558,7 +488,7 @@
out.append("".join(free_string))
stashed_blocks -= free_size
- if self.version >= 2 and common.OPTIONS.cache_size is not None:
+ if common.OPTIONS.cache_size is not None:
# Sanity check: abort if we're going to need more stash space than
# the allowed size (cache_size * threshold). There are two purposes
# of having a threshold here. a) Part of the cache may have been
@@ -573,9 +503,7 @@
self.tgt.blocksize, max_allowed, cache_size,
stash_threshold)
- if self.version >= 3:
- self.touched_src_sha1 = self.HashBlocks(
- self.src, self.touched_src_ranges)
+ self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
@@ -603,32 +531,25 @@
out.insert(0, "%d\n" % (self.version,)) # format version number
out.insert(1, "%d\n" % (total,))
- if self.version == 2:
- # v2 only: after the total block count, we give the number of stash slots
- # needed, and the maximum size needed (in blocks).
- out.insert(2, str(next_stash_id) + "\n")
- out.insert(3, str(max_stashed_blocks) + "\n")
- elif self.version >= 3:
- # v3+: the number of stash slots is unused.
- out.insert(2, "0\n")
- out.insert(3, str(max_stashed_blocks) + "\n")
+ # v3+: the number of stash slots is unused.
+ out.insert(2, "0\n")
+ out.insert(3, str(max_stashed_blocks) + "\n")
with open(prefix + ".transfer.list", "wb") as f:
for i in out:
f.write(i)
- if self.version >= 2:
- self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
- OPTIONS = common.OPTIONS
- if OPTIONS.cache_size is not None:
- max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
- print("max stashed blocks: %d (%d bytes), "
- "limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, self._max_stashed_size, max_allowed,
- self._max_stashed_size * 100.0 / max_allowed))
- else:
- print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
- max_stashed_blocks, self._max_stashed_size))
+ self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+ OPTIONS = common.OPTIONS
+ if OPTIONS.cache_size is not None:
+ max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
+ print("max stashed blocks: %d (%d bytes), "
+ "limit: %d bytes (%.2f%%)\n" % (
+ max_stashed_blocks, self._max_stashed_size, max_allowed,
+ self._max_stashed_size * 100.0 / max_allowed))
+ else:
+ print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
+ max_stashed_blocks, self._max_stashed_size))
def ReviseStashSize(self):
print("Revising stash size...")
@@ -656,10 +577,6 @@
stashed_blocks = 0
new_blocks = 0
- if self.version == 2:
- free_stash_ids = []
- next_stash_id = 0
-
# Now go through all the commands. Compute the required stash size on the
# fly. If a command requires excess stash than available, it deletes the
# stash by replacing the command that uses the stash with a "new" command
@@ -671,12 +588,9 @@
for stash_raw_id, sr in xf.stash_before:
# Check the post-command stashed_blocks.
stashed_blocks_after = stashed_blocks
- if self.version == 2:
+ sh = self.src.RangeSha1(sr)
+ if sh not in stashes:
stashed_blocks_after += sr.size()
- else:
- sh = self.HashBlocks(self.src, sr)
- if sh not in stashes:
- stashed_blocks_after += sr.size()
if stashed_blocks_after > max_allowed:
# We cannot stash this one for a later command. Find out the command
@@ -686,24 +600,15 @@
print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
else:
# Update the stashes map.
- if self.version == 2:
- assert stash_raw_id not in stashes
- if free_stash_ids:
- sid = heapq.heappop(free_stash_ids)
- else:
- sid = next_stash_id
- next_stash_id += 1
- stashes[stash_raw_id] = sid
+ if sh in stashes:
+ stashes[sh] += 1
else:
- if sh in stashes:
- stashes[sh] += 1
- else:
- stashes[sh] = 1
+ stashes[sh] = 1
stashed_blocks = stashed_blocks_after
# "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
# ComputePatches(), they both have the style of "diff".
- if xf.style == "diff" and self.version >= 3:
+ if xf.style == "diff":
assert xf.tgt_ranges and xf.src_ranges
if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed:
@@ -725,18 +630,13 @@
cmd.ConvertToNew()
# xf.use_stash may generate free commands.
- for stash_raw_id, sr in xf.use_stash:
- if self.version == 2:
- sid = stashes.pop(stash_raw_id)
+ for _, sr in xf.use_stash:
+ sh = self.src.RangeSha1(sr)
+ assert sh in stashes
+ stashes[sh] -= 1
+ if stashes[sh] == 0:
stashed_blocks -= sr.size()
- heapq.heappush(free_stash_ids, sid)
- else:
- sh = self.HashBlocks(self.src, sr)
- assert sh in stashes
- stashes[sh] -= 1
- if stashes[sh] == 0:
- stashed_blocks -= sr.size()
- stashes.pop(sh)
+ stashes.pop(sh)
num_of_bytes = new_blocks * self.tgt.blocksize
print(" Total %d blocks (%d bytes) are packed as new blocks due to "
@@ -745,10 +645,10 @@
def ComputePatches(self, prefix):
print("Reticulating splines...")
- diff_q = []
+ diff_queue = []
patch_num = 0
with open(prefix + ".new.dat", "wb") as new_f:
- for xf in self.transfers:
+ for index, xf in enumerate(self.transfers):
if xf.style == "zero":
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s" % (
@@ -756,17 +656,13 @@
str(xf.tgt_ranges)))
elif xf.style == "new":
- for piece in self.tgt.ReadRangeSet(xf.tgt_ranges):
- new_f.write(piece)
+ self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s" % (
tgt_size, tgt_size, 100.0, xf.style,
xf.tgt_name, str(xf.tgt_ranges)))
elif xf.style == "diff":
- src = self.src.ReadRangeSet(xf.src_ranges)
- tgt = self.tgt.ReadRangeSet(xf.tgt_ranges)
-
# We can't compare src and tgt directly because they may have
# the same content but be broken up into blocks differently, eg:
#
@@ -775,20 +671,11 @@
# We want those to compare equal, ideally without having to
# actually concatenate the strings (these may be tens of
# megabytes).
-
- src_sha1 = sha1()
- for p in src:
- src_sha1.update(p)
- tgt_sha1 = sha1()
- tgt_size = 0
- for p in tgt:
- tgt_sha1.update(p)
- tgt_size += len(p)
-
- if src_sha1.digest() == tgt_sha1.digest():
+ if xf.src_sha1 == xf.tgt_sha1:
# These are identical; we don't need to generate a patch,
# just issue copy commands on the device.
xf.style = "move"
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
if xf.src_ranges != xf.tgt_ranges:
print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
tgt_size, tgt_size, 100.0, xf.style,
@@ -815,38 +702,74 @@
xf.tgt_name.split(".")[-1].lower()
in ("apk", "jar", "zip"))
xf.style = "imgdiff" if imgdiff else "bsdiff"
- diff_q.append((tgt_size, src, tgt, xf, patch_num))
+ diff_queue.append((index, imgdiff, patch_num))
patch_num += 1
else:
assert False, "unknown style " + xf.style
- if diff_q:
+ if diff_queue:
if self.threads > 1:
print("Computing patches (using %d threads)..." % (self.threads,))
else:
print("Computing patches...")
- diff_q.sort()
- patches = [None] * patch_num
+ diff_total = len(diff_queue)
+ patches = [None] * diff_total
+ error_messages = []
+ if sys.stdout.isatty():
+ global diff_done
+ diff_done = 0
- # TODO: Rewrite with multiprocessing.ThreadPool?
+      # Using multiprocessing doesn't give additional benefits, due to the
+      # pattern of the code. The diffing work is done by a subprocess, which
+      # already runs in a separate process (and is thus not much affected by
+      # the GIL - Global Interpreter Lock). Using multiprocessing would also
+      # require either a) writing the diff input files in the main process
+      # before forking, or b) reopening the image file (SparseImage) in the
+      # worker processes. Neither of those would improve performance further.
lock = threading.Lock()
def diff_worker():
while True:
with lock:
- if not diff_q:
+ if not diff_queue:
return
- tgt_size, src, tgt, xf, patchnum = diff_q.pop()
- patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff"))
- size = len(patch)
+ xf_index, imgdiff, patch_index = diff_queue.pop()
+
+ xf = self.transfers[xf_index]
+ src_ranges = xf.src_ranges
+ tgt_ranges = xf.tgt_ranges
+
+ # Needs lock since WriteRangeDataToFd() is stateful (calling seek).
with lock:
- patches[patchnum] = (patch, xf)
- print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
- size, tgt_size, size * 100.0 / tgt_size, xf.style,
- xf.tgt_name if xf.tgt_name == xf.src_name else (
- xf.tgt_name + " (from " + xf.src_name + ")"),
- str(xf.tgt_ranges), str(xf.src_ranges)))
+ src_file = common.MakeTempFile(prefix="src-")
+ with open(src_file, "wb") as fd:
+ self.src.WriteRangeDataToFd(src_ranges, fd)
+
+ tgt_file = common.MakeTempFile(prefix="tgt-")
+ with open(tgt_file, "wb") as fd:
+ self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
+
+ try:
+ patch = compute_patch(src_file, tgt_file, imgdiff)
+ except ValueError as e:
+ with lock:
+ error_messages.append(
+ "Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
+ "imgdiff" if imgdiff else "bsdiff",
+ xf.tgt_name if xf.tgt_name == xf.src_name else
+ xf.tgt_name + " (from " + xf.src_name + ")",
+ xf.tgt_ranges, xf.src_ranges, e.message))
+
+ with lock:
+ patches[patch_index] = (xf_index, patch)
+ if sys.stdout.isatty():
+ global diff_done
+ diff_done += 1
+ progress = diff_done * 100 / diff_total
+ # '\033[K' is to clear to EOL.
+ print(' [%d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
+ sys.stdout.flush()
threads = [threading.Thread(target=diff_worker)
for _ in range(self.threads)]
@@ -854,16 +777,33 @@
th.start()
while threads:
threads.pop().join()
+
+ if sys.stdout.isatty():
+ print('\n')
+
+ if error_messages:
+ print('\n'.join(error_messages))
+ sys.exit(1)
else:
patches = []
- p = 0
- with open(prefix + ".patch.dat", "wb") as patch_f:
- for patch, xf in patches:
- xf.patch_start = p
+ offset = 0
+ with open(prefix + ".patch.dat", "wb") as patch_fd:
+ for index, patch in patches:
+ xf = self.transfers[index]
xf.patch_len = len(patch)
- patch_f.write(patch)
- p += len(patch)
+ xf.patch_start = offset
+ offset += xf.patch_len
+ patch_fd.write(patch)
+
+ if common.OPTIONS.verbose:
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+ print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
+ xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
+ xf.style,
+ xf.tgt_name if xf.tgt_name == xf.src_name else (
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ xf.tgt_ranges, xf.src_ranges))
def AssertSequenceGood(self):
# Simulate the sequences of transfers we will output, and check that:
@@ -878,9 +818,8 @@
# Check that the input blocks for this transfer haven't yet been touched.
x = xf.src_ranges
- if self.version >= 2:
- for _, sr in xf.use_stash:
- x = x.subtract(sr)
+ for _, sr in xf.use_stash:
+ x = x.subtract(sr)
for s, e in x:
# Source image could be larger. Don't check the blocks that are in the
@@ -1211,7 +1150,9 @@
# Change nothing for small files.
if (tgt_ranges.size() <= max_blocks_per_transfer and
src_ranges.size() <= max_blocks_per_transfer):
- Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+ self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+ style, by_id)
return
while (tgt_ranges.size() > max_blocks_per_transfer and
@@ -1221,8 +1162,9 @@
tgt_first = tgt_ranges.first(max_blocks_per_transfer)
src_first = src_ranges.first(max_blocks_per_transfer)
- Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
- by_id)
+ Transfer(tgt_split_name, src_split_name, tgt_first, src_first,
+ self.tgt.RangeSha1(tgt_first), self.src.RangeSha1(src_first),
+ style, by_id)
tgt_ranges = tgt_ranges.subtract(tgt_first)
src_ranges = src_ranges.subtract(src_first)
@@ -1234,8 +1176,9 @@
assert tgt_ranges.size() and src_ranges.size()
tgt_split_name = "%s-%d" % (tgt_name, pieces)
src_split_name = "%s-%d" % (src_name, pieces)
- Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
- by_id)
+ Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges,
+ self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+ style, by_id)
def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
split=False):
@@ -1244,7 +1187,9 @@
# We specialize diff transfers only (which covers bsdiff/imgdiff/move);
# otherwise add the Transfer() as is.
if style != "diff" or not split:
- Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+ self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+ style, by_id)
return
# Handle .odex files specially to analyze the block-wise difference. If
@@ -1325,7 +1270,7 @@
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
- "diff", self.transfers, self.version >= 3)
+ "diff", self.transfers, True)
continue
b = os.path.basename(tgt_fn)
@@ -1333,7 +1278,7 @@
# Look for an exact basename match in the source.
src_fn = self.src_basenames[b]
AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers, self.version >= 3)
+ "diff", self.transfers, True)
continue
b = re.sub("[0-9]+", "#", b)
@@ -1344,7 +1289,7 @@
# that get bumped.)
src_fn = self.src_numpatterns[b]
AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers, self.version >= 3)
+ "diff", self.transfers, True)
continue
AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
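
The rewritten compute_patch() above now takes file paths and shells out to
bsdiff/imgdiff directly, capturing combined stdout/stderr so a failure
surfaces the tool's own message instead of the old "diff failed: <code>". A
self-contained equivalent of that flow (assuming the bsdiff/imgdiff binaries
are on the PATH; mkstemp stands in for common.MakeTempFile, which also
registers the file for cleanup):

import os
import subprocess
import tempfile

def make_patch(src_path, tgt_path, imgdiff=False):
    fd, patch_path = tempfile.mkstemp(prefix="patch-")
    os.close(fd)
    cmd = ["imgdiff", "-z"] if imgdiff else ["bsdiff"]
    cmd.extend([src_path, tgt_path, patch_path])
    # Capture stderr alongside stdout so the raised error carries it.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output, _ = p.communicate()
    if p.returncode != 0:
        raise ValueError(output)
    with open(patch_path, "rb") as f:
        return f.read()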
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 73cd07e..16c8018 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -25,7 +25,6 @@
import re
import subprocess
import sys
-import commands
import common
import shlex
import shutil
@@ -52,29 +51,24 @@
return (output, p.returncode)
def GetVerityFECSize(partition_size):
- cmd = "fec -s %d" % partition_size
- status, output = commands.getstatusoutput(cmd)
- if status:
- print output
+ cmd = ["fec", "-s", str(partition_size)]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
return False, 0
return True, int(output)
def GetVerityTreeSize(partition_size):
- cmd = "build_verity_tree -s %d"
- cmd %= partition_size
- status, output = commands.getstatusoutput(cmd)
- if status:
- print output
+ cmd = ["build_verity_tree", "-s", str(partition_size)]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
return False, 0
return True, int(output)
def GetVerityMetadataSize(partition_size):
- cmd = "system/extras/verity/build_verity_metadata.py size %d"
- cmd %= partition_size
-
- status, output = commands.getstatusoutput(cmd)
- if status:
- print output
+ cmd = ["system/extras/verity/build_verity_metadata.py", "size",
+ str(partition_size)]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
return False, 0
return True, int(output)
@@ -191,21 +185,19 @@
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
padding_size):
- cmd = "fec -e -p %d %s %s %s" % (padding_size, sparse_image_path,
- verity_path, verity_fec_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
+ verity_path, verity_fec_path]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "Could not build FEC data! Error: %s" % output
return False
return True
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
- cmd = "build_verity_tree -A %s %s %s" % (
- FIXED_SALT, sparse_image_path, verity_image_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
+ verity_image_path]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "Could not build verity tree! Error: %s" % output
return False
root, salt = output.split()
@@ -215,16 +207,13 @@
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key, signer_args):
- cmd_template = (
- "system/extras/verity/build_verity_metadata.py build " +
- "%s %s %s %s %s %s %s")
- cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
- block_device, signer_path, key)
+ cmd = ["system/extras/verity/build_verity_metadata.py", "build",
+ str(image_size), verity_metadata_path, root_hash, salt, block_device,
+ signer_path, key]
if signer_args:
- cmd += " --signer_args=\"%s\"" % (' '.join(signer_args),)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "Could not build verity metadata! Error: %s" % output
return False
return True
@@ -238,22 +227,19 @@
Returns:
True on success, False on failure.
"""
- cmd = "append2simg %s %s"
- cmd %= (sparse_image_path, unsparse_image_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
+ cmd = ["append2simg", sparse_image_path, unsparse_image_path]
+ output, exit_code = RunCommand(cmd)
+ if exit_code != 0:
print "%s: %s" % (error_message, output)
return False
return True
def Append(target, file_to_append, error_message):
- cmd = 'cat %s >> %s' % (file_to_append, target)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
- print "%s: %s" % (error_message, output)
- return False
+ print "appending %s to %s" % (file_to_append, target)
+ with open(target, "a") as out_file:
+ with open(file_to_append, "r") as input_file:
+ for line in input_file:
+ out_file.write(line)
return True
def BuildVerifiedImage(data_image_path, verity_image_path,
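
Every build_image.py hunk above follows one recipe: a shell string run
through the removed commands module becomes an argv list run through the
file's existing RunCommand() helper, which (as the surviving line near the
top of this diff shows) returns (output, returncode). A minimal stand-in
with that contract:

import subprocess

def run_command(cmd):
    # cmd is an argv list, e.g. ["fec", "-s", "1048576"]; no shell is
    # involved, so there are no quoting pitfalls to escape around.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output, _ = p.communicate()
    return output, p.returncode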
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index 3048488..f9aa4fa 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -235,7 +235,7 @@
self.certmap = None
def LoadZipFile(self, filename):
- d, z = common.UnzipTemp(filename, '*.apk')
+ d, z = common.UnzipTemp(filename, ['*.apk'])
try:
self.apks = {}
self.apks_by_basename = {}
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index f174747..e200f9f 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -564,7 +564,7 @@
def unzip_to_dir(filename, dirname):
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
- cmd.append(pattern)
+ cmd.extend(pattern)
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
if p.returncode != 0:
@@ -1191,6 +1191,10 @@
t.flush()
return t
+ def WriteToDir(self, d):
+ with open(os.path.join(d, self.name), "wb") as fp:
+ fp.write(self.data)
+
def AddToZip(self, z, compression=None):
ZipWriteStr(z, self.name, self.data, compress_type=compression)
@@ -1336,6 +1340,7 @@
version = max(
int(i) for i in
OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+ assert version >= 3
self.version = version
b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
@@ -1400,7 +1405,7 @@
# incremental OTA
else:
- if touched_blocks_only and self.version >= 3:
+ if touched_blocks_only:
ranges = self.touched_src_ranges
expected_sha1 = self.touched_src_sha1
else:
@@ -1412,16 +1417,12 @@
return
ranges_str = ranges.to_string_raw()
- if self.version >= 3:
- script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
- 'block_image_verify("%s", '
- 'package_extract_file("%s.transfer.list"), '
- '"%s.new.dat", "%s.patch.dat")) then') % (
- self.device, ranges_str, expected_sha1,
- self.device, partition, partition, partition))
- else:
- script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
- self.device, ranges_str, self.src.TotalSha1()))
+ script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
+ 'block_image_verify("%s", '
+ 'package_extract_file("%s.transfer.list"), '
+ '"%s.new.dat", "%s.patch.dat")) then') % (
+ self.device, ranges_str, expected_sha1,
+ self.device, partition, partition, partition))
script.Print('Verified %s image...' % (partition,))
script.AppendExtra('else')
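
The UnzipTemp() change above turns the optional pattern into a list, so
unzip_to_dir() extends the unzip command with every pattern instead of
appending a single string; check_target_files_signatures.py is updated
accordingly to pass ['*.apk']. A standalone sketch of the resulting call
shape:

import subprocess

def unzip_to_dir(filename, dirname, patterns=None):
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if patterns is not None:
        # A list allows several include patterns in one invocation,
        # e.g. patterns=["*.apk", "META/*"] (illustrative values).
        cmd.extend(patterns)
    subprocess.check_call(cmd)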
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 343f344..2a9a417 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -277,36 +277,6 @@
self.script.append('wipe_block_device("%s", %s);' % (device, size))
- def DeleteFiles(self, file_list):
- """Delete all files in file_list."""
- if not file_list:
- return
- cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
- self.script.append(self.WordWrap(cmd))
-
- def DeleteFilesIfNotMatching(self, file_list):
- """Delete the file in file_list if not matching the checksum."""
- if not file_list:
- return
- for name, sha1 in file_list:
- cmd = ('sha1_check(read_file("{name}"), "{sha1}") || '
- 'delete("{name}");'.format(name=name, sha1=sha1))
- self.script.append(self.WordWrap(cmd))
-
- def RenameFile(self, srcfile, tgtfile):
- """Moves a file from one location to another."""
- if self.info.get("update_rename_support", False):
- self.script.append('rename("%s", "%s");' % (srcfile, tgtfile))
- else:
- raise ValueError("Rename not supported by update binary")
-
- def SkipNextActionIfTargetExists(self, tgtfile, tgtsha1):
- """Prepend an action with an apply_patch_check in order to
- skip the action if the file exists. Used when a patch
- is later renamed."""
- cmd = ('sha1_check(read_file("%s"), %s) ||' % (tgtfile, tgtsha1))
- self.script.append(self.WordWrap(cmd))
-
def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
"""Apply binary patches (in *patchpairs) to the given srcfile to
produce tgtfile (which may be "-" to indicate overwriting the
@@ -343,48 +313,6 @@
raise ValueError(
"don't know how to write \"%s\" partitions" % p.fs_type)
- def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
- """Set file ownership and permissions."""
- if not self.info.get("use_set_metadata", False):
- self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
- else:
- if capabilities is None:
- capabilities = "0x0"
- cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \
- '"capabilities", %s' % (fn, uid, gid, mode, capabilities)
- if selabel is not None:
- cmd += ', "selabel", "%s"' % selabel
- cmd += ');'
- self.script.append(cmd)
-
- def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel,
- capabilities):
- """Recursively set path ownership and permissions."""
- if not self.info.get("use_set_metadata", False):
- self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
- % (uid, gid, dmode, fmode, fn))
- else:
- if capabilities is None:
- capabilities = "0x0"
- cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
- '"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \
- % (fn, uid, gid, dmode, fmode, capabilities)
- if selabel is not None:
- cmd += ', "selabel", "%s"' % selabel
- cmd += ');'
- self.script.append(cmd)
-
- def MakeSymlinks(self, symlink_list):
- """Create symlinks, given a list of (dest, link) pairs."""
- by_dest = {}
- for d, l in symlink_list:
- by_dest.setdefault(d, []).append(l)
-
- for dest, links in sorted(by_dest.iteritems()):
- cmd = ('symlink("%s", ' % (dest,) +
- ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
- self.script.append(self.WordWrap(cmd))
-
def AppendExtra(self, extra):
"""Append text verbatim to the output script."""
self.script.append(extra)
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index be01a6d..f75bb96 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -55,7 +55,6 @@
properties on the OEM partition of the intended device.
Multiple expected values can be used by providing multiple files.
-
--oem_no_mount
For devices with OEM-specific properties but without an OEM partition,
do not mount the OEM partition in the updater-script. This should be
@@ -95,9 +94,10 @@
using the new recovery (new kernel, etc.).
--block
- Generate a block-based OTA if possible. Will fall back to a
- file-based OTA if the target_files is older and doesn't support
- block-based OTAs.
+      Generate a block-based OTA for a non-A/B device. Support for
+      file-based OTAs has been deprecated since O. Block-based OTAs are now
+      the default for all non-A/B devices, so this flag is kept only to
+      avoid breaking existing callers.
-b (--binary) <file>
Use the given binary as the update-binary in the output package,
@@ -156,8 +156,6 @@
OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.verify = False
-OPTIONS.require_verbatim = set()
-OPTIONS.prohibit_verbatim = set(("system/build.prop",))
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
@@ -168,7 +166,7 @@
OPTIONS.worker_threads = 1
OPTIONS.two_step = False
OPTIONS.no_signing = False
-OPTIONS.block_based = False
+OPTIONS.block_based = True
OPTIONS.updater_binary = None
OPTIONS.oem_source = None
OPTIONS.oem_no_mount = False
@@ -182,274 +180,10 @@
OPTIONS.log_diff = None
OPTIONS.payload_signer = None
OPTIONS.payload_signer_args = []
+OPTIONS.extracted_input = None
METADATA_NAME = 'META-INF/com/android/metadata'
-
-def MostPopularKey(d, default):
- """Given a dict, return the key corresponding to the largest
- value. Returns 'default' if the dict is empty."""
- x = [(v, k) for (k, v) in d.iteritems()]
- if not x:
- return default
- x.sort()
- return x[-1][1]
-
-
-def IsSymlink(info):
- """Return true if the zipfile.ZipInfo object passed in represents a
- symlink."""
- return (info.external_attr >> 16) & 0o770000 == 0o120000
-
-def IsRegular(info):
- """Return true if the zipfile.ZipInfo object passed in represents a
- regular file."""
- return (info.external_attr >> 16) & 0o770000 == 0o100000
-
-def ClosestFileMatch(src, tgtfiles, existing):
- """Returns the closest file match between a source file and list
- of potential matches. The exact filename match is preferred,
- then the sha1 is searched for, and finally a file with the same
- basename is evaluated. Rename support in the updater-binary is
- required for the latter checks to be used."""
-
- result = tgtfiles.get("path:" + src.name)
- if result is not None:
- return result
-
- if not OPTIONS.target_info_dict.get("update_rename_support", False):
- return None
-
- if src.size < 1000:
- return None
-
- result = tgtfiles.get("sha1:" + src.sha1)
- if result is not None and existing.get(result.name) is None:
- return result
- result = tgtfiles.get("file:" + src.name.split("/")[-1])
- if result is not None and existing.get(result.name) is None:
- return result
- return None
-
-class ItemSet(object):
- def __init__(self, partition, fs_config):
- self.partition = partition
- self.fs_config = fs_config
- self.ITEMS = {}
-
- def Get(self, name, is_dir=False):
- if name not in self.ITEMS:
- self.ITEMS[name] = Item(self, name, is_dir=is_dir)
- return self.ITEMS[name]
-
- def GetMetadata(self, input_zip):
- # The target_files contains a record of what the uid,
- # gid, and mode are supposed to be.
- output = input_zip.read(self.fs_config)
-
- for line in output.split("\n"):
- if not line:
- continue
- columns = line.split()
- name, uid, gid, mode = columns[:4]
- selabel = None
- capabilities = None
-
- # After the first 4 columns, there are a series of key=value
- # pairs. Extract out the fields we care about.
- for element in columns[4:]:
- key, value = element.split("=")
- if key == "selabel":
- selabel = value
- if key == "capabilities":
- capabilities = value
-
- i = self.ITEMS.get(name, None)
- if i is not None:
- i.uid = int(uid)
- i.gid = int(gid)
- i.mode = int(mode, 8)
- i.selabel = selabel
- i.capabilities = capabilities
- if i.is_dir:
- i.children.sort(key=lambda i: i.name)
-
- # Set metadata for the files generated by this script. For full recovery
- # image at system/etc/recovery.img, it will be taken care by fs_config.
- i = self.ITEMS.get("system/recovery-from-boot.p", None)
- if i:
- i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None
- i = self.ITEMS.get("system/etc/install-recovery.sh", None)
- if i:
- i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None
-
-
-class Item(object):
- """Items represent the metadata (user, group, mode) of files and
- directories in the system image."""
- def __init__(self, itemset, name, is_dir=False):
- self.itemset = itemset
- self.name = name
- self.uid = None
- self.gid = None
- self.mode = None
- self.selabel = None
- self.capabilities = None
- self.is_dir = is_dir
- self.descendants = None
- self.best_subtree = None
-
- if name:
- self.parent = itemset.Get(os.path.dirname(name), is_dir=True)
- self.parent.children.append(self)
- else:
- self.parent = None
- if self.is_dir:
- self.children = []
-
- def Dump(self, indent=0):
- if self.uid is not None:
- print("%s%s %d %d %o" % (
- " " * indent, self.name, self.uid, self.gid, self.mode))
- else:
- print("%s%s %s %s %s" % (
- " " * indent, self.name, self.uid, self.gid, self.mode))
- if self.is_dir:
- print("%s%s" % (" " * indent, self.descendants))
- print("%s%s" % (" " * indent, self.best_subtree))
- for i in self.children:
- i.Dump(indent=indent+1)
-
- def CountChildMetadata(self):
- """Count up the (uid, gid, mode, selabel, capabilities) tuples for
- all children and determine the best strategy for using set_perm_recursive
- and set_perm to correctly chown/chmod all the files to their desired
- values. Recursively calls itself for all descendants.
-
- Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count}
- counting up all descendants of this node. (dmode or fmode may be None.)
- Also sets the best_subtree of each directory Item to the (uid, gid, dmode,
- fmode, selabel, capabilities) tuple that will match the most descendants of
- that Item.
- """
-
- assert self.is_dir
- key = (self.uid, self.gid, self.mode, None, self.selabel,
- self.capabilities)
- self.descendants = {key: 1}
- d = self.descendants
- for i in self.children:
- if i.is_dir:
- for k, v in i.CountChildMetadata().iteritems():
- d[k] = d.get(k, 0) + v
- else:
- k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)
- d[k] = d.get(k, 0) + 1
-
- # Find the (uid, gid, dmode, fmode, selabel, capabilities)
- # tuple that matches the most descendants.
-
- # First, find the (uid, gid) pair that matches the most
- # descendants.
- ug = {}
- for (uid, gid, _, _, _, _), count in d.iteritems():
- ug[(uid, gid)] = ug.get((uid, gid), 0) + count
- ug = MostPopularKey(ug, (0, 0))
-
- # Now find the dmode, fmode, selabel, and capabilities that match
- # the most descendants with that (uid, gid), and choose those.
- best_dmode = (0, 0o755)
- best_fmode = (0, 0o644)
- best_selabel = (0, None)
- best_capabilities = (0, None)
- for k, count in d.iteritems():
- if k[:2] != ug:
- continue
- if k[2] is not None and count >= best_dmode[0]:
- best_dmode = (count, k[2])
- if k[3] is not None and count >= best_fmode[0]:
- best_fmode = (count, k[3])
- if k[4] is not None and count >= best_selabel[0]:
- best_selabel = (count, k[4])
- if k[5] is not None and count >= best_capabilities[0]:
- best_capabilities = (count, k[5])
- self.best_subtree = ug + (
- best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])
-
- return d
-
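
CountChildMetadata leans on a MostPopularKey helper defined earlier in this script; assuming the conventional implementation, it simply picks the key with the largest count:

    # Sketch of the assumed helper: return the key whose count is
    # largest, or 'default' if the dict is empty.
    def MostPopularKey(d, default):
        if not d:
            return default
        return max(d.items(), key=lambda kv: kv[1])[0]
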
- def SetPermissions(self, script):
- """Append set_perm/set_perm_recursive commands to 'script' to
- set all permissions, users, and groups for the tree of files
- rooted at 'self'."""
-
- self.CountChildMetadata()
-
- def recurse(item, current):
- # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple
- # that the current item (and all its children) have already been set to.
- # We only need to issue set_perm/set_perm_recursive commands if we're
- # supposed to be something different.
- if item.is_dir:
- if current != item.best_subtree:
- script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
- current = item.best_subtree
-
- if item.uid != current[0] or item.gid != current[1] or \
- item.mode != current[2] or item.selabel != current[4] or \
- item.capabilities != current[5]:
- script.SetPermissions("/"+item.name, item.uid, item.gid,
- item.mode, item.selabel, item.capabilities)
-
- for i in item.children:
- recurse(i, current)
- else:
- if item.uid != current[0] or item.gid != current[1] or \
- item.mode != current[3] or item.selabel != current[4] or \
- item.capabilities != current[5]:
- script.SetPermissions("/"+item.name, item.uid, item.gid,
- item.mode, item.selabel, item.capabilities)
-
- recurse(self, (-1, -1, -1, -1, None, None))
-
-
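
For orientation, each SetPermissionsRecursive/SetPermissions call above becomes a single statement in the installer script; with selabel and capabilities in play, the emitted edify looks roughly like this (values made up for illustration):

    # set_metadata_recursive("/system", "uid", 0, "gid", 0,
    #     "dmode", 0755, "fmode", 0644,
    #     "selabel", "u:object_r:system_file:s0");
    # set_metadata("/system/bin/sh", "uid", 0, "gid", 2000,
    #     "mode", 0755, "selabel", "u:object_r:shell_exec:s0");
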
-def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
- """Copies files for the partition in the input zip to the output
- zip. Populates the Item class with their metadata, and returns a
- list of symlinks. output_zip may be None, in which case the copy is
- skipped (but the other side effects still happen). substitute is an
- optional dict of {output filename: contents} to be output instead of
- certain input files.
- """
-
- symlinks = []
-
- partition = itemset.partition
-
- for info in input_zip.infolist():
- prefix = partition.upper() + "/"
- if info.filename.startswith(prefix):
- basefilename = info.filename[len(prefix):]
- if IsSymlink(info):
- symlinks.append((input_zip.read(info.filename),
- "/" + partition + "/" + basefilename))
- else:
- info2 = copy.copy(info)
- fn = info2.filename = partition + "/" + basefilename
- if substitute and fn in substitute and substitute[fn] is None:
- continue
- if output_zip is not None:
- if substitute and fn in substitute:
- data = substitute[fn]
- else:
- data = input_zip.read(info.filename)
- common.ZipWriteStr(output_zip, info2, data)
- if fn.endswith("/"):
- itemset.Get(fn[:-1], is_dir=True)
- else:
- itemset.Get(fn)
-
- symlinks.sort()
- return symlinks
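
CopyPartitionFiles depends on an IsSymlink helper from common.py; the usual check inspects the Unix mode bits stored in the zip entry's external attributes. A sketch under that assumption:

    import stat

    # Sketch: a zip entry is a symlink if the high 16 bits of
    # external_attr carry the S_IFLNK file-type bits.
    def IsSymlink(info):
        return stat.S_ISLNK(info.external_attr >> 16)
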
+UNZIP_PATTERN = ['IMAGES/*', 'META/*']
def SignOutput(temp_zip_name, output_zip_name):
@@ -480,12 +214,12 @@
script.AssertOemProperty(prop, values)
-def _LoadOemDicts(script, recovery_mount_options):
+def _LoadOemDicts(script, recovery_mount_options=None):
"""Returns the list of loaded OEM properties dict."""
oem_dicts = None
if OPTIONS.oem_source is None:
raise common.ExternalError("OEM source required for this build")
- if not OPTIONS.oem_no_mount:
+ if not OPTIONS.oem_no_mount and script:
script.Mount("/oem", recovery_mount_options)
oem_dicts = []
for oem_file in OPTIONS.oem_source:
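
The hunk ends mid-loop; each OEM source file is a plain key=value property file (the real code delegates parsing to a common.py helper). A self-contained sketch with a hypothetical name:

    # Sketch: parse one OEM property file into a dict, skipping blank
    # lines and comments.
    def load_oem_dict(path):
        d = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                key, _, value = line.partition("=")
                d[key.strip()] = value.strip()
        return d
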
@@ -532,6 +266,7 @@
return ("SYSTEM/recovery-from-boot.p" in namelist or
"SYSTEM/etc/recovery.img" in namelist)
+
def HasVendorPartition(target_files_zip):
try:
target_files_zip.getinfo("VENDOR/")
@@ -539,6 +274,7 @@
except KeyError:
return False
+
def GetOemProperty(name, oem_props, oem_dict, info_dict):
if oem_props is not None and name in oem_props:
return oem_dict[name]
@@ -555,36 +291,21 @@
GetBuildProp("ro.build.thumbprint", info_dict))
-def GetImage(which, tmpdir, info_dict):
- # Return an image object (suitable for passing to BlockImageDiff)
- # for the 'which' partition (must be "system" or "vendor"). If a
- # prebuilt image and file map are found in tmpdir they are used,
- # otherwise they are reconstructed from the individual files.
+def GetImage(which, tmpdir):
+ """Returns an image object suitable for passing to BlockImageDiff.
+
+ 'which' partition must be "system" or "vendor". A prebuilt image and file
+ map must already exist in tmpdir.
+ """
assert which in ("system", "vendor")
path = os.path.join(tmpdir, "IMAGES", which + ".img")
mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
- if os.path.exists(path) and os.path.exists(mappath):
- print("using %s.img from target-files" % (which,))
- # This is a 'new' target-files, which already has the image in it.
- else:
- print("building %s.img from target-files" % (which,))
-
- # This is an 'old' target-files, which does not contain images
- # already built. Build them.
-
- mappath = tempfile.mkstemp()[1]
- OPTIONS.tempfiles.append(mappath)
-
- import add_img_to_target_files
- if which == "system":
- path = add_img_to_target_files.BuildSystem(
- tmpdir, info_dict, block_list=mappath)
- elif which == "vendor":
- path = add_img_to_target_files.BuildVendor(
- tmpdir, info_dict, block_list=mappath)
+ # The image and map files must have been created prior to calling
+ # ota_from_target_files.py (since LMP).
+ assert os.path.exists(path) and os.path.exists(mappath)
# Bug: http://b/20939131
  # In ext4 filesystems, block 0 might be changed even while mounted
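
The hunk trims the function's tail; plausibly it finishes by wrapping the two files in a SparseImage, with block 0 marked clobbered for the ext4 reason above (a sketch, not the verbatim code):

    #   clobbered_blocks = "0"
    #   return sparse_img.SparseImage(path, mappath, clobbered_blocks)
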
@@ -611,6 +332,7 @@
target_fp = CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
OPTIONS.info_dict)
metadata = {
+ "post-build": target_fp,
"pre-device": GetOemProperty("ro.product.device", oem_props,
oem_dicts and oem_dicts[0],
OPTIONS.info_dict),
@@ -626,10 +348,9 @@
metadata=metadata,
info_dict=OPTIONS.info_dict)
- has_recovery_patch = HasRecoveryPatch(input_zip)
- block_based = OPTIONS.block_based and has_recovery_patch
+ assert HasRecoveryPatch(input_zip)
- metadata["ota-type"] = "BLOCK" if block_based else "FILE"
+ metadata["ota-type"] = "BLOCK"
ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
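
For context, this metadata dict is later serialized into META-INF/com/android/metadata as key=value lines; a standalone sketch of that formatting (not the tool's exact helper):

    # Sketch: e.g. "ota-type=BLOCK\npost-build=...\npost-timestamp=...\n"
    def format_metadata(metadata):
        return "".join("%s=%s\n" % kv for kv in sorted(metadata.items()))
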
@@ -703,61 +424,27 @@
recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
- system_items = ItemSet("system", "META/filesystem_config.txt")
script.ShowProgress(system_progress, 0)
- if block_based:
- # Full OTA is done as an "incremental" against an empty source
- # image. This has the effect of writing new data from the package
- # to the entire partition, but lets us reuse the updater code that
- # writes incrementals to do it.
- system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
- system_tgt.ResetFileMap()
- system_diff = common.BlockDifference("system", system_tgt, src=None)
- system_diff.WriteScript(script, output_zip)
- else:
- script.FormatPartition("/system")
- script.Mount("/system", recovery_mount_options)
- if not has_recovery_patch:
- script.UnpackPackageDir("recovery", "/system")
- script.UnpackPackageDir("system", "/system")
-
- symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
- script.MakeSymlinks(symlinks)
+ # Full OTA is done as an "incremental" against an empty source image. This
+ # has the effect of writing new data from the package to the entire
+ # partition, but lets us reuse the updater code that writes incrementals to
+ # do it.
+ system_tgt = GetImage("system", OPTIONS.input_tmp)
+ system_tgt.ResetFileMap()
+ system_diff = common.BlockDifference("system", system_tgt, src=None)
+ system_diff.WriteScript(script, output_zip)
boot_img = common.GetBootableImage(
"boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
- if not block_based:
- def output_sink(fn, data):
- common.ZipWriteStr(output_zip, "recovery/" + fn, data)
- system_items.Get("system/" + fn)
-
- common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
- recovery_img, boot_img)
-
- system_items.GetMetadata(input_zip)
- system_items.Get("system").SetPermissions(script)
-
if HasVendorPartition(input_zip):
- vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
script.ShowProgress(0.1, 0)
- if block_based:
- vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
- vendor_tgt.ResetFileMap()
- vendor_diff = common.BlockDifference("vendor", vendor_tgt)
- vendor_diff.WriteScript(script, output_zip)
- else:
- script.FormatPartition("/vendor")
- script.Mount("/vendor", recovery_mount_options)
- script.UnpackPackageDir("vendor", "/vendor")
-
- symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
- script.MakeSymlinks(symlinks)
-
- vendor_items.GetMetadata(input_zip)
- vendor_items.Get("vendor").SetPermissions(script)
+ vendor_tgt = GetImage("vendor", OPTIONS.input_tmp)
+ vendor_tgt.ResetFileMap()
+ vendor_diff = common.BlockDifference("vendor", vendor_tgt)
+ vendor_diff.WriteScript(script, output_zip)
common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
@@ -810,20 +497,6 @@
compress_type=zipfile.ZIP_STORED)
-def LoadPartitionFiles(z, partition):
- """Load all the files from the given partition in a given target-files
- ZipFile, and return a dict of {filename: File object}."""
- out = {}
- prefix = partition.upper() + "/"
- for info in z.infolist():
- if info.filename.startswith(prefix) and not IsSymlink(info):
- basefilename = info.filename[len(prefix):]
- fn = partition + "/" + basefilename
- data = z.read(info.filename)
- out[fn] = common.File(fn, data, info.compress_size)
- return out
-
-
def GetBuildProp(prop, info_dict):
"""Return the fingerprint of the build of a given target-files info_dict."""
try:
@@ -832,18 +505,6 @@
raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
-def AddToKnownPaths(filename, known_paths):
- if filename[-1] == "/":
- return
- dirs = filename.split("/")[:-1]
- while len(dirs) > 0:
- path = "/".join(dirs)
- if path in known_paths:
- break
- known_paths.add(path)
- dirs.pop()
-
-
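
A quick worked example of the AddToKnownPaths walk removed here: it records every ancestor directory of a path, stopping early once an ancestor is already known:

    known = set()
    AddToKnownPaths("system/app/Foo/Foo.apk", known)
    # known == {"system", "system/app", "system/app/Foo"}
    AddToKnownPaths("system/app/Bar/Bar.apk", known)
    # Adds only "system/app/Bar"; the walk stops at "system/app".
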
def HandleDowngradeMetadata(metadata):
# Only incremental OTAs are allowed to reach here.
assert OPTIONS.incremental_source is not None
@@ -872,8 +533,6 @@
def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
- # TODO(tbao): We should factor out the common parts between
- # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage().
source_version = OPTIONS.source_info_dict["recovery_api_version"]
target_version = OPTIONS.target_info_dict["recovery_api_version"]
@@ -933,8 +592,8 @@
target_recovery = common.GetBootableImage(
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
- system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict)
- system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict)
+ system_src = GetImage("system", OPTIONS.source_tmp)
+ system_tgt = GetImage("system", OPTIONS.target_tmp)
blockimgdiff_version = 1
if OPTIONS.info_dict:
@@ -961,10 +620,8 @@
if HasVendorPartition(target_zip):
if not HasVendorPartition(source_zip):
raise RuntimeError("can't generate incremental that adds /vendor")
- vendor_src = GetImage("vendor", OPTIONS.source_tmp,
- OPTIONS.source_info_dict)
- vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
- OPTIONS.target_info_dict)
+ vendor_src = GetImage("vendor", OPTIONS.source_tmp)
+ vendor_tgt = GetImage("vendor", OPTIONS.target_tmp)
# Check first block of vendor partition for remount R/W only if
# disk type is ext4
@@ -1190,7 +847,7 @@
"recovery_mount_options")
oem_dicts = None
if oem_props:
- oem_dicts = _LoadOemDicts(script, oem_props, recovery_mount_options)
+ oem_dicts = _LoadOemDicts(script, recovery_mount_options)
target_fp = CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
OPTIONS.info_dict)
@@ -1234,13 +891,13 @@
recovery_type, recovery_device, recovery_img.size, recovery_img.sha1))
script.AppendExtra("")
- system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
+ system_tgt = GetImage("system", OPTIONS.input_tmp)
system_tgt.ResetFileMap()
system_diff = common.BlockDifference("system", system_tgt, src=None)
system_diff.WriteStrictVerifyScript(script)
if HasVendorPartition(input_zip):
- vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
+ vendor_tgt = GetImage("vendor", OPTIONS.input_tmp)
vendor_tgt.ResetFileMap()
vendor_diff = common.BlockDifference("vendor", vendor_tgt, src=None)
vendor_diff.WriteStrictVerifyScript(script)
@@ -1335,7 +992,7 @@
oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties", None)
oem_dicts = None
if oem_props:
- oem_dicts = _LoadOemDicts(script, None)
+ oem_dicts = _LoadOemDicts(None)
metadata = {
"post-build": CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
@@ -1521,556 +1178,6 @@
common.ZipClose(output_zip)
-class FileDifference(object):
- def __init__(self, partition, source_zip, target_zip, output_zip):
- self.deferred_patch_list = None
- print("Loading target...")
- self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
- print("Loading source...")
- self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
-
- self.verbatim_targets = verbatim_targets = []
- self.patch_list = patch_list = []
- diffs = []
- self.renames = renames = {}
- known_paths = set()
- largest_source_size = 0
-
- matching_file_cache = {}
- for fn, sf in source_data.items():
- assert fn == sf.name
- matching_file_cache["path:" + fn] = sf
- if fn in target_data.keys():
- AddToKnownPaths(fn, known_paths)
- # Only allow eligibility for filename/sha matching
- # if there isn't a perfect path match.
- if target_data.get(sf.name) is None:
- matching_file_cache["file:" + fn.split("/")[-1]] = sf
- matching_file_cache["sha:" + sf.sha1] = sf
-
- for fn in sorted(target_data.keys()):
- tf = target_data[fn]
- assert fn == tf.name
- sf = ClosestFileMatch(tf, matching_file_cache, renames)
- if sf is not None and sf.name != tf.name:
- print("File has moved from " + sf.name + " to " + tf.name)
- renames[sf.name] = tf
-
- if sf is None or fn in OPTIONS.require_verbatim:
- # This file should be included verbatim
- if fn in OPTIONS.prohibit_verbatim:
- raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
- print("send", fn, "verbatim")
- tf.AddToZip(output_zip)
- verbatim_targets.append((fn, tf.size, tf.sha1))
- if fn in target_data.keys():
- AddToKnownPaths(fn, known_paths)
- elif tf.sha1 != sf.sha1:
- # File is different; consider sending as a patch
- diffs.append(common.Difference(tf, sf))
- else:
- # Target file data identical to source (may still be renamed)
- pass
-
- common.ComputeDifferences(diffs)
-
- for diff in diffs:
- tf, sf, d = diff.GetPatch()
- path = "/".join(tf.name.split("/")[:-1])
- if d is None or len(d) > tf.compress_size * OPTIONS.patch_threshold or \
- path not in known_paths:
-      # Patch is almost as big as the file, so don't bother patching;
-      # or a patch + rename cannot take place because the target
-      # directory does not exist.
- tf.AddToZip(output_zip)
- verbatim_targets.append((tf.name, tf.size, tf.sha1))
- if sf.name in renames:
- del renames[sf.name]
- AddToKnownPaths(tf.name, known_paths)
- else:
- common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
- patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
- largest_source_size = max(largest_source_size, sf.size)
-
- self.largest_source_size = largest_source_size
-
- def EmitVerification(self, script):
- so_far = 0
- for tf, sf, _, _ in self.patch_list:
- if tf.name != sf.name:
- script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
- script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
- so_far += sf.size
- return so_far
-
- def EmitExplicitTargetVerification(self, script):
- for fn, _, sha1 in self.verbatim_targets:
- if fn[-1] != "/":
- script.FileCheck("/"+fn, sha1)
- for tf, _, _, _ in self.patch_list:
- script.FileCheck(tf.name, tf.sha1)
-
- def RemoveUnneededFiles(self, script, extras=()):
- file_list = ["/" + i[0] for i in self.verbatim_targets]
- file_list += ["/" + i for i in self.source_data
- if i not in self.target_data and i not in self.renames]
- file_list += list(extras)
- # Sort the list in descending order, which removes all the files first
- # before attempting to remove the folder. (Bug: 22960996)
- script.DeleteFiles(sorted(file_list, reverse=True))
-
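
The descending sort matters because a plain string sort places a directory before the files inside it; reversing the order therefore deletes children before their parent, per the bug reference above. For example:

    sorted(["/system/app/Foo", "/system/app/Foo/Foo.apk"], reverse=True)
    # -> ['/system/app/Foo/Foo.apk', '/system/app/Foo']
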
- def TotalPatchSize(self):
- return sum(i[1].size for i in self.patch_list)
-
- def EmitPatches(self, script, total_patch_size, so_far):
- self.deferred_patch_list = deferred_patch_list = []
- for item in self.patch_list:
- tf, sf, _, _ = item
- if tf.name == "system/build.prop":
- deferred_patch_list.append(item)
- continue
- if sf.name != tf.name:
- script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
- script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1,
- "patch/" + sf.name + ".p")
- so_far += tf.size
- script.SetProgress(so_far / total_patch_size)
- return so_far
-
- def EmitDeferredPatches(self, script):
- for item in self.deferred_patch_list:
- tf, sf, _, _ = item
- script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1,
- "patch/" + sf.name + ".p")
- script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None)
-
- def EmitRenames(self, script):
- if len(self.renames) > 0:
- script.Print("Renaming files...")
- for src, tgt in self.renames.iteritems():
- print("Renaming " + src + " to " + tgt.name)
- script.RenameFile(src, tgt.name)
-
-
-def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
- target_has_recovery_patch = HasRecoveryPatch(target_zip)
- source_has_recovery_patch = HasRecoveryPatch(source_zip)
-
- if (OPTIONS.block_based and
- target_has_recovery_patch and
- source_has_recovery_patch):
- return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip)
-
- source_version = OPTIONS.source_info_dict["recovery_api_version"]
- target_version = OPTIONS.target_info_dict["recovery_api_version"]
-
- if source_version == 0:
- print("WARNING: generating edify script for a source that "
- "can't install it.")
- script = edify_generator.EdifyGenerator(
- source_version, OPTIONS.target_info_dict,
- fstab=OPTIONS.source_info_dict["fstab"])
-
- recovery_mount_options = OPTIONS.source_info_dict.get(
- "recovery_mount_options")
- source_oem_props = OPTIONS.source_info_dict.get("oem_fingerprint_properties")
- target_oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
- oem_dicts = None
- if source_oem_props or target_oem_props:
- oem_dicts = _LoadOemDicts(script, recovery_mount_options)
-
- metadata = {
- "pre-device": GetOemProperty("ro.product.device", source_oem_props,
- oem_dicts and oem_dicts[0],
- OPTIONS.source_info_dict),
- "ota-type": "FILE",
- }
-
- HandleDowngradeMetadata(metadata)
-
- device_specific = common.DeviceSpecificParams(
- source_zip=source_zip,
- source_version=source_version,
- target_zip=target_zip,
- target_version=target_version,
- output_zip=output_zip,
- script=script,
- metadata=metadata,
- info_dict=OPTIONS.source_info_dict)
-
- system_diff = FileDifference("system", source_zip, target_zip, output_zip)
- script.Mount("/system", recovery_mount_options)
- if HasVendorPartition(target_zip):
- vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
- script.Mount("/vendor", recovery_mount_options)
- else:
- vendor_diff = None
-
- target_fp = CalculateFingerprint(target_oem_props, oem_dicts and oem_dicts[0],
- OPTIONS.target_info_dict)
- source_fp = CalculateFingerprint(source_oem_props, oem_dicts and oem_dicts[0],
- OPTIONS.source_info_dict)
-
- if source_oem_props is None and target_oem_props is None:
- script.AssertSomeFingerprint(source_fp, target_fp)
- elif source_oem_props is not None and target_oem_props is not None:
- script.AssertSomeThumbprint(
- GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
- GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
- elif source_oem_props is None and target_oem_props is not None:
- script.AssertFingerprintOrThumbprint(
- source_fp,
- GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict))
- else:
- script.AssertFingerprintOrThumbprint(
- target_fp,
- GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
-
- metadata["pre-build"] = source_fp
- metadata["post-build"] = target_fp
- metadata["pre-build-incremental"] = GetBuildProp(
- "ro.build.version.incremental", OPTIONS.source_info_dict)
- metadata["post-build-incremental"] = GetBuildProp(
- "ro.build.version.incremental", OPTIONS.target_info_dict)
-
- source_boot = common.GetBootableImage(
- "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
- OPTIONS.source_info_dict)
- target_boot = common.GetBootableImage(
- "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
- updating_boot = (not OPTIONS.two_step and
- (source_boot.data != target_boot.data))
-
- source_recovery = common.GetBootableImage(
- "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
- OPTIONS.source_info_dict)
- target_recovery = common.GetBootableImage(
- "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
- updating_recovery = (source_recovery.data != target_recovery.data)
-
- # Here's how we divide up the progress bar:
- # 0.1 for verifying the start state (PatchCheck calls)
- # 0.8 for applying patches (ApplyPatch calls)
- # 0.1 for unpacking verbatim files, symlinking, and doing the
- # device-specific commands.
-
- AppendAssertions(script, OPTIONS.target_info_dict, oem_dicts)
- device_specific.IncrementalOTA_Assertions()
-
- # Two-step incremental package strategy (in chronological order,
- # which is *not* the order in which the generated script has
- # things):
- #
- # if stage is not "2/3" or "3/3":
- # do verification on current system
- # write recovery image to boot partition
- # set stage to "2/3"
- # reboot to boot partition and restart recovery
- # else if stage is "2/3":
- # write recovery image to recovery partition
- # set stage to "3/3"
- # reboot to recovery partition and restart recovery
- # else:
- # (stage must be "3/3")
- # perform update:
- # patch system files, etc.
- # force full install of new boot image
- # set up system to update recovery partition on first boot
- # complete script normally
- # (allow recovery to mark itself finished and reboot)
-
- if OPTIONS.two_step:
- if not OPTIONS.source_info_dict.get("multistage_support", None):
- assert False, "two-step packages not supported by this build"
- fs = OPTIONS.source_info_dict["fstab"]["/misc"]
- assert fs.fs_type.upper() == "EMMC", \
- "two-step packages only supported on devices with EMMC /misc partitions"
- bcb_dev = {"bcb_dev": fs.device}
- common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
- script.AppendExtra("""
-if get_stage("%(bcb_dev)s") == "2/3" then
-""" % bcb_dev)
-
- # Stage 2/3: Write recovery image to /recovery (currently running /boot).
- script.Comment("Stage 2/3")
- script.AppendExtra("sleep(20);\n")
- script.WriteRawImage("/recovery", "recovery.img")
- script.AppendExtra("""
-set_stage("%(bcb_dev)s", "3/3");
-reboot_now("%(bcb_dev)s", "recovery");
-else if get_stage("%(bcb_dev)s") != "3/3" then
-""" % bcb_dev)
-
- # Stage 1/3: (a) Verify the current system.
- script.Comment("Stage 1/3")
-
- # Dump fingerprints
- script.Print("Source: %s" % (source_fp,))
- script.Print("Target: %s" % (target_fp,))
-
- script.Print("Verifying current system...")
-
- device_specific.IncrementalOTA_VerifyBegin()
-
- script.ShowProgress(0.1, 0)
- so_far = system_diff.EmitVerification(script)
- if vendor_diff:
- so_far += vendor_diff.EmitVerification(script)
-
- size = []
- if system_diff.patch_list:
- size.append(system_diff.largest_source_size)
- if vendor_diff:
- if vendor_diff.patch_list:
- size.append(vendor_diff.largest_source_size)
-
- if updating_boot:
- d = common.Difference(target_boot, source_boot)
- _, _, d = d.ComputePatch()
- print("boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d)))
-
- common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
-
- boot_type, boot_device = common.GetTypeAndDevice(
- "/boot", OPTIONS.source_info_dict)
-
- script.PatchCheck("%s:%s:%d:%s:%d:%s" %
- (boot_type, boot_device,
- source_boot.size, source_boot.sha1,
- target_boot.size, target_boot.sha1))
- so_far += source_boot.size
- size.append(target_boot.size)
-
- if size:
- script.CacheFreeSpaceCheck(max(size))
-
- device_specific.IncrementalOTA_VerifyEnd()
-
- if OPTIONS.two_step:
- # Stage 1/3: (b) Write recovery image to /boot.
- _WriteRecoveryImageToBoot(script, output_zip)
-
- script.AppendExtra("""
-set_stage("%(bcb_dev)s", "2/3");
-reboot_now("%(bcb_dev)s", "");
-else
-""" % bcb_dev)
-
- # Stage 3/3: Make changes.
- script.Comment("Stage 3/3")
-
- script.Comment("---- start making changes here ----")
-
- device_specific.IncrementalOTA_InstallBegin()
-
- if OPTIONS.two_step:
- common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
- script.WriteRawImage("/boot", "boot.img")
- print("writing full boot image (forced by two-step mode)")
-
- script.Print("Removing unneeded files...")
- system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
- if vendor_diff:
- vendor_diff.RemoveUnneededFiles(script)
-
- script.ShowProgress(0.8, 0)
- total_patch_size = 1.0 + system_diff.TotalPatchSize()
- if vendor_diff:
- total_patch_size += vendor_diff.TotalPatchSize()
- if updating_boot:
- total_patch_size += target_boot.size
-
- script.Print("Patching system files...")
- so_far = system_diff.EmitPatches(script, total_patch_size, 0)
- if vendor_diff:
- script.Print("Patching vendor files...")
- so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)
-
- if not OPTIONS.two_step:
- if updating_boot:
- # Produce the boot image by applying a patch to the current
- # contents of the boot partition, and write it back to the
- # partition.
- script.Print("Patching boot image...")
- script.ApplyPatch("%s:%s:%d:%s:%d:%s"
- % (boot_type, boot_device,
- source_boot.size, source_boot.sha1,
- target_boot.size, target_boot.sha1),
- "-",
- target_boot.size, target_boot.sha1,
- source_boot.sha1, "patch/boot.img.p")
- so_far += target_boot.size
- script.SetProgress(so_far / total_patch_size)
- print("boot image changed; including.")
- else:
- print("boot image unchanged; skipping.")
-
- system_items = ItemSet("system", "META/filesystem_config.txt")
- if vendor_diff:
- vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
-
- if updating_recovery:
- # Recovery is generated as a patch using both the boot image
- # (which contains the same linux kernel as recovery) and the file
- # /system/etc/recovery-resource.dat (which contains all the images
- # used in the recovery UI) as sources. This lets us minimize the
- # size of the patch, which must be included in every OTA package.
- #
- # For older builds where recovery-resource.dat is not present, we
- # use only the boot image as the source.
-
- if not target_has_recovery_patch:
- def output_sink(fn, data):
- common.ZipWriteStr(output_zip, "recovery/" + fn, data)
- system_items.Get("system/" + fn)
-
- common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
- target_recovery, target_boot)
- script.DeleteFiles(["/system/recovery-from-boot.p",
- "/system/etc/recovery.img",
- "/system/etc/install-recovery.sh"])
- print("recovery image changed; including as patch from boot.")
- else:
- print("recovery image unchanged; skipping.")
-
- script.ShowProgress(0.1, 10)
-
- target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
- if vendor_diff:
- target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))
-
- temp_script = script.MakeTemporary()
- system_items.GetMetadata(target_zip)
- system_items.Get("system").SetPermissions(temp_script)
- if vendor_diff:
- vendor_items.GetMetadata(target_zip)
- vendor_items.Get("vendor").SetPermissions(temp_script)
-
- # Note that this call will mess up the trees of Items, so make sure
- # we're done with them.
- source_symlinks = CopyPartitionFiles(system_items, source_zip, None)
- if vendor_diff:
- source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None))
-
- target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
- source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
-
- # Delete all the symlinks in source that aren't in target. This
- # needs to happen before verbatim files are unpacked, in case a
- # symlink in the source is replaced by a real file in the target.
-
- # If a symlink in the source will be replaced by a regular file, we cannot
- # delete the symlink/file in case the package gets applied again. For such
- # a symlink, we prepend a sha1_check() to detect if it has been updated.
- # (Bug: 23646151)
- replaced_symlinks = dict()
- if system_diff:
- for i in system_diff.verbatim_targets:
- replaced_symlinks["/%s" % (i[0],)] = i[2]
- if vendor_diff:
- for i in vendor_diff.verbatim_targets:
- replaced_symlinks["/%s" % (i[0],)] = i[2]
-
- if system_diff:
- for tf in system_diff.renames.values():
- replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
- if vendor_diff:
- for tf in vendor_diff.renames.values():
- replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
-
- always_delete = []
- may_delete = []
- for dest, link in source_symlinks:
- if link not in target_symlinks_d:
- if link in replaced_symlinks:
- may_delete.append((link, replaced_symlinks[link]))
- else:
- always_delete.append(link)
- script.DeleteFiles(always_delete)
- script.DeleteFilesIfNotMatching(may_delete)
-
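
DeleteFilesIfNotMatching presumably guards each deletion with a hash check so that a re-applied package does not delete the regular file that replaced a symlink; the emitted edify is roughly (path and digest made up):

    # sha1_check(read_file("/system/bin/foo"), "d2f0...") ||
    #     delete("/system/bin/foo");
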
- if system_diff.verbatim_targets:
- script.Print("Unpacking new system files...")
- script.UnpackPackageDir("system", "/system")
- if vendor_diff and vendor_diff.verbatim_targets:
- script.Print("Unpacking new vendor files...")
- script.UnpackPackageDir("vendor", "/vendor")
-
- if updating_recovery and not target_has_recovery_patch:
- script.Print("Unpacking new recovery...")
- script.UnpackPackageDir("recovery", "/system")
-
- system_diff.EmitRenames(script)
- if vendor_diff:
- vendor_diff.EmitRenames(script)
-
- script.Print("Symlinks and permissions...")
-
- # Create all the symlinks that don't already exist, or point to
- # somewhere different than what we want. Delete each symlink before
- # creating it, since the 'symlink' command won't overwrite.
- to_create = []
- for dest, link in target_symlinks:
- if link in source_symlinks_d:
- if dest != source_symlinks_d[link]:
- to_create.append((dest, link))
- else:
- to_create.append((dest, link))
- script.DeleteFiles([i[1] for i in to_create])
- script.MakeSymlinks(to_create)
-
- # Now that the symlinks are created, we can set all the
- # permissions.
- script.AppendScript(temp_script)
-
- # Do device-specific installation (eg, write radio image).
- device_specific.IncrementalOTA_InstallEnd()
-
- if OPTIONS.extra_script is not None:
- script.AppendExtra(OPTIONS.extra_script)
-
- # Patch the build.prop file last, so if something fails but the
- # device can still come up, it appears to be the old build and will
-  # get sent the OTA package again to retry.
- script.Print("Patching remaining system files...")
- system_diff.EmitDeferredPatches(script)
-
- if OPTIONS.wipe_user_data:
- script.Print("Erasing user data...")
- script.FormatPartition("/data")
- metadata["ota-wipe"] = "yes"
-
- if OPTIONS.two_step:
- script.AppendExtra("""
-set_stage("%(bcb_dev)s", "");
-endif;
-endif;
-""" % bcb_dev)
-
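
Pulling the three AppendExtra fragments above together, the generated two-step skeleton reads roughly as follows (bcb_dev substituted from the /misc fstab entry):

    if get_stage("<bcb_dev>") == "2/3" then
      # Stage 2/3: write recovery.img to /recovery, set stage "3/3",
      # reboot into recovery.
    else if get_stage("<bcb_dev>") != "3/3" then
      # Stage 1/3: verify the current system, write the recovery image
      # to /boot, set stage "2/3", reboot.
    else
      # Stage 3/3: apply the update, then set_stage("<bcb_dev>", "").
    endif;
    endif;
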
- if OPTIONS.verify and system_diff:
- script.Print("Remounting and verifying system partition files...")
- script.Unmount("/system")
- script.Mount("/system", recovery_mount_options)
- system_diff.EmitExplicitTargetVerification(script)
-
- if OPTIONS.verify and vendor_diff:
- script.Print("Remounting and verifying vendor partition files...")
- script.Unmount("/vendor")
- script.Mount("/vendor", recovery_mount_options)
- vendor_diff.EmitExplicitTargetVerification(script)
-
- # For downgrade OTAs, we prefer to use the update-binary in the source
- # build that is actually newer than the one in the target build.
- if OPTIONS.downgrade:
- script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
- else:
- script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
-
- metadata["ota-required-cache"] = str(script.required_cache)
- WriteMetadata(metadata, output_zip)
-
-
def main(argv):
def option_handler(o, a):
@@ -2129,6 +1236,8 @@
OPTIONS.payload_signer = a
elif o == "--payload_signer_args":
OPTIONS.payload_signer_args = shlex.split(a)
+ elif o == "--extracted_input_target_files":
+ OPTIONS.extracted_input = a
else:
return False
return True
@@ -2159,6 +1268,7 @@
"log_diff=",
"payload_signer=",
"payload_signer_args=",
+ "extracted_input_target_files=",
], extra_option_handler=option_handler)
if len(args) != 2:
@@ -2181,9 +1291,12 @@
# Load the dict file from the zip directly to have a peek at the OTA type.
# For packages using A/B update, unzipping is not needed.
- input_zip = zipfile.ZipFile(args[0], "r")
- OPTIONS.info_dict = common.LoadInfoDict(input_zip)
- common.ZipClose(input_zip)
+ if OPTIONS.extracted_input is not None:
+ OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input, OPTIONS.extracted_input)
+ else:
+ input_zip = zipfile.ZipFile(args[0], "r")
+ OPTIONS.info_dict = common.LoadInfoDict(input_zip)
+ common.ZipClose(input_zip)
ab_update = OPTIONS.info_dict.get("ab_update") == "true"
@@ -2213,11 +1326,18 @@
if OPTIONS.extra_script is not None:
OPTIONS.extra_script = open(OPTIONS.extra_script).read()
- print("unzipping target target-files...")
- OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
+ if OPTIONS.extracted_input is not None:
+ OPTIONS.input_tmp = OPTIONS.extracted_input
+ OPTIONS.target_tmp = OPTIONS.input_tmp
+ OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, OPTIONS.input_tmp)
+ input_zip = zipfile.ZipFile(args[0], "r")
+ else:
+ print("unzipping target target-files...")
+ OPTIONS.input_tmp, input_zip = common.UnzipTemp(
+ args[0], UNZIP_PATTERN)
- OPTIONS.target_tmp = OPTIONS.input_tmp
- OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
+ OPTIONS.target_tmp = OPTIONS.input_tmp
+ OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
if OPTIONS.verbose:
print("--- target info ---")
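
Usage sketch for the new flag (paths hypothetical): the target_files zip remains the positional input, while the already-extracted tree short-circuits the unzip step:

    # ota_from_target_files.py \
    #     --extracted_input_target_files /tmp/target-files-extracted \
    #     target-files.zip ota.zip
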
@@ -2282,7 +1402,8 @@
else:
print("unzipping source target-files...")
OPTIONS.source_tmp, source_zip = common.UnzipTemp(
- OPTIONS.incremental_source)
+ OPTIONS.incremental_source,
+ UNZIP_PATTERN)
OPTIONS.target_info_dict = OPTIONS.info_dict
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip,
OPTIONS.source_tmp)
@@ -2290,7 +1411,7 @@
print("--- source info ---")
common.DumpInfoDict(OPTIONS.source_info_dict)
try:
- WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
+ WriteBlockIncrementalOTAPackage(input_zip, source_zip, output_zip)
if OPTIONS.log_diff:
out_file = open(OPTIONS.log_diff, 'w')
import target_files_diff
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 4ba7560..7eb60d9 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -144,6 +144,12 @@
f.seek(16, os.SEEK_SET)
f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))
+ def RangeSha1(self, ranges):
+ h = sha1()
+ for data in self._GetRangeData(ranges):
+ h.update(data)
+ return h.hexdigest()
+
def ReadRangeSet(self, ranges):
return [d for d in self._GetRangeData(ranges)]
@@ -155,10 +161,11 @@
ranges = self.care_map
if not include_clobbered_blocks:
ranges = ranges.subtract(self.clobbered_blocks)
- h = sha1()
- for d in self._GetRangeData(ranges):
- h.update(d)
- return h.hexdigest()
+ return self.RangeSha1(ranges)
+
+ def WriteRangeDataToFd(self, ranges, fd):
+ for data in self._GetRangeData(ranges):
+ fd.write(data)
def _GetRangeData(self, ranges):
"""Generator that produces all the image data in 'ranges'. The
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index cc1fa23..e5a3694 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -41,14 +41,14 @@
block_image_diff = BlockImageDiff(tgt, src)
transfers = block_image_diff.transfers
- t0 = Transfer(
- "t1", "t1", RangeSet("10-15"), RangeSet("0-5"), "move", transfers)
- t1 = Transfer(
- "t2", "t2", RangeSet("20-25"), RangeSet("0-7"), "move", transfers)
- t2 = Transfer(
- "t3", "t3", RangeSet("30-35"), RangeSet("0-4"), "move", transfers)
- t3 = Transfer(
- "t4", "t4", RangeSet("0-10"), RangeSet("40-50"), "move", transfers)
+ t0 = Transfer("t1", "t1", RangeSet("10-15"), RangeSet("0-5"), "t1hash",
+ "t1hash", "move", transfers)
+ t1 = Transfer("t2", "t2", RangeSet("20-25"), RangeSet("0-7"), "t2hash",
+ "t2hash", "move", transfers)
+ t2 = Transfer("t3", "t3", RangeSet("30-35"), RangeSet("0-4"), "t3hash",
+ "t3hash", "move", transfers)
+ t3 = Transfer("t4", "t4", RangeSet("0-10"), RangeSet("40-50"), "t4hash",
+ "t4hash", "move", transfers)
block_image_diff.GenerateDigraph()
t3_goes_after_copy = t3.goes_after.copy()
@@ -87,10 +87,10 @@
block_image_diff = BlockImageDiff(tgt, src, version=3)
transfers = block_image_diff.transfers
- Transfer("t1", "t1", RangeSet("11-15"), RangeSet("20-29"), "diff",
- transfers)
- Transfer("t2", "t2", RangeSet("20-29"), RangeSet("11-15"), "diff",
- transfers)
+ Transfer("t1", "t1", RangeSet("11-15"), RangeSet("20-29"), "t1hash",
+ "t1hash", "diff", transfers)
+ Transfer("t2", "t2", RangeSet("20-29"), RangeSet("11-15"), "t2hash",
+ "t2hash", "diff", transfers)
block_image_diff.GenerateDigraph()
block_image_diff.FindVertexSequence()
@@ -121,12 +121,12 @@
block_image_diff = BlockImageDiff(tgt, src, version=3)
transfers = block_image_diff.transfers
- t1 = Transfer("t1", "t1", RangeSet("11-15"), RangeSet("1-5"), "diff",
- transfers)
- t2 = Transfer("t2", "t2", RangeSet("21-25"), RangeSet("11-15"), "diff",
- transfers)
+ t1 = Transfer("t1", "t1", RangeSet("11-15"), RangeSet("1-5"), "t1hash",
+ "t1hash", "diff", transfers)
+ t2 = Transfer("t2", "t2", RangeSet("21-25"), RangeSet("11-15"), "t2hash",
+ "t2hash", "diff", transfers)
t3 = Transfer("t3", "t3", RangeSet("1-5 30-39"), RangeSet("11-15 30-39"),
- "diff", transfers)
+ "t3hash", "t3hash", "diff", transfers)
block_image_diff.GenerateDigraph()
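
The test updates reflect Transfer growing two positional hash arguments. From the call sites the assumed new signature is (argument names inferred; see blockimgdiff.py):

    # Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
    #          tgt_sha1, src_sha1, style, by_id)
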