Merge "Ensure runtime-visible annotations are kept"
diff --git a/Changes.md b/Changes.md
index cabbed6..a03a48c 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,62 @@
# Build System Changes for Android.mk Writers
+## Gensrcs starts disallowing depfile property
+
+To migrate all gensrcs to Bazel, we are restricting the use of the depfile property
+because Bazel requires specifying the dependencies directly.
+
+To fix existing uses, remove depfile and directly specify all the dependencies
+in .bp files. For example:
+
+```
+gensrcs {
+ name: "framework-cppstream-protos",
+ tools: [
+ "aprotoc",
+ "protoc-gen-cppstream",
+ ],
+ cmd: "mkdir -p $(genDir)/$(in) " +
+ "&& $(location aprotoc) " +
+ " --plugin=$(location protoc-gen-cppstream) " +
+ " -I . " +
+ " $(in) ",
+ srcs: [
+ "bar.proto",
+ ],
+ output_extension: "srcjar",
+}
+```
+where `bar.proto` imports `external.proto`, this would become
+
+```
+gensrcs {
+ name: "framework-cppstream-protos",
+ tools: [
+ "aprotoc",
+ "protoc-gen-cppstream",
+ ],
+ tool_files: [
+ "external.proto",
+ ],
+ cmd: "mkdir -p $(genDir)/$(in) " +
+ "&& $(location aprotoc) " +
+ " --plugin=$(location protoc-gen-cppstream) " +
+ " $(in) ",
+ srcs: [
+ "bar.proto",
+ ],
+ output_extension: "srcjar",
+}
+```
+as in https://android-review.googlesource.com/c/platform/frameworks/base/+/2125692/.
+
+`BUILD_BROKEN_DEPFILE` can be used to allowlist usage of depfile in `gensrcs`.
+
+If `depfile` is needed for generating javastream proto, `java_library` with `proto.type`
+set to `stream` is the alternative solution. See
+https://android-review.googlesource.com/c/platform/packages/modules/Permission/+/2118004/
+for an example.
+
## Genrule starts disallowing directory inputs
To better specify the inputs to the build, we are restricting use of directories
diff --git a/core/Makefile b/core/Makefile
index a96cfd5..5727db2 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -967,15 +967,11 @@
INTERNAL_BOOTIMAGE_ARGS := \
$(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET))
-INTERNAL_INIT_BOOT_IMAGE_ARGS :=
-
# TODO(b/229701033): clean up BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK.
ifneq ($(BOARD_BUILD_GKI_BOOT_IMAGE_WITHOUT_RAMDISK),true)
ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- else
- INTERNAL_INIT_BOOT_IMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
endif
endif
@@ -1222,6 +1218,8 @@
INSTALLED_INIT_BOOT_IMAGE_TARGET := $(PRODUCT_OUT)/init_boot.img
$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_RAMDISK_TARGET)
+INTERNAL_INIT_BOOT_IMAGE_ARGS := --ramdisk $(INSTALLED_RAMDISK_TARGET)
+
ifdef BOARD_KERNEL_PAGESIZE
INTERNAL_INIT_BOOT_IMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
endif
@@ -2979,20 +2977,29 @@
$(FSVERITY_APK_OUT): PRIVATE_FRAMEWORK_RES := $(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
$(FSVERITY_APK_OUT): PRIVATE_KEY := $(FSVERITY_APK_KEY_PATH)
$(FSVERITY_APK_OUT): PRIVATE_INPUTS := $(fsverity-metadata-targets)
+$(FSVERITY_APK_OUT): PRIVATE_ASSETS := $(call intermediates-dir-for,ETC,build_manifest)/assets
$(FSVERITY_APK_OUT): $(HOST_OUT_EXECUTABLES)/fsverity_manifest_generator \
$(HOST_OUT_EXECUTABLES)/fsverity $(HOST_OUT_EXECUTABLES)/aapt2 \
$(HOST_OUT_EXECUTABLES)/apksigner $(FSVERITY_APK_MANIFEST_PATH) \
$(FSVERITY_APK_KEY_PATH).x509.pem $(FSVERITY_APK_KEY_PATH).pk8 \
$(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk \
$(fsverity-metadata-targets)
- $< --fsverity-path $(PRIVATE_FSVERITY) --aapt2-path $(PRIVATE_AAPT2) \
+ rm -rf $(PRIVATE_ASSETS)
+ mkdir -p $(PRIVATE_ASSETS)
+ $< --fsverity-path $(PRIVATE_FSVERITY) \
+ --base-dir $(PRODUCT_OUT) \
+ --output $(PRIVATE_ASSETS)/build_manifest.pb \
+ $(PRIVATE_INPUTS)
+ $(PRIVATE_AAPT2) link -o $@ \
+ -A $(PRIVATE_ASSETS) \
+ -I $(PRIVATE_FRAMEWORK_RES) \
--min-sdk-version $(PRIVATE_MIN_SDK_VERSION) \
--version-code $(PRIVATE_VERSION_CODE) \
--version-name $(PRIVATE_VERSION_NAME) \
- --apksigner-path $(PRIVATE_APKSIGNER) --apk-key-path $(PRIVATE_KEY) \
- --apk-manifest-path $(PRIVATE_MANIFEST) --framework-res $(PRIVATE_FRAMEWORK_RES) \
- --output $@ \
- --base-dir $(PRODUCT_OUT) $(PRIVATE_INPUTS)
+ --manifest $(PRIVATE_MANIFEST)
+ $(PRIVATE_APKSIGNER) sign --in $@ \
+ --cert $(PRIVATE_KEY).x509.pem \
+ --key $(PRIVATE_KEY).pk8
ALL_DEFAULT_INSTALLED_MODULES += $(FSVERITY_APK_OUT)
@@ -6524,22 +6531,22 @@
endif
endif
-# If BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT is set, super.img is built from images in the
-# $(PRODUCT_OUT) directory, and is built to $(PRODUCT_OUT)/super.img. Also, it will
-# be built for non-dist builds. This is useful for devices that uses super.img directly, e.g.
-# virtual devices.
-ifeq (true,$(BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT))
$(INSTALLED_SUPERIMAGE_TARGET): $(INSTALLED_SUPERIMAGE_DEPENDENCIES)
$(call pretty,"Target super fs image for debug: $@")
$(call build-superimage-target,$(INSTALLED_SUPERIMAGE_TARGET),\
$(call intermediates-dir-for,PACKAGING,superimage_debug)/misc_info.txt)
-droidcore-unbundled: $(INSTALLED_SUPERIMAGE_TARGET)
-
# For devices that uses super image directly, the superimage target points to the file in $(PRODUCT_OUT).
.PHONY: superimage
superimage: $(INSTALLED_SUPERIMAGE_TARGET)
+# If BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT is set, super.img is built from images in the
+# $(PRODUCT_OUT) directory, and is built to $(PRODUCT_OUT)/super.img. Also, it will
+# be built for non-dist builds. This is useful for devices that use super.img directly, e.g.
+# virtual devices.
+ifeq (true,$(BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT))
+droidcore-unbundled: $(INSTALLED_SUPERIMAGE_TARGET)
+
$(call dist-for-goals,dist_files,$(INSTALLED_MISC_INFO_TARGET):super_misc_info.txt)
endif # BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT
@@ -6906,6 +6913,7 @@
# depended on by each module in soong_cc_prebuilt.mk, where the module will have
# a dependency on each shared library that it needs to be "reinstalled".
FUZZ_SHARED_DEPS := $(call copy-many-files,$(strip $(FUZZ_TARGET_SHARED_DEPS_INSTALL_PAIRS)))
+AFL_FUZZ_SHARED_DEPS := $(call copy-many-files,$(strip $(AFL_FUZZ_TARGET_SHARED_DEPS_INSTALL_PAIRS)))
# -----------------------------------------------------------------
# The rule to build all fuzz targets for C++ and Rust, and package them.
@@ -6921,6 +6929,10 @@
haiku: $(SOONG_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_FUZZ_TARGETS)
$(call dist-for-goals,haiku,$(SOONG_FUZZ_PACKAGING_ARCH_MODULES))
+.PHONY: haiku-afl
+haiku-afl: $(SOONG_AFL_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_AFL_FUZZ_TARGETS)
+$(call dist-for-goals,haiku-afl,$(SOONG_AFL_FUZZ_PACKAGING_ARCH_MODULES))
+
.PHONY: haiku-java
haiku-java: $(SOONG_JAVA_FUZZ_PACKAGING_ARCH_MODULES) $(ALL_JAVA_FUZZ_TARGETS)
$(call dist-for-goals,haiku-java,$(SOONG_JAVA_FUZZ_PACKAGING_ARCH_MODULES))
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 6e52195..d007e80 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -26,6 +26,7 @@
# Add variables to the namespace below:
+$(call add_soong_config_var,ANDROID,TARGET_DYNAMIC_64_32_MEDIASERVER)
$(call add_soong_config_var,ANDROID,TARGET_ENABLE_MEDIADRM_64)
$(call add_soong_config_var,ANDROID,IS_TARGET_MIXED_SEPOLICY)
ifeq ($(IS_TARGET_MIXED_SEPOLICY),true)
@@ -75,10 +76,12 @@
# are controlled by the MODULE_BUILD_FROM_SOURCE environment variable by
# default.
INDIVIDUALLY_TOGGLEABLE_PREBUILT_MODULES := \
+ permission \
wifi \
$(foreach m, $(INDIVIDUALLY_TOGGLEABLE_PREBUILT_MODULES),\
- $(call soong_config_set,$(m)_module,source_build,$(MODULE_BUILD_FROM_SOURCE)))
+ $(if $(call soong_config_get,$(m)_module,source_build),,\
+ $(call soong_config_set,$(m)_module,source_build,$(MODULE_BUILD_FROM_SOURCE))))
# Apex build mode variables
ifdef APEX_BUILD_FOR_PRE_S_DEVICES
diff --git a/core/board_config.mk b/core/board_config.mk
index 8074225..d280349 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -174,6 +174,7 @@
_build_broken_var_list := \
+ BUILD_BROKEN_DEPFILE \
BUILD_BROKEN_DUP_RULES \
BUILD_BROKEN_DUP_SYSPROP \
BUILD_BROKEN_ELF_PREBUILT_PRODUCT_COPY_FILES \
diff --git a/core/distdir.mk b/core/distdir.mk
index 8f48cf8..bce8e7f 100644
--- a/core/distdir.mk
+++ b/core/distdir.mk
@@ -49,21 +49,18 @@
define __share-projects-rule
$(1) : PRIVATE_TARGETS := $(2)
-$(1) : PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,METAPACKAGING,codesharing)/$(1)/arguments
$(1): $(2) $(COMPLIANCE_LISTSHARE)
$(hide) rm -f $$@
mkdir -p $$(dir $$@)
- mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
- $$(if $$(strip $$(PRIVATE_TARGETS)),$$(call dump-words-to-file,$$(PRIVATE_TARGETS),$$(PRIVATE_ARGUMENT_FILE)))
- $$(if $$(strip $$(PRIVATE_TARGETS)),OUT_DIR=$(OUT_DIR) $(COMPLIANCE_LISTSHARE) -o $$@ @$$(PRIVATE_ARGUMENT_FILE),touch $$@)
+ $$(if $$(strip $$(PRIVATE_TARGETS)),OUT_DIR=$(OUT_DIR) $(COMPLIANCE_LISTSHARE) -o $$@ $$(PRIVATE_TARGETS),touch $$@)
endef
-# build list of projects to share in $(1) for dist targets in $(2)
+# build list of projects to share in $(1) for meta_lic in $(2)
#
# $(1): the intermediate project sharing file
-# $(2): the dist files to base the sharing on
+# $(2): the license metadata to base the sharing on
define _share-projects-rule
-$(eval $(call __share-projects-rule,$(1),$(call corresponding-license-metadata,$(2))))
+$(eval $(call __share-projects-rule,$(1),$(2)))
endef
.PHONY: alllicensetexts
@@ -86,32 +83,99 @@
$$(if $$(strip $$(PRIVATE_TARGETS)),OUT_DIR=$(OUT_DIR) $(TEXTNOTICE) -o $$@ @$$(PRIVATE_ARGUMENT_FILE),touch $$@)
endef
-# build list of projects to share in $(2) for dist targets in $(3) for dist goal $(1)
+# build list of projects to share in $(2) for meta_lic in $(3) for dist goals $(1)
+# Strip `out/dist/` used as proxy for 'DIST_DIR'
#
-# $(1): the name of the dist goal
+# $(1): the name of the dist goals
# $(2): the intermediate project sharing file
-# $(3): the dist files to base the sharing on
+# $(3): the license metadata to base the sharing on
define _license-texts-rule
-$(eval $(call __license-texts-rule,$(1),$(2),$(call corresponding-license-metadata,$(3)),$(sort $(dir $(3)))))
+$(eval $(call __license-texts-rule,$(1),$(2),$(3),out/dist/))
endef
+###########################################################
+## License metadata build rule for dist target $(1) with meta_lic $(2) copied from $(3)
+###########################################################
+define _dist-target-license-metadata-rule
+$(strip $(eval _meta :=$(2)))
+$(strip $(eval _dep:=))
+# 0p is the indicator for a non-copyrightable file where no party owns the copyright.
+# i.e. pure data with no copyrightable expression.
+# If all of the sources are 0p and only 0p, treat the copied file as 0p. Otherwise, all
+# of the sources must either be 0p or originate from a single metadata file to copy.
+$(strip $(foreach s,$(strip $(3)),\
+ $(eval _dmeta:=$(ALL_TARGETS.$(s).META_LIC))\
+ $(if $(strip $(_dmeta)),\
+ $(if $(filter-out 0p,$(_dep)),\
+ $(if $(filter-out $(_dep) 0p,$(_dmeta)),\
+ $(error cannot copy target from multiple modules: $(1) from $(_dep) and $(_dmeta)),\
+ $(if $(filter 0p,$(_dep)),$(eval _dep:=$(_dmeta)))),\
+ $(eval _dep:=$(_dmeta))\
+ ),\
+ $(eval TARGETS_MISSING_LICENSE_METADATA += $(s) $(1)))))
+
+
+ifeq (0p,$(strip $(_dep)))
+# Not copyrightable. No encumbrances, no license text, no license kind etc.
+$(_meta): PRIVATE_CONDITIONS := unencumbered
+$(_meta): PRIVATE_SOURCES := $(3)
+$(_meta): PRIVATE_INSTALLED := $(1)
+# use `$(1)` which is the unique and relatively short `out/dist/$(target)`
+$(_meta): PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,METAPACKAGING,notice)/$(1)/arguments
+$(_meta): $(BUILD_LICENSE_METADATA)
+$(_meta) :
+ rm -f $$@
+ mkdir -p $$(dir $$@)
+ mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
+ $$(call dump-words-to-file,\
+ $$(addprefix -c ,$$(PRIVATE_CONDITIONS))\
+ $$(addprefix -s ,$$(PRIVATE_SOURCES))\
+ $$(addprefix -t ,$$(PRIVATE_TARGETS))\
+ $$(addprefix -i ,$$(PRIVATE_INSTALLED)),\
+ $$(PRIVATE_ARGUMENT_FILE))
+ OUT_DIR=$(OUT_DIR) $(BUILD_LICENSE_METADATA) \
+ @$$(PRIVATE_ARGUMENT_FILE) \
+ -o $$@
+
+else ifneq (,$(strip $(_dep)))
+# Not a missing target, copy metadata and `is_container` etc. from license metadata file `$(_dep)`
+$(_meta): PRIVATE_DEST_TARGET := $(1)
+$(_meta): PRIVATE_SOURCE_TARGETS := $(3)
+$(_meta): PRIVATE_SOURCE_METADATA := $(_dep)
+# use `$(1)` which is the unique and relatively short `out/dist/$(target)`
+$(_meta): PRIVATE_ARGUMENT_FILE := $(call intermediates-dir-for,METAPACKAGING,copynotice)/$(1)/arguments
+$(_meta) : $(_dep) $(COPY_LICENSE_METADATA)
+ rm -f $$@
+ mkdir -p $$(dir $$@)
+ mkdir -p $$(dir $$(PRIVATE_ARGUMENT_FILE))
+ $$(call dump-words-to-file,\
+ $$(addprefix -i ,$$(PRIVATE_DEST_TARGET))\
+ $$(addprefix -s ,$$(PRIVATE_SOURCE_TARGETS))\
+ $$(addprefix -d ,$$(PRIVATE_SOURCE_METADATA)),\
+ $$(PRIVATE_ARGUMENT_FILE))
+ OUT_DIR=$(OUT_DIR) $(COPY_LICENSE_METADATA) \
+ @$$(PRIVATE_ARGUMENT_FILE) \
+ -o $$@
+
+endif
+endef
+
+# use `out/dist/` as a proxy for 'DIST_DIR'
define _add_projects_to_share
+$(strip $(eval _mdir := $(call intermediates-dir-for,METAPACKAGING,meta)/out/dist)) \
$(strip $(eval _idir := $(call intermediates-dir-for,METAPACKAGING,shareprojects))) \
$(strip $(eval _tdir := $(call intermediates-dir-for,METAPACKAGING,licensetexts))) \
-$(strip $(eval _goals := $(sort $(_all_dist_goals)))) \
-$(strip $(eval _opairs := $(sort $(_all_dist_goal_output_pairs)))) \
-$(strip $(eval _dpairs := $(sort $(_all_dist_src_dst_pairs)))) \
-$(strip $(eval _allt :=)) \
-$(foreach goal,$(_goals), \
- $(eval _f := $(_idir)/$(goal).shareprojects) \
- $(eval _n := $(_tdir)/$(goal).txt) \
- $(call dist-for-goals,$(goal),$(_f):shareprojects/$(basename $(notdir $(_f)))) \
- $(call dist-for-goals,$(goal),$(_n):licensetexts/$(basename $(notdir $(_n)))) \
- $(eval _targets :=) \
- $(foreach op,$(filter $(goal):%,$(_opairs)),$(foreach p,$(filter %:$(call word-colon,2,$(op)),$(_dpairs)),$(eval _targets += $(call word-colon,1,$(p))))) \
- $(eval _allt += $(_targets)) \
- $(eval $(call _share-projects-rule,$(_f),$(_targets))) \
- $(eval $(call _license-texts-rule,$(goal),$(_n),$(_targets))) \
+$(strip $(eval _allt := $(sort $(foreach goal,$(_all_dist_goal_output_pairs),$(call word-colon,2,$(goal)))))) \
+$(foreach target,$(_allt), \
+ $(eval _goals := $(sort $(foreach dg,$(filter %:$(target),$(_all_dist_goal_output_pairs)),$(call word-colon,1,$(dg))))) \
+ $(eval _srcs := $(sort $(foreach sdp,$(filter %:$(target),$(_all_dist_src_dst_pairs)),$(call word-colon,1,$(sdp))))) \
+ $(eval $(call _dist-target-license-metadata-rule,out/dist/$(target),$(_mdir)/out/dist/$(target).meta_lic,$(_srcs))) \
+ $(eval _f := $(_idir)/$(target).shareprojects) \
+ $(eval _n := $(_tdir)/$(target).txt) \
+ $(eval $(call dist-for-goals,$(_goals),$(_f):shareprojects/$(target).shareprojects)) \
+ $(eval $(call dist-for-goals,$(_goals),$(_n):licensetexts/$(target).txt)) \
+ $(eval $(call _share-projects-rule,$(_f),$(foreach t, $(filter-out $(TARGETS_MISSING_LICENSE_METADATA),out/dist/$(target)),$(_mdir)/$(t).meta_lic))) \
+ $(eval $(call _license-texts-rule,$(_goals),$(_n),$(foreach t,$(filter-out $(TARGETS_MISSING_LICENSE_METADATA),out/dist/$(target)),$(_mdir)/$(t).meta_lic))) \
)
endef
diff --git a/core/node_fns.mk b/core/node_fns.mk
index 2243cd7..144eb8b 100644
--- a/core/node_fns.mk
+++ b/core/node_fns.mk
@@ -83,27 +83,17 @@
# If needle appears multiple times, only the first occurrance
# will survive.
#
-# How it works:
-#
-# - Stick everything in haystack into a single word,
-# with "|||" separating the words.
-# - Replace occurrances of "|||$(needle)|||" with "||| |||",
-# breaking haystack back into multiple words, with spaces
-# where needle appeared.
-# - Add needle between the first and second words of haystack.
-# - Replace "|||" with spaces, breaking haystack back into
-# individual words.
-#
define uniq-word
$(strip \
$(if $(filter-out 0 1,$(words $(filter $(2),$(1)))), \
- $(eval h := |||$(subst $(space),|||,$(strip $(1)))|||) \
- $(eval h := $(subst |||$(strip $(2))|||,|||$(space)|||,$(h))) \
- $(eval h := $(word 1,$(h)) $(2) $(wordlist 2,9999,$(h))) \
- $(subst |||,$(space),$(h)) \
- , \
- $(1) \
- ))
+ $(eval _uniq_word_seen :=) \
+ $(foreach w,$(1), \
+ $(if $(filter $(2),$(w)), \
+ $(if $(_uniq_word_seen),, \
+ $(w) \
+ $(eval _uniq_word_seen := true)), \
+ $(w))), \
+ $(1)))
endef
INHERIT_TAG := @inherit:
diff --git a/core/product.mk b/core/product.mk
index 53fee1c..7351313 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -404,7 +404,7 @@
$(eval current_mk := $(strip $(word 1,$(_include_stack)))) \
$(eval inherit_var := PRODUCTS.$(current_mk).INHERITS_FROM) \
$(eval $(inherit_var) := $(sort $($(inherit_var)) $(np))) \
- $(call dump-inherit,$(strip $(word 1,$(_include_stack))),$(1)) \
+ $(call dump-inherit,$(current_mk),$(1)) \
$(call dump-config-vals,$(current_mk),inherit))
endef
diff --git a/core/proguard_basic_keeps.flags b/core/proguard_basic_keeps.flags
index 84d73e1..b5d14fa 100644
--- a/core/proguard_basic_keeps.flags
+++ b/core/proguard_basic_keeps.flags
@@ -79,5 +79,6 @@
-dontnote
# The lite proto runtime uses reflection to access fields based on the names in
-# the schema, keep all the fields.
--keepclassmembers class * extends com.google.protobuf.MessageLite { <fields>; }
+# the schema, keep all the fields. Wildcard is used to apply the rule to classes
+# that have been renamed with jarjar.
+-keepclassmembers class * extends **.protobuf.MessageLite { <fields>; }
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 9b5cd1e..c4a40af 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -274,6 +274,7 @@
$(call add_json_str, ShippingApiLevel, $(PRODUCT_SHIPPING_API_LEVEL))
+$(call add_json_bool, BuildBrokenDepfile, $(filter true,$(BUILD_BROKEN_DEPFILE)))
$(call add_json_bool, BuildBrokenEnforceSyspropOwner, $(filter true,$(BUILD_BROKEN_ENFORCE_SYSPROP_OWNER)))
$(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
$(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE)))
@@ -292,6 +293,8 @@
$(call add_json_bool, GenerateAidlNdkPlatformBackend, $(filter true,$(NEED_AIDL_NDK_PLATFORM_BACKEND)))
+$(call add_json_bool, IgnorePrefer32OnDevice, $(filter true,$(IGNORE_PREFER32_ON_DEVICE)))
+
$(call json_end)
$(file >$(SOONG_VARIABLES).tmp,$(json_contents))
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 61c07ba..570702a 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -47,10 +47,18 @@
echo "ro.product.$(1).model=$(PRODUCT_MODEL)" >> $(2);\
echo "ro.product.$(1).name=$(TARGET_PRODUCT)" >> $(2);\
)\
- $(if $(filter system vendor odm,$(1)),\
- echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST) " >> $(2);\
- echo "ro.$(1).product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT)" >> $(2);\
- echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ $(if $(filter true,$(ZYGOTE_FORCE_64)),\
+ $(if $(filter vendor,$(1)),\
+ echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist32=" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ )\
+ ,\
+ $(if $(filter system vendor odm,$(1)),\
+ echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ )\
)\
echo "ro.$(1).build.date=`$(DATE_FROM_FILE)`" >> $(2);\
echo "ro.$(1).build.date.utc=`$(DATE_FROM_FILE) +%s`" >> $(2);\
@@ -282,6 +290,7 @@
TARGET_CPU_ABI_LIST_64_BIT="$(TARGET_CPU_ABI_LIST_64_BIT)" \
TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \
+ ZYGOTE_FORCE_64_BIT="$(ZYGOTE_FORCE_64_BIT)" \
bash $(BUILDINFO_SH) > $@
ifdef TARGET_SYSTEM_PROP
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 8097535..4ef6eb8 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -28,6 +28,7 @@
'"runtime_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).LOCAL_RUNTIME_LIBRARIES)),"$(w)", )], ' \
'"data_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA_BINS)),"$(w)", )], ' \
'"supported_variants": [$(foreach w,$(sort $(ALL_MODULES.$(m).SUPPORTED_VARIANTS)),"$(w)", )], ' \
+ '"host_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).HOST_REQUIRED_FROM_TARGET)),"$(w)", )], ' \
'},\n' \
) | sed -e 's/, *\]/]/g' -e 's/, *\}/ }/g' -e '$$s/,$$//' >> $@
$(hide) echo '}' >> $@
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index cd67ad6..ce25ee2 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -104,7 +104,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2022-06-05
+ PLATFORM_SECURITY_PATCH := 2022-07-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/finalize_branch_for_release.sh b/finalize_branch_for_release.sh
index 12b096f..ce90ac0 100755
--- a/finalize_branch_for_release.sh
+++ b/finalize_branch_for_release.sh
@@ -16,15 +16,23 @@
# Update references in the codebase to new API version (TODO)
# ...
- AIDL_TRANSITIVE_FREEZE=true $m aidl-freeze-api
+ AIDL_TRANSITIVE_FREEZE=true $m aidl-freeze-api create_reference_dumps
+
+ # Generate ABI dumps
+ ANDROID_BUILD_TOP="$top" \
+ out/host/linux-x86/bin/create_reference_dumps \
+ -p aosp_arm64 --build-variant user
# Update new versions of files. See update-vndk-list.sh (which requires envsetup.sh)
$m check-vndk-list || \
{ cp $top/out/soong/vndk/vndk.libraries.txt $top/build/make/target/product/gsi/current.txt; }
- # for now, we simulate the release state for AIDL, but in the future, we would want
- # to actually turn the branch into the REL state and test with that
- AIDL_FROZEN_REL=true $m nothing # test build
+ # This command tests:
+ # The release state for AIDL.
+ # ABI difference between user and userdebug builds.
+ # In the future, we would want to actually turn the branch into the REL
+ # state and test with that.
+ AIDL_FROZEN_REL=true $m droidcore
# Build SDK (TODO)
# lunch sdk...
diff --git a/orchestrator/core/lunch.py b/orchestrator/core/lunch.py
index a648478..70a2d1d 100755
--- a/orchestrator/core/lunch.py
+++ b/orchestrator/core/lunch.py
@@ -75,11 +75,17 @@
for f in walk_paths(path, lambda x: x == filename):
return f
+# TODO: When orchestrator is in its own git project remove the "build" and "make" here
+class LunchContext(object):
+ """Mockable container for lunch"""
+ def __init__(self, workspace_root, orchestrator_path_prefix_components=["build", "build", "make"]):
+ self.workspace_root = workspace_root
+ self.orchestrator_path_prefix_components = orchestrator_path_prefix_components
-def find_config_dirs(workspace_root):
+def find_config_dirs(context):
"""Find the configuration files in the well known locations inside workspace_root
- <workspace_root>/build/build/orchestrator/multitree_combos
+ <workspace_root>/<orchestrator>/<path>/<prefix>/orchestrator/multitree_combos
(AOSP devices, such as cuttlefish)
<workspace_root>/vendor/**/multitree_combos
@@ -93,21 +99,20 @@
"""
# TODO: This is not looking in inner trees correctly.
- # TODO: When orchestrator is in its own git project remove the "make/" here
- yield os.path.join(workspace_root, "build/build/make/orchestrator/multitree_combos")
+ yield os.path.join(context.workspace_root, *context.orchestrator_path_prefix_components, "orchestrator/multitree_combos")
dirs = ["vendor", "device"]
for d in dirs:
- yield from find_dirs(os.path.join(workspace_root, d), "multitree_combos")
+ yield from find_dirs(os.path.join(context.workspace_root, d), "multitree_combos")
-def find_named_config(workspace_root, shortname):
- """Find the config with the given shortname inside workspace_root.
+def find_named_config(context, shortname):
+ """Find the config with the given shortname inside context.workspace_root.
Config directories are searched in the order described in find_config_dirs,
and inside those directories, alphabetically."""
filename = shortname + ".mcombo"
- for config_dir in find_config_dirs(workspace_root):
+ for config_dir in find_config_dirs(context):
found = find_file(config_dir, filename)
if found:
return found
@@ -122,7 +127,7 @@
return split
-def choose_config_from_args(workspace_root, args):
+def choose_config_from_args(context, args):
"""Return the config file we should use for the given argument,
or null if there's no file that matches that."""
if len(args) == 1:
@@ -130,7 +135,7 @@
# file we don't match that.
pv = parse_product_variant(args[0])
if pv:
- config = find_named_config(workspace_root, pv[0])
+ config = find_named_config(context, pv[0])
if config:
return (config, pv[1])
return None, None
@@ -295,9 +300,9 @@
return EXIT_STATUS_OK
-def find_all_combo_files(workspace_root):
+def find_all_combo_files(context):
"""Find all .mcombo files in the prescribed locations in the tree."""
- for dir in find_config_dirs(workspace_root):
+ for dir in find_config_dirs(context):
for file in walk_paths(dir, lambda x: x.endswith(".mcombo")):
yield file
@@ -313,10 +318,10 @@
return config.get("lunchable", False)
-def find_all_lunchable(workspace_root):
- """Find all mcombo files in the tree (rooted at workspace_root) that when
+def find_all_lunchable(context):
+ """Find all mcombo files in the tree (rooted at context.workspace_root) that when
parsed (and inheritance is flattened) have lunchable: true."""
- for f in [x for x in find_all_combo_files(workspace_root) if is_file_lunchable(x)]:
+ for f in [x for x in find_all_combo_files(context) if is_file_lunchable(x)]:
yield f
@@ -353,7 +358,8 @@
def do_list():
"""Handle the --list command."""
- for f in sorted(find_all_lunchable(".")):
+ lunch_context = LunchContext(".")
+ for f in sorted(find_all_lunchable(lunch_context)):
print(f)
diff --git a/orchestrator/core/test_lunch.py b/orchestrator/core/test_lunch.py
index 2d85d05..5b890fb 100755
--- a/orchestrator/core/test_lunch.py
+++ b/orchestrator/core/test_lunch.py
@@ -20,6 +20,11 @@
sys.dont_write_bytecode = True
import lunch
+# Create a test LunchContext object
+# Test workspace is in test/configs
+# Orchestrator prefix inside it is build/make
+test_lunch_context = lunch.LunchContext("test/configs", ["build", "make"])
+
class TestStringMethods(unittest.TestCase):
def test_find_dirs(self):
@@ -35,61 +40,61 @@
"test/configs/device/aa/bb/multitree_combos/v.mcombo")
def test_find_config_dirs(self):
- self.assertEqual([x for x in lunch.find_config_dirs("test/configs")], [
+ self.assertEqual([x for x in lunch.find_config_dirs(test_lunch_context)], [
"test/configs/build/make/orchestrator/multitree_combos",
"test/configs/vendor/aa/bb/multitree_combos",
"test/configs/device/aa/bb/multitree_combos"])
def test_find_named_config(self):
# Inside build/orchestrator, overriding device and vendor
- self.assertEqual(lunch.find_named_config("test/configs", "b"),
+ self.assertEqual(lunch.find_named_config(test_lunch_context, "b"),
"test/configs/build/make/orchestrator/multitree_combos/b.mcombo")
# Nested dir inside a combo dir
- self.assertEqual(lunch.find_named_config("test/configs", "nested"),
+ self.assertEqual(lunch.find_named_config(test_lunch_context, "nested"),
"test/configs/build/make/orchestrator/multitree_combos/nested/nested.mcombo")
# Inside vendor, overriding device
- self.assertEqual(lunch.find_named_config("test/configs", "v"),
+ self.assertEqual(lunch.find_named_config(test_lunch_context, "v"),
"test/configs/vendor/aa/bb/multitree_combos/v.mcombo")
# Inside device
- self.assertEqual(lunch.find_named_config("test/configs", "d"),
+ self.assertEqual(lunch.find_named_config(test_lunch_context, "d"),
"test/configs/device/aa/bb/multitree_combos/d.mcombo")
# Make sure we don't look too deep (for performance)
- self.assertIsNone(lunch.find_named_config("test/configs", "too_deep"))
+ self.assertIsNone(lunch.find_named_config(test_lunch_context, "too_deep"))
def test_choose_config_file(self):
# Empty string argument
- self.assertEqual(lunch.choose_config_from_args("test/configs", [""]),
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context, [""]),
(None, None))
# A PRODUCT-VARIANT name
- self.assertEqual(lunch.choose_config_from_args("test/configs", ["v-eng"]),
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context, ["v-eng"]),
("test/configs/vendor/aa/bb/multitree_combos/v.mcombo", "eng"))
# A PRODUCT-VARIANT name that conflicts with a file
- self.assertEqual(lunch.choose_config_from_args("test/configs", ["b-eng"]),
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context, ["b-eng"]),
("test/configs/build/make/orchestrator/multitree_combos/b.mcombo", "eng"))
# A PRODUCT-VARIANT that doesn't exist
- self.assertEqual(lunch.choose_config_from_args("test/configs", ["z-user"]),
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context, ["z-user"]),
(None, None))
# An explicit file
- self.assertEqual(lunch.choose_config_from_args("test/configs",
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context,
["test/configs/build/make/orchestrator/multitree_combos/b.mcombo", "eng"]),
("test/configs/build/make/orchestrator/multitree_combos/b.mcombo", "eng"))
# An explicit file that doesn't exist
- self.assertEqual(lunch.choose_config_from_args("test/configs",
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context,
["test/configs/doesnt_exist.mcombo", "eng"]),
(None, None))
# An explicit file without a variant should fail
- self.assertEqual(lunch.choose_config_from_args("test/configs",
+ self.assertEqual(lunch.choose_config_from_args(test_lunch_context,
["test/configs/build/make/orchestrator/multitree_combos/b.mcombo"]),
("test/configs/build/make/orchestrator/multitree_combos/b.mcombo", None))
@@ -119,7 +124,7 @@
})
def test_list(self):
- self.assertEqual(sorted(lunch.find_all_lunchable("test/configs")),
+ self.assertEqual(sorted(lunch.find_all_lunchable(test_lunch_context)),
["test/configs/build/make/orchestrator/multitree_combos/b.mcombo"])
if __name__ == "__main__":
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index 53714a8..8c634f6 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -80,6 +80,3 @@
# Setup a vendor image to let PRODUCT_VENDOR_PROPERTIES does not affect GSI
BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE := ext4
-
-# Disable 64 bit mediadrmserver
-TARGET_ENABLE_MEDIADRM_64 :=
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index 45ed3da..40be80e 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -52,6 +52,9 @@
TARGET_2ND_CPU_VARIANT := generic
endif
+# Include 64-bit mediaserver to support 64-bit only devices
+TARGET_DYNAMIC_64_32_MEDIASERVER := true
+
include build/make/target/board/BoardConfigGsiCommon.mk
# Some vendors still haven't cleaned up all device specific directories under
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index 93694f2..e7f2ae0 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -22,6 +22,9 @@
TARGET_2ND_ARCH := x86
TARGET_2ND_ARCH_VARIANT := x86_64
+# Include 64-bit mediaserver to support 64-bit only devices
+TARGET_DYNAMIC_64_32_MEDIASERVER := true
+
include build/make/target/board/BoardConfigGsiCommon.mk
ifndef BUILDING_GSI
diff --git a/target/board/gsi_arm64/BoardConfig.mk b/target/board/gsi_arm64/BoardConfig.mk
index db6f3f0..db95082 100644
--- a/target/board/gsi_arm64/BoardConfig.mk
+++ b/target/board/gsi_arm64/BoardConfig.mk
@@ -27,6 +27,9 @@
TARGET_2ND_CPU_ABI2 := armeabi
TARGET_2ND_CPU_VARIANT := generic
+# Include 64-bit mediaserver to support 64-bit only devices
+TARGET_DYNAMIC_64_32_MEDIASERVER := true
+
# TODO(b/111434759, b/111287060) SoC specific hacks
BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
BOARD_ROOT_EXTRA_SYMLINKS += /mnt/vendor/persist:/persist
diff --git a/target/product/core_64_bit.mk b/target/product/core_64_bit.mk
index 322fa80..b9d22a6 100644
--- a/target/product/core_64_bit.mk
+++ b/target/product/core_64_bit.mk
@@ -27,7 +27,11 @@
# Set the zygote property to select the 64-bit primary, 32-bit secondary script
# This line must be parsed before the one in core_minimal.mk
+ifeq ($(ZYGOTE_FORCE_64),true)
+PRODUCT_VENDOR_PROPERTIES += ro.zygote=zygote64
+else
PRODUCT_VENDOR_PROPERTIES += ro.zygote=zygote64_32
+endif
TARGET_SUPPORTS_32_BIT_APPS := true
TARGET_SUPPORTS_64_BIT_APPS := true
diff --git a/tools/Android.bp b/tools/Android.bp
index 6601c60..bd326f1 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -49,3 +49,8 @@
out: ["kernel_release.txt"],
cmd: "$(location) --tools lz4:$(location lz4) --input $(in) --output-release > $(out)"
}
+
+cc_binary_host {
+ name: "build-runfiles",
+ srcs: ["build-runfiles.cc"],
+}
diff --git a/tools/build-runfiles.cc b/tools/build-runfiles.cc
new file mode 100644
index 0000000..b6197f0
--- /dev/null
+++ b/tools/build-runfiles.cc
@@ -0,0 +1,426 @@
+// Copyright 2014 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This program creates a "runfiles tree" from a "runfiles manifest".
+//
+// The command line arguments are an input manifest INPUT and an output
+// directory RUNFILES. First, the files in the RUNFILES directory are scanned
+// and any extraneous ones are removed. Second, any missing files are created.
+// Finally, a copy of the input manifest is written to RUNFILES/MANIFEST.
+//
+// The input manifest consists of lines, each containing a relative path within
+// the runfiles, a space, and an optional absolute path. If this second path
+// is present, a symlink is created pointing to it; otherwise an empty file is
+// created.
+//
+// Given the line
+// <workspace root>/output/path /real/path
+// we will create directories
+// RUNFILES/<workspace root>
+// RUNFILES/<workspace root>/output
+// a symlink
+// RUNFILES/<workspace root>/output/path -> /real/path
+// and the output manifest will contain a line
+// <workspace root>/output/path /real/path
+//
+// If --use_metadata is supplied, every other line is treated as opaque
+// metadata, and is ignored here.
+//
+// All output paths must be relative and generally (but not always) begin with
+// <workspace root>. No output path may be equal to another. No output path may
+// be a path prefix of another.
+
+#define _FILE_OFFSET_BITS 64
+
+#include <dirent.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <map>
+#include <string>
+
+// program_invocation_short_name is not portable.
+static const char *argv0;
+
+const char *input_filename;
+const char *output_base_dir;
+
+enum FileType {
+ FILE_TYPE_REGULAR,
+ FILE_TYPE_DIRECTORY,
+ FILE_TYPE_SYMLINK
+};
+
+struct FileInfo {
+ FileType type;
+ std::string symlink_target;
+
+ bool operator==(const FileInfo &other) const {
+ return type == other.type && symlink_target == other.symlink_target;
+ }
+
+ bool operator!=(const FileInfo &other) const {
+ return !(*this == other);
+ }
+};
+
+typedef std::map<std::string, FileInfo> FileInfoMap;
+
+class RunfilesCreator {
+ public:
+ explicit RunfilesCreator(const std::string &output_base)
+ : output_base_(output_base),
+ output_filename_("MANIFEST"),
+ temp_filename_(output_filename_ + ".tmp") {
+ SetupOutputBase();
+ if (chdir(output_base_.c_str()) != 0) {
+ err(2, "chdir '%s'", output_base_.c_str());
+ }
+ }
+
+ void ReadManifest(const std::string &manifest_file, bool allow_relative,
+ bool use_metadata) {
+ FILE *outfile = fopen(temp_filename_.c_str(), "w");
+ if (!outfile) {
+ err(2, "opening '%s/%s' for writing", output_base_.c_str(),
+ temp_filename_.c_str());
+ }
+ FILE *infile = fopen(manifest_file.c_str(), "r");
+ if (!infile) {
+ err(2, "opening '%s' for reading", manifest_file.c_str());
+ }
+
+ // read input manifest
+ int lineno = 0;
+ char buf[3 * PATH_MAX];
+ while (fgets(buf, sizeof buf, infile)) {
+ // copy line to output manifest
+ if (fputs(buf, outfile) == EOF) {
+ err(2, "writing to '%s/%s'", output_base_.c_str(),
+ temp_filename_.c_str());
+ }
+
+ // parse line
+ ++lineno;
+ // Skip metadata lines. They are used solely for
+ // dependency checking.
+ if (use_metadata && lineno % 2 == 0) continue;
+
+ char *tok = strtok(buf, " \n");
+ if (tok == nullptr) {
+ continue;
+ } else if (*tok == '/') {
+ errx(2, "%s:%d: paths must not be absolute", input_filename, lineno);
+ }
+ std::string link(tok);
+
+ const char *target = strtok(nullptr, " \n");
+ if (target == nullptr) {
+ target = "";
+ } else if (strtok(nullptr, " \n") != nullptr) {
+ errx(2, "%s:%d: link or target filename contains space", input_filename, lineno);
+ } else if (!allow_relative && target[0] != '/') {
+ errx(2, "%s:%d: expected absolute path", input_filename, lineno);
+ }
+
+ FileInfo *info = &manifest_[link];
+ if (target[0] == '\0') {
+ // No target means an empty file.
+ info->type = FILE_TYPE_REGULAR;
+ } else {
+ info->type = FILE_TYPE_SYMLINK;
+ info->symlink_target = target;
+ }
+
+ FileInfo parent_info;
+ parent_info.type = FILE_TYPE_DIRECTORY;
+
+ while (true) {
+ std::string::size_type k = link.rfind('/');
+ if (k == std::string::npos) break;
+ link.erase(k, std::string::npos);
+ if (!manifest_.insert(std::make_pair(link, parent_info)).second) break;
+ }
+ }
+ if (fclose(outfile) != 0) {
+ err(2, "writing to '%s/%s'", output_base_.c_str(),
+ temp_filename_.c_str());
+ }
+ fclose(infile);
+
+ // Don't delete the temp manifest file.
+ manifest_[temp_filename_].type = FILE_TYPE_REGULAR;
+ }
+
+ void CreateRunfiles() {
+ if (unlink(output_filename_.c_str()) != 0 && errno != ENOENT) {
+ err(2, "removing previous file at '%s/%s'", output_base_.c_str(),
+ output_filename_.c_str());
+ }
+
+ ScanTreeAndPrune(".");
+ CreateFiles();
+
+ // rename output file into place
+ if (rename(temp_filename_.c_str(), output_filename_.c_str()) != 0) {
+ err(2, "renaming '%s/%s' to '%s/%s'",
+ output_base_.c_str(), temp_filename_.c_str(),
+ output_base_.c_str(), output_filename_.c_str());
+ }
+ }
+
+ private:
+ void SetupOutputBase() {
+ struct stat st;
+ if (stat(output_base_.c_str(), &st) != 0) {
+ // Technically, this will cause problems if the user's umask contains
+ // 0200, but we don't care. Anyone who does that deserves what's coming.
+ if (mkdir(output_base_.c_str(), 0777) != 0) {
+ err(2, "creating directory '%s'", output_base_.c_str());
+ }
+ } else {
+ EnsureDirReadAndWritePerms(output_base_);
+ }
+ }
+
+ void ScanTreeAndPrune(const std::string &path) {
+ // A note on non-empty files:
+ // We don't distinguish between empty and non-empty files. That is, if
+ // there's a file that has contents, we don't truncate it here, even though
+ // the manifest supports creation of empty files, only. Given that
+ // .runfiles are *supposed* to be immutable, this shouldn't be a problem.
+ EnsureDirReadAndWritePerms(path);
+
+ struct dirent *entry;
+ DIR *dh = opendir(path.c_str());
+ if (!dh) {
+ err(2, "opendir '%s'", path.c_str());
+ }
+
+ errno = 0;
+ const std::string prefix = (path == "." ? "" : path + "/");
+ while ((entry = readdir(dh)) != nullptr) {
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue;
+
+ std::string entry_path = prefix + entry->d_name;
+ FileInfo actual_info;
+ actual_info.type = DentryToFileType(entry_path, entry);
+
+ if (actual_info.type == FILE_TYPE_SYMLINK) {
+ ReadLinkOrDie(entry_path, &actual_info.symlink_target);
+ }
+
+ FileInfoMap::iterator expected_it = manifest_.find(entry_path);
+ if (expected_it == manifest_.end() ||
+ expected_it->second != actual_info) {
+ DelTree(entry_path, actual_info.type);
+ } else {
+ manifest_.erase(expected_it);
+ if (actual_info.type == FILE_TYPE_DIRECTORY) {
+ ScanTreeAndPrune(entry_path);
+ }
+ }
+
+ errno = 0;
+ }
+ if (errno != 0) {
+ err(2, "reading directory '%s'", path.c_str());
+ }
+ closedir(dh);
+ }
+
+ void CreateFiles() {
+ for (FileInfoMap::const_iterator it = manifest_.begin();
+ it != manifest_.end(); ++it) {
+ const std::string &path = it->first;
+ switch (it->second.type) {
+ case FILE_TYPE_DIRECTORY:
+ if (mkdir(path.c_str(), 0777) != 0) {
+ err(2, "mkdir '%s'", path.c_str());
+ }
+ break;
+ case FILE_TYPE_REGULAR:
+ {
+ int fd = open(path.c_str(), O_CREAT|O_EXCL|O_WRONLY, 0555);
+ if (fd < 0) {
+ err(2, "creating empty file '%s'", path.c_str());
+ }
+ close(fd);
+ }
+ break;
+ case FILE_TYPE_SYMLINK:
+ {
+ const std::string& target = it->second.symlink_target;
+ if (symlink(target.c_str(), path.c_str()) != 0) {
+ err(2, "symlinking '%s' -> '%s'", path.c_str(), target.c_str());
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ FileType DentryToFileType(const std::string &path, struct dirent *ent) {
+#ifdef _DIRENT_HAVE_D_TYPE
+ if (ent->d_type != DT_UNKNOWN) {
+ if (ent->d_type == DT_DIR) {
+ return FILE_TYPE_DIRECTORY;
+ } else if (ent->d_type == DT_LNK) {
+ return FILE_TYPE_SYMLINK;
+ } else {
+ return FILE_TYPE_REGULAR;
+ }
+ } else // NOLINT (the brace is in the next line)
+#endif
+ {
+ struct stat st;
+ LStatOrDie(path, &st);
+ if (S_ISDIR(st.st_mode)) {
+ return FILE_TYPE_DIRECTORY;
+ } else if (S_ISLNK(st.st_mode)) {
+ return FILE_TYPE_SYMLINK;
+ } else {
+ return FILE_TYPE_REGULAR;
+ }
+ }
+ }
+
+ void LStatOrDie(const std::string &path, struct stat *st) {
+ if (lstat(path.c_str(), st) != 0) {
+ err(2, "lstating file '%s'", path.c_str());
+ }
+ }
+
+ void StatOrDie(const std::string &path, struct stat *st) {
+ if (stat(path.c_str(), st) != 0) {
+ err(2, "stating file '%s'", path.c_str());
+ }
+ }
+
+ void ReadLinkOrDie(const std::string &path, std::string *output) {
+ char readlink_buffer[PATH_MAX];
+ ssize_t sz = readlink(path.c_str(), readlink_buffer, sizeof(readlink_buffer));
+ if (sz < 0) {
+ err(2, "reading symlink '%s'", path.c_str());
+ }
+ // readlink returns a non-null terminated string.
+ std::string(readlink_buffer, sz).swap(*output);
+ }
+
+ void EnsureDirReadAndWritePerms(const std::string &path) {
+ const int kMode = 0700;
+ struct stat st;
+ LStatOrDie(path, &st);
+ if ((st.st_mode & kMode) != kMode) {
+ int new_mode = st.st_mode | kMode;
+ if (chmod(path.c_str(), new_mode) != 0) {
+ err(2, "chmod '%s'", path.c_str());
+ }
+ }
+ }
+
+ bool DelTree(const std::string &path, FileType file_type) {
+ if (file_type != FILE_TYPE_DIRECTORY) {
+ if (unlink(path.c_str()) != 0) {
+ err(2, "unlinking '%s'", path.c_str());
+ return false;
+ }
+ return true;
+ }
+
+ EnsureDirReadAndWritePerms(path);
+
+ struct dirent *entry;
+ DIR *dh = opendir(path.c_str());
+ if (!dh) {
+ err(2, "opendir '%s'", path.c_str());
+ }
+ errno = 0;
+ while ((entry = readdir(dh)) != nullptr) {
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) continue;
+ const std::string entry_path = path + '/' + entry->d_name;
+ FileType entry_file_type = DentryToFileType(entry_path, entry);
+ DelTree(entry_path, entry_file_type);
+ errno = 0;
+ }
+ if (errno != 0) {
+ err(2, "readdir '%s'", path.c_str());
+ }
+ closedir(dh);
+ if (rmdir(path.c_str()) != 0) {
+ err(2, "rmdir '%s'", path.c_str());
+ }
+ return true;
+ }
+
+ private:
+ std::string output_base_;
+ std::string output_filename_;
+ std::string temp_filename_;
+
+ FileInfoMap manifest_;
+};
+
+int main(int argc, char **argv) {
+ argv0 = argv[0];
+
+ argc--; argv++;
+ bool allow_relative = false;
+ bool use_metadata = false;
+
+ while (argc >= 1) {
+ if (strcmp(argv[0], "--allow_relative") == 0) {
+ allow_relative = true;
+ argc--; argv++;
+ } else if (strcmp(argv[0], "--use_metadata") == 0) {
+ use_metadata = true;
+ argc--; argv++;
+ } else {
+ break;
+ }
+ }
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s "
+ "[--allow_relative] [--use_metadata] "
+ "INPUT RUNFILES\n",
+ argv0);
+ return 1;
+ }
+
+ input_filename = argv[0];
+ output_base_dir = argv[1];
+
+ std::string manifest_file = input_filename;
+ if (input_filename[0] != '/') {
+ char cwd_buf[PATH_MAX];
+ if (getcwd(cwd_buf, sizeof(cwd_buf)) == nullptr) {
+ err(2, "getcwd failed");
+ }
+ manifest_file = std::string(cwd_buf) + '/' + manifest_file;
+ }
+
+ RunfilesCreator runfiles_creator(output_base_dir);
+ runfiles_creator.ReadManifest(manifest_file, allow_relative, use_metadata);
+ runfiles_creator.CreateRunfiles();
+
+ return 0;
+}
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index d8e34b7..122202b 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -519,23 +519,6 @@
}
python_binary_host {
- name: "fsverity_manifest_generator",
- defaults: ["releasetools_binary_defaults"],
- srcs: [
- "fsverity_manifest_generator.py",
- ],
- libs: [
- "fsverity_digests_proto_python",
- "releasetools_common",
- ],
- required: [
- "aapt2",
- "apksigner",
- "fsverity",
- ],
-}
-
-python_binary_host {
name: "fsverity_metadata_generator",
defaults: ["releasetools_binary_defaults"],
srcs: [
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 09f69d0..3e87c54 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -557,7 +557,7 @@
cmd = [bpttool, "make_table", "--output_json", bpt.name,
"--output_gpt", img.name]
input_files_str = OPTIONS.info_dict["board_bpt_input_files"]
- input_files = input_files_str.split(" ")
+ input_files = input_files_str.split()
for i in input_files:
cmd.extend(["--input", i])
disk_size = OPTIONS.info_dict.get("board_bpt_disk_size")
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 941edc6..6730a25 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -66,7 +66,7 @@
self.avbtool = avbtool if avbtool else "avbtool"
self.sign_tool = sign_tool
- def ProcessApexFile(self, apk_keys, payload_key, signing_args=None):
+ def ProcessApexFile(self, apk_keys, payload_key, signing_args=None, is_sepolicy=False):
"""Scans and signs the payload files and repack the apex
Args:
@@ -84,9 +84,13 @@
self.debugfs_path, 'list', self.apex_path]
entries_names = common.RunAndCheckOutput(list_cmd).split()
apk_entries = [name for name in entries_names if name.endswith('.apk')]
+ sepolicy_entries = []
+ if is_sepolicy:
+ sepolicy_entries = [name for name in entries_names if
+ name.startswith('./etc/SEPolicy') and name.endswith('.zip')]
# No need to sign and repack, return the original apex path.
- if not apk_entries and self.sign_tool is None:
+ if not apk_entries and not sepolicy_entries and self.sign_tool is None:
logger.info('No apk file to sign in %s', self.apex_path)
return self.apex_path
@@ -102,14 +106,14 @@
' %s', entry)
payload_dir, has_signed_content = self.ExtractApexPayloadAndSignContents(
- apk_entries, apk_keys, payload_key, signing_args)
+ apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args)
if not has_signed_content:
- logger.info('No contents has been signed in %s', self.apex_path)
+ logger.info('No contents have been signed in %s', self.apex_path)
return self.apex_path
return self.RepackApexPayload(payload_dir, payload_key, signing_args)
- def ExtractApexPayloadAndSignContents(self, apk_entries, apk_keys, payload_key, signing_args):
+ def ExtractApexPayloadAndSignContents(self, apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args):
"""Extracts the payload image and signs the containing apk files."""
if not os.path.exists(self.debugfs_path):
raise ApexSigningError(
@@ -120,11 +124,11 @@
extract_cmd = ['deapexer', '--debugfs_path',
self.debugfs_path, 'extract', self.apex_path, payload_dir]
common.RunAndCheckOutput(extract_cmd)
+ assert os.path.exists(self.apex_path)
has_signed_content = False
for entry in apk_entries:
apk_path = os.path.join(payload_dir, entry)
- assert os.path.exists(self.apex_path)
key_name = apk_keys.get(os.path.basename(entry))
if key_name in common.SPECIAL_CERT_STRINGS:
@@ -141,6 +145,37 @@
codename_to_api_level_map=self.codename_to_api_level_map)
has_signed_content = True
+ for entry in sepolicy_entries:
+ sepolicy_path = os.path.join(payload_dir, entry)
+
+ if 'etc' not in entry:
+ logger.warning('Sepolicy path does not contain the intended directory name etc:'
+ ' %s', entry)
+
+ key_name = apk_keys.get(os.path.basename(entry))
+ if key_name is None:
+ logger.warning('Failed to find signing keys for {} in'
+ ' apex {}, payload key will be used instead.'
+ ' Use "-e <name>=" to specify a key'
+ .format(entry, self.apex_path))
+ key_name = payload_key
+
+ if key_name in common.SPECIAL_CERT_STRINGS:
+ logger.info('Not signing: %s due to special cert string', sepolicy_path)
+ continue
+
+ if OPTIONS.sign_sepolicy_path is not None:
+ sig_path = os.path.join(payload_dir, sepolicy_path + '.sig')
+ fsv_sig_path = os.path.join(payload_dir, sepolicy_path + '.fsv_sig')
+ old_sig = common.MakeTempFile()
+ old_fsv_sig = common.MakeTempFile()
+ os.rename(sig_path, old_sig)
+ os.rename(fsv_sig_path, old_fsv_sig)
+
+ logger.info('Signing sepolicy file %s in apex %s', sepolicy_path, self.apex_path)
+ if common.SignSePolicy(sepolicy_path, key_name, self.key_passwords.get(key_name)):
+ has_signed_content = True
+
if self.sign_tool:
logger.info('Signing payload contents in apex %s with %s', self.apex_path, self.sign_tool)
# Pass avbtool to the custom signing tool
@@ -324,7 +359,8 @@
def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
container_pw, apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False):
"""Signs the current uncompressed APEX with the given payload/container keys.
Args:
@@ -337,6 +373,7 @@
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
sign_tool: A tool to sign the contents of the APEX.
+ is_sepolicy: Indicates if the apex is a sepolicy.apex
Returns:
The path to the signed APEX file.
@@ -346,7 +383,8 @@
apk_signer = ApexApkSigner(apex_file, container_pw,
codename_to_api_level_map,
avbtool, sign_tool)
- apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key, signing_args)
+ apex_file = apk_signer.ProcessApexFile(
+ apk_keys, payload_key, signing_args, is_sepolicy)
# 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
# payload_key.
@@ -400,7 +438,8 @@
def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
container_pw, apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False):
"""Signs the current compressed APEX with the given payload/container keys.
Args:
@@ -412,6 +451,7 @@
codename_to_api_level_map: A dict that maps from codename to API level.
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
+ is_sepolicy: Indicates if the apex is a sepolicy.apex
Returns:
The path to the signed APEX file.
@@ -438,7 +478,8 @@
codename_to_api_level_map,
no_hashtree,
signing_args,
- sign_tool)
+ sign_tool,
+ is_sepolicy)
# 3. Compress signed original apex.
compressed_apex_file = common.MakeTempFile(prefix='apex-container-',
@@ -465,8 +506,8 @@
def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
- apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ apk_keys, codename_to_api_level_map, no_hashtree,
+ signing_args=None, sign_tool=None, is_sepolicy=False):
"""Signs the current APEX with the given payload/container keys.
Args:
@@ -478,6 +519,7 @@
codename_to_api_level_map: A dict that maps from codename to API level.
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
+ is_sepolicy: Indicates if the apex is a sepolicy.apex
Returns:
The path to the signed APEX file.
@@ -503,7 +545,8 @@
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=is_sepolicy)
elif apex_type == 'COMPRESSED':
return SignCompressedApex(
avbtool,
@@ -515,7 +558,8 @@
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=is_sepolicy)
else:
# TODO(b/172912232): support signing compressed apex
raise ApexInfoError('Unsupported apex type {}'.format(apex_type))
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 9567fdc..6d7895e 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -328,7 +328,7 @@
compressor = prop_dict["erofs_default_compressor"]
if "erofs_compressor" in prop_dict:
compressor = prop_dict["erofs_compressor"]
- if compressor:
+ if compressor and compressor != "none":
build_command.extend(["-z", compressor])
compress_hints = None
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index caa4641..917e4dc 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -72,7 +72,9 @@
if "ANDROID_HOST_OUT" in os.environ:
self.search_path = os.environ["ANDROID_HOST_OUT"]
self.signapk_shared_library_path = "lib64" # Relative to search_path
+ self.sign_sepolicy_path = None
self.extra_signapk_args = []
+ self.extra_sign_sepolicy_args = []
self.aapt2_path = "aapt2"
self.java_path = "java" # Use the one on the path by default.
self.java_args = ["-Xmx2048m"] # The default JVM args.
@@ -97,6 +99,7 @@
self.stash_threshold = 0.8
self.logfile = None
self.host_tools = {}
+ self.sepolicy_name = 'sepolicy.apex'
OPTIONS = Options()
@@ -1186,8 +1189,8 @@
"""
def uniq_concat(a, b):
- combined = set(a.split(" "))
- combined.update(set(b.split(" ")))
+ combined = set(a.split())
+ combined.update(set(b.split()))
combined = [item.strip() for item in combined if item.strip()]
return " ".join(sorted(combined))
@@ -1208,7 +1211,7 @@
# Super block devices are defined by the vendor dict.
if "super_block_devices" in vendor_dict:
merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
- for block_device in merged_dict["super_block_devices"].split(" "):
+ for block_device in merged_dict["super_block_devices"].split():
key = "super_%s_device_size" % block_device
if key not in vendor_dict:
raise ValueError("Vendor dict does not contain required key %s." % key)
@@ -1217,7 +1220,7 @@
# Partition groups and group sizes are defined by the vendor dict because
# these values may vary for each board that uses a shared system image.
merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
- for partition_group in merged_dict["super_partition_groups"].split(" "):
+ for partition_group in merged_dict["super_partition_groups"].split():
# Set the partition group's size using the value from the vendor dict.
key = "super_%s_group_size" % partition_group
if key not in vendor_dict:
@@ -2379,6 +2382,35 @@
"Failed to run signapk.jar: return code {}:\n{}".format(
proc.returncode, stdoutdata))
+def SignSePolicy(sepolicy, key, password):
+ """Sign the sepolicy zip, producing an fsverity .fsv_sig and
+ an RSA .sig signature files.
+ """
+
+ if OPTIONS.sign_sepolicy_path is None:
+ return False
+
+ java_library_path = os.path.join(
+ OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
+
+ cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
+ ["-Djava.library.path=" + java_library_path,
+ "-jar", os.path.join(OPTIONS.search_path, OPTIONS.sign_sepolicy_path)] +
+ OPTIONS.extra_sign_sepolicy_args)
+
+ cmd.extend([key + OPTIONS.public_key_suffix,
+ key + OPTIONS.private_key_suffix,
+ sepolicy])
+
+ proc = Run(cmd, stdin=subprocess.PIPE)
+ if password is not None:
+ password += "\n"
+ stdoutdata, _ = proc.communicate(password)
+ if proc.returncode != 0:
+ raise ExternalError(
+ "Failed to run sign sepolicy: return code {}:\n{}".format(
+ proc.returncode, stdoutdata))
+ return True
def CheckSize(data, target, info_dict):
"""Checks the data string passed against the max size limit.
@@ -2555,7 +2587,8 @@
opts, args = getopt.getopt(
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=",
- "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
+ "signapk_shared_library_path=", "extra_signapk_args=",
+ "sign_sepolicy_path=", "extra_sign_sepolicy_args=", "aapt2_path=",
"java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
"private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
"verity_signer_path=", "verity_signer_args=", "device_specific=",
@@ -2579,6 +2612,10 @@
OPTIONS.signapk_shared_library_path = a
elif o in ("--extra_signapk_args",):
OPTIONS.extra_signapk_args = shlex.split(a)
+ elif o in ("--sign_sepolicy_path",):
+ OPTIONS.sign_sepolicy_path = a
+ elif o in ("--extra_sign_sepolicy_args",):
+ OPTIONS.extra_sign_sepolicy_args = shlex.split(a)
elif o in ("--aapt2_path",):
OPTIONS.aapt2_path = a
elif o in ("--java_path",):
diff --git a/tools/releasetools/fsverity_manifest_generator.py b/tools/releasetools/fsverity_manifest_generator.py
deleted file mode 100644
index b8184bc..0000000
--- a/tools/releasetools/fsverity_manifest_generator.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright 2022 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-`fsverity_manifest_generator` generates build manifest APK file containing
-digests of target files. The APK file is signed so the manifest inside the APK
-can be trusted.
-"""
-
-import argparse
-import common
-import os
-import subprocess
-import sys
-from fsverity_digests_pb2 import FSVerityDigests
-
-HASH_ALGORITHM = 'sha256'
-
-def _digest(fsverity_path, input_file):
- cmd = [fsverity_path, 'digest', input_file]
- cmd.extend(['--compact'])
- cmd.extend(['--hash-alg', HASH_ALGORITHM])
- out = subprocess.check_output(cmd, universal_newlines=True).strip()
- return bytes(bytearray.fromhex(out))
-
-if __name__ == '__main__':
- p = argparse.ArgumentParser()
- p.add_argument(
- '--output',
- help='Path to the output manifest APK',
- required=True)
- p.add_argument(
- '--fsverity-path',
- help='path to the fsverity program',
- required=True)
- p.add_argument(
- '--aapt2-path',
- help='path to the aapt2 program',
- required=True)
- p.add_argument(
- '--min-sdk-version',
- help='minimum supported sdk version of the generated manifest apk',
- required=True)
- p.add_argument(
- '--version-code',
- help='version code for the generated manifest apk',
- required=True)
- p.add_argument(
- '--version-name',
- help='version name for the generated manifest apk',
- required=True)
- p.add_argument(
- '--framework-res',
- help='path to framework-res.apk',
- required=True)
- p.add_argument(
- '--apksigner-path',
- help='path to the apksigner program',
- required=True)
- p.add_argument(
- '--apk-key-path',
- help='path to the apk key',
- required=True)
- p.add_argument(
- '--apk-manifest-path',
- help='path to AndroidManifest.xml',
- required=True)
- p.add_argument(
- '--base-dir',
- help='directory to use as a relative root for the inputs',
- required=True)
- p.add_argument(
- 'inputs',
- nargs='+',
- help='input file for the build manifest')
- args = p.parse_args(sys.argv[1:])
-
- digests = FSVerityDigests()
- for f in sorted(args.inputs):
- # f is a full path for now; make it relative so it starts with {mount_point}/
- digest = digests.digests[os.path.relpath(f, args.base_dir)]
- digest.digest = _digest(args.fsverity_path, f)
- digest.hash_alg = HASH_ALGORITHM
-
- temp_dir = common.MakeTempDir()
-
- os.mkdir(os.path.join(temp_dir, "assets"))
- metadata_path = os.path.join(temp_dir, "assets", "build_manifest.pb")
- with open(metadata_path, "wb") as f:
- f.write(digests.SerializeToString())
-
- common.RunAndCheckOutput([args.aapt2_path, "link",
- "-A", os.path.join(temp_dir, "assets"),
- "-o", args.output,
- "--min-sdk-version", args.min_sdk_version,
- "--version-code", args.version_code,
- "--version-name", args.version_name,
- "-I", args.framework_res,
- "--manifest", args.apk_manifest_path])
- common.RunAndCheckOutput([args.apksigner_path, "sign", "--in", args.output,
- "--cert", args.apk_key_path + ".x509.pem",
- "--key", args.apk_key_path + ".pk8"])
diff --git a/tools/releasetools/sign_apex.py b/tools/releasetools/sign_apex.py
index 6926467..d3e242b 100755
--- a/tools/releasetools/sign_apex.py
+++ b/tools/releasetools/sign_apex.py
@@ -52,6 +52,7 @@
import common
logger = logging.getLogger(__name__)
+OPTIONS = common.OPTIONS
def SignApexFile(avbtool, apex_file, payload_key, container_key, no_hashtree,
@@ -70,7 +71,8 @@
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=apex_file.endswith(OPTIONS.sepolicy_name))
def main(argv):
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 27e9dfb..dbe79ab 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -27,7 +27,7 @@
apkcerts.txt file, or the container key for an APEX. Option may be
repeated to give multiple extra packages.
- --extra_apex_payload_key <name=key>
+ --extra_apex_payload_key <name,name,...=key>
Add a mapping for APEX package name to payload signing key, which will
override the default payload signing key in apexkeys.txt. Note that the
container key should be overridden via the `--extra_apks` flag above.
@@ -881,7 +881,7 @@
pieces[-1] = EditTags(pieces[-1])
value = "/".join(pieces)
elif key == "ro.build.description":
- pieces = value.split(" ")
+ pieces = value.split()
assert pieces[-1].endswith("-keys")
pieces[-1] = EditTags(pieces[-1])
value = " ".join(pieces)
@@ -1098,7 +1098,7 @@
tokens = []
changed = False
- for token in args.split(' '):
+ for token in args.split():
fingerprint_key = 'com.android.build.{}.fingerprint'.format(partition)
if not token.startswith(fingerprint_key):
tokens.append(token)
@@ -1380,8 +1380,9 @@
for n in names:
OPTIONS.extra_apks[n] = key
elif o == "--extra_apex_payload_key":
- apex_name, key = a.split("=")
- OPTIONS.extra_apex_payload_keys[apex_name] = key
+ apex_names, key = a.split("=")
+ for name in apex_names.split(","):
+ OPTIONS.extra_apex_payload_keys[name] = key
elif o == "--skip_apks_with_path_prefix":
# Check the prefix, which must be in all upper case.
prefix = a.split('/')[0]
diff --git a/tools/releasetools/test_sign_apex.py b/tools/releasetools/test_sign_apex.py
index 8470f20..7723de7 100644
--- a/tools/releasetools/test_sign_apex.py
+++ b/tools/releasetools/test_sign_apex.py
@@ -59,6 +59,21 @@
self.assertTrue(os.path.exists(signed_test_apex))
@test_utils.SkipIfExternalToolsUnavailable()
+ def test_SignSepolicyApex(self):
+ test_apex = os.path.join(self.testdata_dir, 'sepolicy.apex')
+ payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
+ container_key = os.path.join(self.testdata_dir, 'testkey')
+ apk_keys = {'SEPolicy-33.zip': os.path.join(self.testdata_dir, 'testkey')}
+ signed_test_apex = sign_apex.SignApexFile(
+ 'avbtool',
+ test_apex,
+ payload_key,
+ container_key,
+ False,
+ apk_keys)
+ self.assertTrue(os.path.exists(signed_test_apex))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
def test_SignCompressedApexFile(self):
apex = os.path.join(test_utils.get_current_dir(), 'com.android.apex.compressed.v1.capex')
payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
diff --git a/tools/releasetools/testdata/sepolicy.apex b/tools/releasetools/testdata/sepolicy.apex
new file mode 100644
index 0000000..2c646cd
--- /dev/null
+++ b/tools/releasetools/testdata/sepolicy.apex
Binary files differ