Merge "Revert^2 "Do not generate aconfig_flags.textproto"" into main
diff --git a/Changes.md b/Changes.md
index 6c0cf70..fc15e60 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,14 @@
 # Build System Changes for Android.mk/Android.bp Writers
 
+## Soong genrules are now sandboxed
+
+Previously, Soong genrules could access any file in the source tree without declaring it as an
+input. This made them incorrect in incremental builds, and incompatible with RBE and Bazel.
+
+Now, genrules are sandboxed so they can only access their listed srcs. Modules denylisted in
+genrule/allowlists.go are exempt from this. You can also set `BUILD_BROKEN_GENRULE_SANDBOXING`
+in board config to disable this behavior.
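+
+For example, a board config can opt out while its genrules are being migrated (the
+path below is illustrative):
+
+```
+# BoardConfig.mk
+BUILD_BROKEN_GENRULE_SANDBOXING := true
+```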
+
 ## Partitions are no longer affected by previous builds
 
 Partition builds used to include everything in their staging directories, and building an
diff --git a/common/math.mk b/common/math.mk
index 0271ea8..ecee474 100644
--- a/common/math.mk
+++ b/common/math.mk
@@ -25,6 +25,7 @@
                       61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 \
                       81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
 __MATH_NUMBERS := 0 $(__MATH_POS_NUMBERS)
+__MATH_ONE_NUMBERS := 0 1 2 3 4 5 6 7 8 9
 
 math-error = $(call pretty-error,$(1))
 math-expect :=
@@ -36,6 +37,10 @@
 #  make -f ${ANDROID_BUILD_TOP}/build/make/common/math.mk RUN_MATH_TESTS=true
 #  $(get_build_var CKATI) -f ${ANDROID_BUILD_TOP}//build/make/common/math.mk RUN_MATH_TESTS=true
 ifdef RUN_MATH_TESTS
+  ifndef empty
+    empty :=
+    space := $(empty) $(empty)
+  endif
   MATH_TEST_FAILURE :=
   MATH_TEST_ERROR :=
   math-error = $(if $(MATH_TEST_ERROR),,$(eval MATH_TEST_ERROR:=$(1)))
@@ -61,13 +66,29 @@
 endif
 
 # Returns true if $(1) is a non-negative integer <= 100, otherwise returns nothing.
-define math_is_number
+define math_is_number_in_100
 $(strip \
   $(if $(1),,$(call math-error,Argument missing)) \
   $(if $(word 2,$(1)),$(call math-error,Multiple words in a single argument: $(1))) \
   $(if $(filter $(1),$(__MATH_NUMBERS)),true))
 endef
 
+# Same as math_is_number_in_100, but with no upper limit.
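+# Works by deleting every digit 0-9 from the argument: if nothing is left over,
+# the argument consisted only of digits.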
+define _math_ext_is_number
+$(strip \
+  $(if $(1),,$(call math-error,Argument missing)) \
+  $(if $(word 2,$(1)),$(call math-error,Multiple words in a single argument: $(1))) \
+  $(eval should_empty:=$(1)) \
+  $(foreach num,$(__MATH_ONE_NUMBERS),\
+    $(eval should_empty:=$(subst $(num),$(empty),$(should_empty)))) \
+  $(if $(should_empty),,true))
+endef
+
+# Returns true if $(1) is a non-negative integer.
+define math_is_number
+$(strip $(if $(call math_is_number_in_100,$(1)),true,$(call _math_ext_is_number,$(1))))
+endef
+
 define math_is_zero
 $(strip \
   $(if $(word 2,$(1)),$(call math-error,Multiple words in a single argument: $(1))) \
@@ -76,6 +97,7 @@
 
 $(call math-expect-true,(call math_is_number,0))
 $(call math-expect-true,(call math_is_number,2))
+$(call math-expect-true,(call math_is_number,202412))
 $(call math-expect-false,(call math_is_number,foo))
 $(call math-expect-false,(call math_is_number,-1))
 $(call math-expect-error,(call math_is_number,1 2),Multiple words in a single argument: 1 2)
@@ -88,7 +110,7 @@
 $(call math-expect-error,(call math_is_zero,no 2),Multiple words in a single argument: no 2)
 
 define _math_check_valid
-$(if $(call math_is_number,$(1)),,$(call math-error,Only non-negative integers <= 100 are supported (not $(1))))
+$(if $(call math_is_number_in_100,$(1)),,$(call math-error,Only non-negative integers <= 100 are supported (not $(1))))
 endef
 
 $(call math-expect,(call _math_check_valid,0))
@@ -113,18 +135,81 @@
 $(call math-expect,(call int_range_list,2,1),)
 $(call math-expect-error,(call int_range_list,1,101),Only non-negative integers <= 100 are supported (not 101))
 
+# Split an integer into a list of digits
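+# (e.g. 123 becomes "1 2 3") by prefixing each digit with a space.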
+define _math_number_to_list
+$(strip \
+  $(if $(call _math_ext_is_number,$(1)),,\
+    $(call math-error,Only non-negative integers are supported (not $(1)))) \
+  $(eval num_list:=$(1)) \
+  $(foreach num,$(__MATH_ONE_NUMBERS),\
+    $(eval num_list:=$(subst $(num),$(space)$(num),$(num_list)))) \
+  $(if $(filter $(words $(num_list)),$(__MATH_ONE_NUMBERS)),,\
+    $(call math-error,Only non-negative integers with at most 9 digits are supported (not $(1)))) \
+  $(if $(filter 0,$(word 1,$(num_list))),\
+    $(call math-error,Only non-negative integers without leading zeros are supported (not $(1)))) \
+  $(num_list))
+endef
+
+$(call math-expect,(call _math_number_to_list,123),1 2 3)
+$(call math-expect-error,(call _math_number_to_list,123 456),Multiple words in a single argument: 123 456)
+$(call math-expect-error,(call _math_number_to_list,-123),Only non-negative integers are supported (not -123))
+$(call math-expect-error,(call _math_number_to_list,002),Only non-negative integers without leading zeros are supported (not 002))
+$(call math-expect-error,(call _math_number_to_list,1234567890),Only non-negative integers with at most 9 digits are supported (not 1234567890))
+
+# Compare 1-digit integers $(1) and $(2).
+# Returns 1 if $(1) > $(2), -1 if $(1) < $(2), nothing if they are equal.
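+# Works by filtering the ordered list 0-9 down to the two digits: whichever
+# survives first is the smaller one.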
+define _math_1digit_comp
+$(strip \
+  $(if $(filter $(1),$(2)),,\
+    $(if $(filter $(1),$(firstword $(filter $(1) $(2),$(__MATH_ONE_NUMBERS)))),-1,1)))
+endef
+
+$(call math-expect,(call _math_1digit_comp,1,1))
+$(call math-expect,(call _math_1digit_comp,0,9),-1)
+$(call math-expect,(call _math_1digit_comp,3,1),1)
+
+# Compare integers $(1) and $(2), both already split into digit lists of the same length $(3).
+# Returns 1 if $(1) > $(2), -1 if $(1) < $(2), nothing if they are equal.
+define _math_list_comp
+$(strip \
+  $(eval ans:=) \
+  $(foreach num,$(call int_range_list,1,$(3)),\
+    $(if $(ans),,$(eval ans:=$(call _math_1digit_comp,$(word $(num),$(1)),$(word $(num),$(2)))))) \
+  $(ans))
+endef
+
+# Compare any two non-negative integers $(1) and $(2).
+# Returns 1 if $(1) > $(2), -1 if $(1) < $(2), nothing if they are equal.
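+# Digit counts are compared first; since leading zeros are rejected, the number
+# with more digits is strictly greater. Equal lengths fall back to a
+# digit-by-digit comparison from the most significant digit.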
+define _math_ext_comp
+$(strip \
+  $(eval num_list1:=$(call _math_number_to_list,$(1))) \
+  $(eval len1:=$(words $(num_list1))) \
+  $(eval num_list2:=$(call _math_number_to_list,$(2))) \
+  $(eval len2:=$(words $(num_list2))) \
+  $(eval comp:=$(call _math_1digit_comp,$(len1),$(len2))) \
+  $(if $(comp),$(comp),$(call _math_list_comp,$(num_list1),$(num_list2),$(len1))))
+endef
+
+$(call math-expect,(call _math_ext_comp,5,10),-1)
+$(call math-expect,(call _math_ext_comp,12345,12345))
+$(call math-expect,(call _math_ext_comp,500,5),1)
+$(call math-expect,(call _math_ext_comp,202404,202504),-1)
 
 # Returns the greater of $1 or $2.
-# If $1 or $2 is not a positive integer <= 100, then an error is generated.
+# If $1 or $2 is not a positive integer, then an error is generated.
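+# Both arguments <= 100 take the fast path, picking the later of the two words in
+# the ordered $(__MATH_NUMBERS) list; anything larger falls back to _math_ext_comp.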
 define math_max
-$(strip $(call _math_check_valid,$(1)) $(call _math_check_valid,$(2)) \
-  $(lastword $(filter $(1) $(2),$(__MATH_NUMBERS))))
+$(strip \
+  $(if $(filter truetrue,$(call math_is_number_in_100,$(1))$(call math_is_number_in_100,$(2))),\
+    $(lastword $(filter $(1) $(2),$(__MATH_NUMBERS))),\
+    $(if $(filter 1,$(call _math_ext_comp,$(1),$(2))),$(1),$(2))))
 endef
 
 # Returns the lesser of $1 or $2.
 define math_min
-$(strip $(call _math_check_valid,$(1)) $(call _math_check_valid,$(2)) \
-  $(firstword $(filter $(1) $(2),$(__MATH_NUMBERS))))
+$(strip \
+  $(if $(filter truetrue,$(call math_is_number_in_100,$(1))$(call math_is_number_in_100,$(2))),\
+    $(firstword $(filter $(1) $(2),$(__MATH_NUMBERS))),\
+    $(if $(filter -1,$(call _math_ext_comp,$(1),$(2))),$(1),$(2))))
 endef
 
 $(call math-expect-error,(call math_max),Argument missing)
@@ -142,6 +227,15 @@
 $(call math-expect,(call math_min,7,32),7)
 $(call math-expect,(call math_min,32,7),7)
 
+$(call math-expect,(call math_max,32759,7),32759)
+$(call math-expect,(call math_max,7,32759),32759)
+$(call math-expect,(call math_max,202404,202505),202505)
+$(call math-expect,(call math_max,202404,202404),202404)
+$(call math-expect,(call math_min,8908527,32),32)
+$(call math-expect,(call math_min,32,8908527),32)
+$(call math-expect,(call math_min,202404,202505),202404)
+$(call math-expect,(call math_min,202404,202404),202404)
+
 define math_gt_or_eq
 $(if $(filter $(1),$(call math_max,$(1),$(2))),true)
 endef
@@ -150,6 +244,10 @@
 $(if $(call math_gt_or_eq,$(2),$(1)),,true)
 endef
 
+define math_lt_or_eq
+$(if $(call math_gt_or_eq,$(2),$(1)),true)
+endef
+
 define math_lt
 $(if $(call math_gt_or_eq,$(1),$(2)),,true)
 endef
@@ -160,10 +258,17 @@
 $(call math-expect-true,(call math_gt, 4, 3))
 $(call math-expect-false,(call math_gt, 5, 5))
 $(call math-expect-false,(call math_gt, 6, 7))
+$(call math-expect-true,(call math_lt_or_eq, 11, 11))
+$(call math-expect-false,(call math_lt_or_eq, 25, 15))
+$(call math-expect-true,(call math_lt_or_eq, 9, 16))
 $(call math-expect-false,(call math_lt, 1, 0))
 $(call math-expect-false,(call math_lt, 8, 8))
 $(call math-expect-true,(call math_lt, 10, 11))
 
+$(call math-expect-true,(call math_gt_or_eq, 2573904, 2573900))
+$(call math-expect-true,(call math_gt_or_eq, 12345, 12345))
+$(call math-expect-false,(call math_gt_or_eq, 56, 2780))
+
 # $1 is the variable name to increment
 define inc_and_print
 $(strip $(eval $(1) := $($(1)) .)$(words $($(1))))
@@ -192,6 +297,7 @@
 $(call math-expect,(call numbers_less_than,3,0 2 1 3),0 2 1)
 $(call math-expect,(call numbers_less_than,4,0 2 1 3),0 2 1 3)
 $(call math-expect,(call numbers_less_than,3,0 2 1 3 2),0 2 1 2)
+$(call math-expect,(call numbers_less_than,100,0 1000 50 101 100),0 50)
 
 # Returns the words in $2 that are numbers and are greater or equal to $1
 define numbers_greater_or_equal_to
diff --git a/core/Makefile b/core/Makefile
index 453a013..79c8a17 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -793,9 +793,25 @@
 .PHONY: apkcerts-list
 apkcerts-list: $(APKCERTS_FILE)
 
+intermediates := $(call intermediates-dir-for,PACKAGING,apexkeys)
+APEX_KEYS_FILE := $(intermediates)/apexkeys.txt
+
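+# Aggregate the apex key metadata of every installed module into a single apexkeys.txt.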
+all_apex_keys_files := $(sort $(foreach m,$(call product-installed-modules,$(INTERNAL_PRODUCT)),$(ALL_MODULES.$(m).APEX_KEYS_FILE)))
+$(APEX_KEYS_FILE): $(all_apex_keys_files)
+	@mkdir -p $(dir $@)
+	@rm -f $@
+	$(hide) touch $@
+	$(hide) $(foreach file,$^,cat $(file) >> $@ $(newline))
+all_apex_keys_files :=
+
+$(call declare-0p-target,$(APEX_KEYS_FILE))
+
+.PHONY: apexkeys.txt
+apexkeys.txt: $(APEX_KEYS_FILE)
+
 ifneq (,$(TARGET_BUILD_APPS))
   $(call dist-for-goals, apps_only, $(APKCERTS_FILE):apkcerts.txt)
-  $(call dist-for-goals, apps_only, $(SOONG_APEX_KEYS_FILE):apexkeys.txt)
+  $(call dist-for-goals, apps_only, $(APEX_KEYS_FILE):apexkeys.txt)
 endif
 
 
@@ -871,6 +887,9 @@
 $(call declare-1p-target,$(MK2BP_REMAINING_CSV))
 $(call dist-for-goals,droidcore-unbundled,$(MK2BP_REMAINING_CSV))
 
+.PHONY: mk2bp_remaining
+mk2bp_remaining: $(MK2BP_REMAINING_HTML) $(MK2BP_REMAINING_CSV)
+
 # -----------------------------------------------------------------
 # Modules use -Wno-error, or added default -Wall -Werror
 WALL_WERROR := $(PRODUCT_OUT)/wall_werror.txt
@@ -1060,9 +1079,28 @@
 BUILT_RAMDISK_16K_TARGET := $(PRODUCT_OUT)/ramdisk_16k.img
 RAMDISK_16K_STAGING_DIR := $(call intermediates-dir-for,PACKAGING,depmod_ramdisk_16k)
 
+ifneq ($(BOARD_SYSTEM_KERNEL_MODULES),)
+SYSTEM_DLKM_MODULE_PATTERNS := $(foreach path,$(BOARD_SYSTEM_KERNEL_MODULES),%/$(notdir $(path)))
+
+endif
+
+# BOARD_KERNEL_MODULES_16K might contain duplicate modules under different paths,
+# for example foo/bar/wifi.ko and foo/wifi.ko. To avoid build issues, de-dup
+# the module list by basename first.
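+# For each unique basename pattern, the first path that matches wins.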
+BOARD_KERNEL_MODULES_16K := $(foreach \
+  pattern,\
+  $(sort $(foreach \
+    path,\
+    $(BOARD_KERNEL_MODULES_16K),\
+    %/$(notdir $(path)))\
+  ),\
+  $(firstword $(filter $(pattern),$(BOARD_KERNEL_MODULES_16K))) \
+)
+# For non-GKI modules, strip them before install, as debug symbols take up
+# significant space.
 $(foreach \
   file,\
-  $(BOARD_KERNEL_MODULES_16K),\
+  $(filter-out $(SYSTEM_DLKM_MODULE_PATTERNS),$(BOARD_KERNEL_MODULES_16K)),\
   $(eval \
     $(call copy-and-strip-kernel-module,\
       $(file),\
@@ -1071,6 +1109,20 @@
   ) \
 )
 
+# For GKI modules, copy as-is without stripping, because stripping would
+# remove the signature of kernel modules, and GKI modules must be signed
+# for the kernel to load them.
+$(foreach \
+  file,\
+  $(filter $(SYSTEM_DLKM_MODULE_PATTERNS),$(BOARD_KERNEL_MODULES_16K)),\
+  $(eval \
+    $(call copy-one-file,\
+      $(file),\
+      $(RAMDISK_16K_STAGING_DIR)/lib/modules/0.0/$(notdir $(file)) \
+    ) \
+  ) \
+)
+
 BOARD_VENDOR_RAMDISK_FRAGMENT.16K.PREBUILT := $(BUILT_RAMDISK_16K_TARGET)
 
 $(BUILT_RAMDISK_16K_TARGET): $(DEPMOD) $(MKBOOTFS) $(EXTRACT_KERNEL) $(COMPRESSION_COMMAND_DEPS)
@@ -1423,15 +1475,19 @@
 INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
 
 ifeq ($(BOARD_AVB_ENABLE),true)
-$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH)
+$(INSTALLED_BOOTIMAGE_TARGET): PRIVATE_WORKING_DIR := $(call intermediates-dir-for,PACKAGING,prebuilt_bootimg)
+$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE) $(AVBTOOL) $(BOARD_AVB_BOOT_KEY_PATH) $(UNPACK_BOOTIMG)
 	cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
+	$(UNPACK_BOOTIMG) --boot_img $(INTERNAL_PREBUILT_BOOTIMAGE) --out $(PRIVATE_WORKING_DIR)
 	chmod +w $@
 	$(AVBTOOL) add_hash_footer \
 	    --image $@ \
+	    --salt `sha256sum $(PRIVATE_WORKING_DIR)/kernel | cut -d " " -f 1` \
 	    $(call get-partition-size-argument,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) \
 	    --partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
 	    $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
 
+
 $(call declare-container-license-metadata,$(INSTALLED_BOOTIMAGE_TARGET),SPDX-license-identifier-GPL-2.0-only SPDX-license-identifier-Apache-2.0,restricted notice,$(BUILD_SYSTEM)/LINUX_KERNEL_COPYING build/soong/licenses/LICENSE,"Boot Image",bool)
 $(call declare-container-license-deps,$(INSTALLED_BOOTIMAGE_TARGET),$(INTERNAL_PREBUILT_BOOTIMAGE),$(PRODUCT_OUT)/:/)
 
@@ -2110,6 +2166,7 @@
 $(if $(BOARD_$(_var)IMAGE_EROFS_COMPRESSOR),$(hide) echo "$(1)_erofs_compressor=$(BOARD_$(_var)IMAGE_EROFS_COMPRESSOR)" >> $(2))
 $(if $(BOARD_$(_var)IMAGE_EROFS_COMPRESS_HINTS),$(hide) echo "$(1)_erofs_compress_hints=$(BOARD_$(_var)IMAGE_EROFS_COMPRESS_HINTS)" >> $(2))
 $(if $(BOARD_$(_var)IMAGE_EROFS_PCLUSTER_SIZE),$(hide) echo "$(1)_erofs_pcluster_size=$(BOARD_$(_var)IMAGE_EROFS_PCLUSTER_SIZE)" >> $(2))
+$(if $(BOARD_$(_var)IMAGE_EROFS_BLOCKSIZE),$(hide) echo "$(1)_erofs_blocksize=$(BOARD_$(_var)IMAGE_EROFS_BLOCKSIZE)" >> $(2))
 $(if $(BOARD_$(_var)IMAGE_EXTFS_INODE_COUNT),$(hide) echo "$(1)_extfs_inode_count=$(BOARD_$(_var)IMAGE_EXTFS_INODE_COUNT)" >> $(2))
 $(if $(BOARD_$(_var)IMAGE_EXTFS_RSV_PCT),$(hide) echo "$(1)_extfs_rsv_pct=$(BOARD_$(_var)IMAGE_EXTFS_RSV_PCT)" >> $(2))
 $(if $(BOARD_$(_var)IMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "$(1)_f2fs_sldc_flags=$(BOARD_$(_var)IMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(2))
@@ -2199,6 +2256,7 @@
 $(if $(BOARD_EROFS_COMPRESSOR),$(hide) echo "erofs_default_compressor=$(BOARD_EROFS_COMPRESSOR)" >> $(1))
 $(if $(BOARD_EROFS_COMPRESS_HINTS),$(hide) echo "erofs_default_compress_hints=$(BOARD_EROFS_COMPRESS_HINTS)" >> $(1))
 $(if $(BOARD_EROFS_PCLUSTER_SIZE),$(hide) echo "erofs_pcluster_size=$(BOARD_EROFS_PCLUSTER_SIZE)" >> $(1))
+$(if $(BOARD_EROFS_BLOCKSIZE),$(hide) echo "erofs_blocksize=$(BOARD_EROFS_BLOCKSIZE)" >> $(1))
 $(if $(BOARD_EROFS_SHARE_DUP_BLOCKS),$(hide) echo "erofs_share_dup_blocks=$(BOARD_EROFS_SHARE_DUP_BLOCKS)" >> $(1))
 $(if $(BOARD_EROFS_USE_LEGACY_COMPRESSION),$(hide) echo "erofs_use_legacy_compression=$(BOARD_EROFS_USE_LEGACY_COMPRESSION)" >> $(1))
 $(if $(BOARD_EXT4_SHARE_DUP_BLOCKS),$(hide) echo "ext4_share_dup_blocks=$(BOARD_EXT4_SHARE_DUP_BLOCKS)" >> $(1))
@@ -5049,8 +5107,11 @@
 APEX_INFO_FILE := $(APEX_OUT)/apex-info-list.xml
 
 # apexd_host scans/activates APEX files and writes /apex/apex-info-list.xml
+# Note that the `@echo $(PRIVATE_APEX_FILES)` line is there so the rule reruns when the APEX list changes.
+$(APEX_INFO_FILE): PRIVATE_APEX_FILES := $(apex_files)
 $(APEX_INFO_FILE): $(HOST_OUT_EXECUTABLES)/apexd_host $(apex_files)
 	@echo "Extracting apexes..."
+	@echo $(PRIVATE_APEX_FILES) > /dev/null
 	@rm -rf $(APEX_OUT)
 	@mkdir -p $(APEX_OUT)
 	$< --vendor_path $(TARGET_OUT_VENDOR) \
@@ -6294,7 +6355,7 @@
 	    $(INSTALLED_MISC_INFO_TARGET) \
 	    $(INSTALLED_FASTBOOT_INFO_TARGET) \
 	    $(APKCERTS_FILE) \
-	    $(SOONG_APEX_KEYS_FILE) \
+	    $(APEX_KEYS_FILE) \
 	    $(SOONG_ZIP) \
 	    $(HOST_OUT_EXECUTABLES)/fs_config \
 	    $(HOST_OUT_EXECUTABLES)/map_file_generator \
@@ -6505,7 +6566,7 @@
 	@# build them.
 	$(hide) mkdir -p $(zip_root)/META
 	$(hide) cp $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
-	$(hide) cp $(SOONG_APEX_KEYS_FILE) $(zip_root)/META/apexkeys.txt
+	$(hide) cp $(APEX_KEYS_FILE) $(zip_root)/META/apexkeys.txt
 ifneq ($(tool_extension),)
 	$(hide) cp $(PRIVATE_TOOL_EXTENSION) $(zip_root)/META/
 endif
@@ -6768,7 +6829,7 @@
             $(INSTALLED_MISC_INFO_TARGET) \
             $(INSTALLED_FASTBOOT_INFO_TARGET) \
             $(APKCERTS_FILE) \
-            $(SOONG_APEX_KEYS_FILE) \
+            $(APEX_KEYS_FILE) \
             $(HOST_OUT_EXECUTABLES)/fs_config \
             $(HOST_OUT_EXECUTABLES)/map_file_generator \
             $(ADD_IMG_TO_TARGET_FILES) \
@@ -7650,7 +7711,7 @@
 
 # -----------------------------------------------------------------
 # Extract platform fonts used in Layoutlib
-include $(BUILD_SYSTEM)/layoutlib_fonts.mk
+include $(BUILD_SYSTEM)/layoutlib_data.mk
 
 
 # -----------------------------------------------------------------
diff --git a/core/all_versions.bzl b/core/all_versions.bzl
deleted file mode 100644
index 33da673..0000000
--- a/core/all_versions.bzl
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2023 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-_all_versions = ["OPR1", "OPD1", "OPD2", "OPM1", "OPM2", "PPR1", "PPD1", "PPD2", "PPM1", "PPM2", "QPR1"] + [
-    version + subversion
-    for version in ["Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
-    for subversion in ["P1A", "P1B", "P2A", "P2B", "D1A", "D1B", "D2A", "D2B", "Q1A", "Q1B", "Q2A", "Q2B", "Q3A", "Q3B"]
-]
-
-variables_to_export_to_make = {
-    "ALL_VERSIONS": _all_versions,
-}
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 3313b5f..0eb205a 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -1180,6 +1180,7 @@
 ALL_MODULES.$(my_register_name).TEST_MAINLINE_MODULES := $(LOCAL_TEST_MAINLINE_MODULES)
 ifndef LOCAL_IS_HOST_MODULE
 ALL_MODULES.$(my_register_name).FILE_CONTEXTS := $(LOCAL_FILE_CONTEXTS)
+ALL_MODULES.$(my_register_name).APEX_KEYS_FILE := $(LOCAL_APEX_KEY_PATH)
 endif
 ifdef LOCAL_IS_UNIT_TEST
 ALL_MODULES.$(my_register_name).IS_UNIT_TEST := $(LOCAL_IS_UNIT_TEST)
diff --git a/core/board_config.mk b/core/board_config.mk
index eb4c5ec..b7ca3a4 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -188,6 +188,7 @@
   BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE \
   BUILD_BROKEN_VINTF_PRODUCT_COPY_FILES \
   BUILD_BROKEN_INCORRECT_PARTITION_IMAGES \
+  BUILD_BROKEN_GENRULE_SANDBOXING \
 
 _build_broken_var_list += \
   $(foreach m,$(AVAILABLE_BUILD_MODULE_TYPES) \
@@ -223,6 +224,8 @@
   board_config_mk := \
     $(strip $(sort $(wildcard \
       $(SRC_TARGET_DIR)/board/$(TARGET_DEVICE)/BoardConfig.mk \
+      device/generic/goldfish/board/$(TARGET_DEVICE)/BoardConfig.mk \
+      device/google/cuttlefish/board/$(TARGET_DEVICE)/BoardConfig.mk \
       $(shell test -d device && find -L device -maxdepth 4 -path '*/$(TARGET_DEVICE)/BoardConfig.mk') \
       $(shell test -d vendor && find -L vendor -maxdepth 4 -path '*/$(TARGET_DEVICE)/BoardConfig.mk') \
     )))
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 409e559..d76c4f2 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -19,6 +19,7 @@
 LOCAL_ALLOW_UNDEFINED_SYMBOLS:=
 LOCAL_ANNOTATION_PROCESSORS:=
 LOCAL_ANNOTATION_PROCESSOR_CLASSES:=
+LOCAL_APEX_KEY_PATH:=
 LOCAL_APIDIFF_NEWAPI:=
 LOCAL_APIDIFF_OLDAPI:=
 LOCAL_APK_LIBRARIES:=
diff --git a/core/config.mk b/core/config.mk
index 196f07c..c747fd5 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -722,6 +722,7 @@
 BUILD_SUPER_IMAGE := $(BOARD_CUSTOM_BUILD_SUPER_IMAGE)
 endif
 IMG_FROM_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/img_from_target_files$(HOST_EXECUTABLE_SUFFIX)
+UNPACK_BOOTIMG := $(HOST_OUT_EXECUTABLES)/unpack_bootimg
 MAKE_RECOVERY_PATCH := $(HOST_OUT_EXECUTABLES)/make_recovery_patch$(HOST_EXECUTABLE_SUFFIX)
 OTA_FROM_TARGET_FILES := $(HOST_OUT_EXECUTABLES)/ota_from_target_files$(HOST_EXECUTABLE_SUFFIX)
 OTA_FROM_RAW_IMG := $(HOST_OUT_EXECUTABLES)/ota_from_raw_img$(HOST_EXECUTABLE_SUFFIX)
diff --git a/core/definitions.mk b/core/definitions.mk
index b6b0d69..ebc6c6e 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2955,7 +2955,7 @@
 ifeq ($(HOST_OS),linux)
 # Runs appcompat and store logs in $(PRODUCT_OUT)/appcompat
 define extract-package
-$(AAPT2) dump resources $@ | awk -F ' |=' '/^Package/{print $$3}' >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log &&
+$(AAPT2) dump resources $@ | awk -F ' |=' '/^Package/{print $$3; exit}' >> $(PRODUCT_OUT)/appcompat/$(PRIVATE_MODULE).log &&
 endef
 define appcompat-header
 $(hide) \
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 091c2e3..7ddbf32 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -51,44 +51,15 @@
 include $(BUILD_SYSTEM)/release_config.mk
 
 # ---------------------------------------------------------------
-# defines ALL_VERSIONS
-$(call run-starlark,build/make/core/all_versions.bzl)
+# Set up version information
+include $(BUILD_SYSTEM)/version_util.mk
 
-# Filters ALL_VERSIONS down to the range [$1, $2], and errors if $1 > $2 or $3 is
-# not in [$1, $2]
-# $(1): min platform version
-# $(2): max platform version
-# $(3): default platform version
-define allowed-platform-versions
-$(strip \
-  $(if $(filter $(ALL_VERSIONS),$(1)),,
-    $(error Invalid MIN_PLATFORM_VERSION '$(1)'))
-  $(if $(filter $(ALL_VERSIONS),$(2)),,
-    $(error Invalid MAX_PLATFORM_VERSION '$(2)'))
-  $(if $(filter $(ALL_VERSIONS),$(3)),,
-    $(error Invalid RELEASE_PLATFORM_VERSION '$(3)'))
+# This list used to be calculated, but is now fixed and not expected
+# to change over time anymore. New code that wants an
+# IS_AT_LEAST_* style variable should instead use a
+# build system flag.
 
-  $(eval allowed_versions_ := $(call find_and_earlier,$(ALL_VERSIONS),$(2)))
-
-  $(if $(filter $(allowed_versions_),$(1)),,
-    $(error MIN_PLATFORM_VERSION '$(1)' must be before MAX_PLATFORM_VERSION '$(2)'))
-
-  $(eval allowed_versions_ := $(1) \
-    $(filter-out $(call find_and_earlier,$(allowed_versions_),$(1)),$(allowed_versions_)))
-
-  $(if $(filter $(allowed_versions_),$(3)),,
-    $(error RELEASE_PLATFORM_VERSION '$(3)' must be between MIN_PLATFORM_VERSION '$(1)' and MAX_PLATFORM_VERSION '$(2)'))
-
-  $(allowed_versions_))
-endef
-
-#$(warning $(call allowed-platform-versions,OPR1,PPR1,OPR1))
-#$(warning $(call allowed-platform-versions,OPM1,PPR1,OPR1))
-
-# Set up version information.
-include $(BUILD_SYSTEM)/version_defaults.mk
-
-ENABLED_VERSIONS := $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
+ENABLED_VERSIONS := OPR1 OPD1 OPD2 OPM1 OPM2 PPR1 PPD1 PPD2 PPM1 PPM2 QPR1 QP1A QP1B QP2A QP2B QD1A QD1B QD2A QD2B QQ1A QQ1B QQ2A QQ2B QQ3A QQ3B RP1A RP1B RP2A RP2B RD1A RD1B RD2A RD2B RQ1A RQ1B RQ2A RQ2B RQ3A RQ3B SP1A SP1B SP2A SP2B SD1A SD1B SD2A SD2B SQ1A SQ1B SQ2A SQ2B SQ3A SQ3B TP1A TP1B TP2A TP2B TD1A TD1B TD2A TD2B TQ1A TQ1B TQ2A TQ2B TQ3A TQ3B UP1A UP1B UP2A UP2B UD1A UD1B UD2A UD2B UQ1A UQ1B UQ2A UQ2B UQ3A UQ3B
 
 $(foreach v,$(ENABLED_VERSIONS), \
   $(eval IS_AT_LEAST_$(v) := true))
diff --git a/core/instrumentation_test_config_template.xml b/core/instrumentation_test_config_template.xml
index 379126c..9dfc001 100644
--- a/core/instrumentation_test_config_template.xml
+++ b/core/instrumentation_test_config_template.xml
@@ -24,7 +24,7 @@
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.{TEST_TYPE}" >
-        {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="{PACKAGE}" />
+        <option name="package" value="{PACKAGE}" />
         <option name="runner" value="{RUNNER}" />
     </test>
 </configuration>
diff --git a/core/layoutlib_data.mk b/core/layoutlib_data.mk
new file mode 100644
index 0000000..7321c09
--- /dev/null
+++ b/core/layoutlib_data.mk
@@ -0,0 +1,133 @@
+# Data files for layoutlib
+
+FONT_TEMP := $(call intermediates-dir-for,PACKAGING,fonts,HOST,COMMON)
+
+# The font configuration files - system_fonts.xml, fallback_fonts.xml etc.
+font_config := $(sort $(wildcard frameworks/base/data/fonts/*.xml))
+font_config := $(addprefix $(FONT_TEMP)/, $(notdir $(font_config)))
+
+$(font_config): $(FONT_TEMP)/%.xml: \
+			frameworks/base/data/fonts/%.xml
+	$(hide) mkdir -p $(dir $@)
+	$(hide) cp -vf $< $@
+
+# List of fonts on the device that we want to ship. This is all .ttf, .ttc and .otf fonts.
+fonts_device := $(filter $(TARGET_OUT)/fonts/%.ttf $(TARGET_OUT)/fonts/%.ttc $(TARGET_OUT)/fonts/%.otf, $(INTERNAL_SYSTEMIMAGE_FILES))
+fonts_device := $(addprefix $(FONT_TEMP)/, $(notdir $(fonts_device)))
+
+# TODO: If the font file is a symlink, reuse the font renamed from the symlink
+# target.
+$(fonts_device): $(FONT_TEMP)/%: $(TARGET_OUT)/fonts/%
+	$(hide) mkdir -p $(dir $@)
+	$(hide) cp -vf $< $@
+
+KEYBOARD_TEMP := $(call intermediates-dir-for,PACKAGING,keyboards,HOST,COMMON)
+
+# The key character map files needed for supporting KeyEvent
+keyboards := $(sort $(wildcard frameworks/base/data/keyboards/*.kcm))
+keyboards := $(addprefix $(KEYBOARD_TEMP)/, $(notdir $(keyboards)))
+
+$(keyboards): $(KEYBOARD_TEMP)/%.kcm: frameworks/base/data/keyboards/%.kcm
+	$(hide) mkdir -p $(dir $@)
+	$(hide) cp -vf $< $@
+
+# List of all data files - font files, font configuration files, key character map files
+LAYOUTLIB_FILES := $(fonts_device) $(font_config) $(keyboards)
+
+.PHONY: layoutlib layoutlib-tests
+layoutlib layoutlib-tests: $(LAYOUTLIB_FILES)
+
+$(call dist-for-goals, layoutlib, $(foreach m,$(fonts_device), $(m):layoutlib_native/fonts/$(notdir $(m))))
+$(call dist-for-goals, layoutlib, $(foreach m,$(font_config), $(m):layoutlib_native/fonts/$(notdir $(m))))
+$(call dist-for-goals, layoutlib, $(foreach m,$(keyboards), $(m):layoutlib_native/keyboards/$(notdir $(m))))
+
+FONT_TEMP :=
+font_config :=
+fonts_device :=
+FONT_FILES :=
+
+# The build steps for build.prop and layoutlib-res.zip are moved here from release_layoutlib.sh
+# so that the SBOMs of all platform-neutral artifacts and the Linux/Windows artifacts of layoutlib can be built in Make/Soong.
+# See go/layoutlib-sbom.
+
+# build.prop shipped with layoutlib
+LAYOUTLIB_BUILD_PROP := $(call intermediates-dir-for,PACKAGING,layoutlib-build-prop,HOST,COMMON)
+$(LAYOUTLIB_BUILD_PROP)/layoutlib-build.prop: $(INSTALLED_SDK_BUILD_PROP_TARGET)
+	rm -rf $@
+	cp $< $@
+	# Remove all the uncommon build properties
+	sed -i '/^ro\.\(build\|product\|config\|system\)/!d' $@
+	# Mark the build as layoutlib. This can be read at runtime by apps
+	sed -i 's|ro.product.brand=generic|ro.product.brand=studio|' $@
+	sed -i 's|ro.product.device=generic|ro.product.device=layoutlib|' $@
+
+$(call dist-for-goals,layoutlib,$(LAYOUTLIB_BUILD_PROP)/layoutlib-build.prop:layoutlib_native/build.prop)
+
+# Resource files from frameworks/base/core/res/res
+LAYOUTLIB_RES := $(call intermediates-dir-for,PACKAGING,layoutlib-res,HOST,COMMON)
+LAYOUTLIB_RES_FILES := $(shell find frameworks/base/core/res/res -type f -not -path 'frameworks/base/core/res/res/values-m[nc]c*' | sort)
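+# The recipe below zips the res files, recompiles the 9-patch PNGs with aapt2
+# (renaming them to *.compiled.9.png), and repackages everything into layoutlib-res.zip.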
+$(LAYOUTLIB_RES)/layoutlib-res.zip: $(SOONG_ZIP) $(HOST_OUT_EXECUTABLES)/aapt2 $(LAYOUTLIB_RES_FILES)
+	rm -rf $@
+	echo $(LAYOUTLIB_RES_FILES) > $(LAYOUTLIB_RES)/filelist.txt
+	$(SOONG_ZIP) -C frameworks/base/core/res -l $(LAYOUTLIB_RES)/filelist.txt -o $(LAYOUTLIB_RES)/temp.zip
+	rm -rf $(LAYOUTLIB_RES)/data && unzip -q -d $(LAYOUTLIB_RES)/data $(LAYOUTLIB_RES)/temp.zip
+	rm -rf $(LAYOUTLIB_RES)/compiled && mkdir $(LAYOUTLIB_RES)/compiled && $(HOST_OUT_EXECUTABLES)/aapt2 compile $(LAYOUTLIB_RES)/data/res/**/*.9.png -o $(LAYOUTLIB_RES)/compiled
+	printf '<?xml version="1.0" encoding="utf-8"?>\n<manifest xmlns:android="http://schemas.android.com/apk/res/android" package="com.google.android.layoutlib" />' > $(LAYOUTLIB_RES)/AndroidManifest.xml
+	$(HOST_OUT_EXECUTABLES)/aapt2 link -R $(LAYOUTLIB_RES)/compiled/* -o $(LAYOUTLIB_RES)/compiled.apk --manifest $(LAYOUTLIB_RES)/AndroidManifest.xml
+	rm -rf $(LAYOUTLIB_RES)/compiled_apk && unzip -q -d $(LAYOUTLIB_RES)/compiled_apk $(LAYOUTLIB_RES)/compiled.apk
+	for f in $(LAYOUTLIB_RES)/compiled_apk/res/*; do mv "$$f" "$${f/-v4/}";done
+	for f in $(LAYOUTLIB_RES)/compiled_apk/res/**/*.9.png; do mv "$$f" "$${f/.9.png/.compiled.9.png}";done
+	cp -r $(LAYOUTLIB_RES)/compiled_apk/res $(LAYOUTLIB_RES)/data
+	$(SOONG_ZIP) -C $(LAYOUTLIB_RES)/data -D $(LAYOUTLIB_RES)/data/res -o $@
+
+$(call dist-for-goals,layoutlib,$(LAYOUTLIB_RES)/layoutlib-res.zip:layoutlib_native/res.zip)
+
+# SBOM of layoutlib artifacts
+LAYOUTLIB_SBOM := $(call intermediates-dir-for,PACKAGING,layoutlib-sbom,HOST)
+_layoutlib_font_config_files := $(sort $(wildcard frameworks/base/data/fonts/*.xml))
+_layoutlib_fonts_files := $(filter $(TARGET_OUT)/fonts/%.ttf $(TARGET_OUT)/fonts/%.ttc $(TARGET_OUT)/fonts/%.otf, $(INTERNAL_SYSTEMIMAGE_FILES))
+_layoutlib_keyboard_files := $(sort $(wildcard frameworks/base/data/keyboards/*.kcm))
+$(LAYOUTLIB_SBOM)/sbom-metadata.csv:
+	rm -rf $@
+	echo installed_file,module_path,soong_module_type,is_prebuilt_make_module,product_copy_files,kernel_module_copy_files,is_platform_generated,build_output_path,static_libraries,whole_static_libraries,is_static_lib >> $@
+	echo build.prop,,,,,,Y,$(LAYOUTLIB_BUILD_PROP)/layoutlib-build.prop,,, >> $@
+
+	$(foreach f,$(_layoutlib_font_config_files),\
+	  echo data/fonts/$(notdir $f),frameworks/base/data/fonts,prebuilt_etc,,,,,$f,,, >> $@; \
+	)
+
+	$(foreach f,$(_layoutlib_fonts_files), \
+	  $(eval _module_name := $(ALL_INSTALLED_FILES.$f)) \
+	  $(eval _module_path := $(strip $(sort $(ALL_MODULES.$(_module_name).PATH)))) \
+	  $(eval _soong_module_type := $(strip $(sort $(ALL_MODULES.$(_module_name).SOONG_MODULE_TYPE)))) \
+	  echo data/fonts/$(notdir $f),$(_module_path),$(_soong_module_type),,,,,$f,,, >> $@; \
+	)
+
+	$(foreach f,$(_layoutlib_keyboard_files), \
+	  echo data/keyboards/$(notdir $f),frameworks/base/data/keyboards,prebuilt_etc,,,,,$f,,, >> $@; \
+	)
+
+	$(foreach f,$(LAYOUTLIB_RES_FILES), \
+	  $(eval _path := $(subst frameworks/base/core/res,data,$f)) \
+	  echo $(_path),,,,,,Y,$f,,, >> $@; \
+	)
+
+.PHONY: layoutlib-sbom
+layoutlib-sbom: $(LAYOUTLIB_SBOM)/layoutlib.spdx.json
+$(LAYOUTLIB_SBOM)/layoutlib.spdx.json: $(PRODUCT_OUT)/always_dirty_file.txt $(GEN_SBOM) $(LAYOUTLIB_SBOM)/sbom-metadata.csv $(_layoutlib_font_config_files) $(_layoutlib_fonts_files) $(LAYOUTLIB_BUILD_PROP)/layoutlib-build.prop $(_layoutlib_keyboard_files) $(LAYOUTLIB_RES_FILES)
+	rm -rf $@
+	$(GEN_SBOM) --output_file $@ --metadata $(LAYOUTLIB_SBOM)/sbom-metadata.csv --build_version $(BUILD_FINGERPRINT_FROM_FILE) --product_mfr "$(PRODUCT_MANUFACTURER)" --module_name "layoutlib" --json
+
+$(call dist-for-goals,layoutlib,$(LAYOUTLIB_SBOM)/layoutlib.spdx.json:layoutlib_native/sbom/layoutlib.spdx.json)
+
+# Generate the SBOM of the framework_res.jar that is created in release_layoutlib.sh.
+# The generated SBOM contains placeholders for release_layoutlib.sh to substitute:
+# document name, document namespace, document creation info, organization and the SHA1 value of framework_res.jar.
+GEN_SBOM_FRAMEWORK_RES := $(HOST_OUT_EXECUTABLES)/generate-sbom-framework_res
+.PHONY: layoutlib-framework_res-sbom
+layoutlib-framework_res-sbom: $(LAYOUTLIB_SBOM)/framework_res.jar.spdx.json
+$(LAYOUTLIB_SBOM)/framework_res.jar.spdx.json: $(LAYOUTLIB_SBOM)/layoutlib.spdx.json $(GEN_SBOM_FRAMEWORK_RES)
+	rm -rf $@
+	$(GEN_SBOM_FRAMEWORK_RES) --output_file $(LAYOUTLIB_SBOM)/framework_res.jar.spdx.json --layoutlib_sbom $(LAYOUTLIB_SBOM)/layoutlib.spdx.json
+
+$(call dist-for-goals,layoutlib,$(LAYOUTLIB_SBOM)/framework_res.jar.spdx.json:layoutlib_native/sbom/framework_res.jar.spdx.json)
\ No newline at end of file
diff --git a/core/layoutlib_fonts.mk b/core/layoutlib_fonts.mk
deleted file mode 100644
index d2a814f..0000000
--- a/core/layoutlib_fonts.mk
+++ /dev/null
@@ -1,35 +0,0 @@
-# Fonts for layoutlib
-
-FONT_TEMP := $(call intermediates-dir-for,PACKAGING,fonts,HOST,COMMON)
-
-# The font configuration files - system_fonts.xml, fallback_fonts.xml etc.
-font_config := $(sort $(wildcard frameworks/base/data/fonts/*.xml))
-font_config := $(addprefix $(FONT_TEMP)/, $(notdir $(font_config)))
-
-$(font_config): $(FONT_TEMP)/%.xml: \
-			frameworks/base/data/fonts/%.xml
-	$(hide) mkdir -p $(dir $@)
-	$(hide) cp -vf $< $@
-
-# List of fonts on the device that we want to ship. This is all .ttf, .ttc and .otf fonts.
-fonts_device := $(filter $(TARGET_OUT)/fonts/%.ttf $(TARGET_OUT)/fonts/%.ttc $(TARGET_OUT)/fonts/%.otf, $(INTERNAL_SYSTEMIMAGE_FILES))
-fonts_device := $(addprefix $(FONT_TEMP)/, $(notdir $(fonts_device)))
-
-# TODO: If the font file is a symlink, reuse the font renamed from the symlink
-# target.
-$(fonts_device): $(FONT_TEMP)/%: $(TARGET_OUT)/fonts/%
-	$(hide) mkdir -p $(dir $@)
-	$(hide) cp -vf $< $@
-
-# List of all dependencies - all fonts and configuration files.
-FONT_FILES := $(fonts_device) $(font_config)
-
-.PHONY: layoutlib layoutlib-tests
-layoutlib layoutlib-tests: $(FONT_FILES)
-
-$(call dist-for-goals, layoutlib, $(foreach m,$(FONT_FILES), $(m):layoutlib_native/fonts/$(notdir $(m))))
-
-FONT_TEMP :=
-font_config :=
-fonts_device :=
-FONT_FILES :=
diff --git a/core/main.mk b/core/main.mk
index 7c25862..cd6d4b7 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -335,19 +335,6 @@
     ro.build.ab_update=$(AB_OTA_UPDATER)
 endif
 
-# Set ro.product.vndk.version to know the VNDK version required by product
-# modules. It uses the version in PRODUCT_PRODUCT_VNDK_VERSION. If the value
-# is "current", use PLATFORM_VNDK_VERSION.
-ifdef PRODUCT_PRODUCT_VNDK_VERSION
-ifeq ($(KEEP_VNDK),true)
-ifeq ($(PRODUCT_PRODUCT_VNDK_VERSION),current)
-ADDITIONAL_PRODUCT_PROPERTIES += ro.product.vndk.version=$(PLATFORM_VNDK_VERSION)
-else
-ADDITIONAL_PRODUCT_PROPERTIES += ro.product.vndk.version=$(PRODUCT_PRODUCT_VNDK_VERSION)
-endif
-endif
-endif
-
 ADDITIONAL_PRODUCT_PROPERTIES += ro.build.characteristics=$(TARGET_AAPT_CHARACTERISTICS)
 
 ifeq ($(AB_OTA_UPDATER),true)
@@ -563,6 +550,7 @@
 # sources or dependencies for these tools may be missing from the tree.
 ifeq (,$(TARGET_BUILD_UNBUNDLED_IMAGE))
 droid_targets : blueprint_tools
+checkbuild: blueprint_tests
 endif
 
 endif # dont_bother
diff --git a/core/product.mk b/core/product.mk
index ca65948..39c9eb7 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -447,6 +447,8 @@
 
 _product_single_value_vars += PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API
 
+_product_list_vars += PRODUCT_RELEASE_CONFIG_MAPS
+
 .KATI_READONLY := _product_single_value_vars _product_list_vars
 _product_var_list :=$= $(_product_single_value_vars) $(_product_list_vars)
 
diff --git a/core/release_config.bzl b/core/release_config.bzl
index a2f59e6..a29f3f2 100644
--- a/core/release_config.bzl
+++ b/core/release_config.bzl
@@ -11,6 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+Export build flags (with values) to make.
+"""
 
 load("//build/bazel/utils:schema_validation.bzl", "validate")
 
@@ -52,6 +55,11 @@
             },
             "declared_in": {"type": "string"},
         },
+        "optional_keys": {
+            "appends": {
+                "type": "bool",
+            },
+        },
     },
 }
 
@@ -72,8 +80,23 @@
     },
 }
 
-def flag(name, partitions, default):
-    "Declare a flag."
+def flag(name, partitions, default, _kwmarker = (), appends = False):
+    """Declare a flag.
+
+    Args:
+      name: name of the flag
+      partitions: the partitions where this should be recorded.
+      default: the default value of the flag.
+      _kwmarker: Used to detect argument misuse.
+      appends: Whether new values should append to (not replace) the old value.
+
+    Returns:
+      A dictionary containing the flag declaration.
+    """
+
+    # If specified, appends must be passed as a keyword argument.
+    if _kwmarker != ():
+        fail("Too many positional parameters")
     if not partitions:
         fail("At least 1 partition is required")
     if not name.startswith("RELEASE_"):
@@ -93,17 +116,33 @@
         "name": name,
         "partitions": partitions,
         "default": default,
+        "appends": appends,
     }
 
 def value(name, value):
-    "Define the flag value for a particular configuration."
+    """Define the flag value for a particular configuration.
+
+    Args:
+      name: The name of the flag.
+      value: The value for the flag.
+
+    Returns:
+      A dictionary containing the name and value to be used.
+    """
     return {
         "name": name,
         "value": value,
     }
 
 def _format_value(val):
-    "Format the starlark type correctly for make"
+    """Format the starlark type correctly for make.
+
+    Args:
+      val: The value to format
+
+    Returns:
+      The value, formatted correctly for make.
+    """
     if type(val) == "NoneType":
         return ""
     elif type(val) == "bool":
@@ -112,16 +151,26 @@
         return val
 
 def release_config(all_flags, all_values):
-    "Return the make variables that should be set for this release config."
+    """Return the make variables that should be set for this release config.
+
+    Args:
+      all_flags: A list of flag objects (from flag() calls).
+      all_values: A list of value objects (from value() calls).
+
+    Returns:
+      A dictionary of {name: value} variables for make.
+    """
     validate(all_flags, _all_flags_schema)
     validate(all_values, _all_values_schema)
 
     # Validate flags
     flag_names = []
+    flags_dict = {}
     for flag in all_flags:
         if flag["name"] in flag_names:
             fail(flag["declared_in"] + ": Duplicate declaration of flag " + flag["name"])
         flag_names.append(flag["name"])
+        flags_dict[flag["name"]] = flag
 
     # Record which flags go on which partition
     partitions = {}
@@ -135,13 +184,21 @@
             else:
                 partitions.setdefault(partition, []).append(flag["name"])
 
-    # Validate values
-    # TODO(joeo): Disallow duplicate values after we've split AOSP and vendor flags.
+    # Generate final values.
+    # Only declared flags may have a value.
     values = {}
     for value in all_values:
-        if value["name"] not in flag_names:
-            fail(value["set_in"] + ": Value set for undeclared build flag: " + value["name"])
-        values[value["name"]] = value
+        name = value["name"]
+        if name not in flag_names:
+            fail(value["set_in"] + ": Value set for undeclared build flag: " + name)
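+        # Append-type flags accumulate every value (and its set_in provenance);
+        # all other flags keep only the last value set.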
+        if flags_dict[name]["appends"]:
+            if name in values:
+                values[name]["value"] += " " + value["value"]
+                values[name]["set_in"] += " " + value["set_in"]
+            else:
+                values[name] = value
+        else:
+            values[name] = value
 
     # Collect values
     result = {
diff --git a/core/release_config.mk b/core/release_config.mk
index b72ee89..5993e85 100644
--- a/core/release_config.mk
+++ b/core/release_config.mk
@@ -52,6 +52,15 @@
         ) \
     )
 
+# PRODUCT_RELEASE_CONFIG_MAPS is set by Soong using an initial run of product
+# config to capture only the list of config maps needed by the build.
+# Keep them in the order provided, but remove duplicates.
+$(foreach map,$(PRODUCT_RELEASE_CONFIG_MAPS), \
+    $(if $(filter $(map),$(config_map_files)),,$(eval config_map_files += $(map))) \
+)
+
+# Declare or extend a release-config.
+#
 # $1 config name
 # $2 release config files
 define declare-release-config
@@ -63,10 +72,24 @@
     $(eval _all_release_configs.$(strip $(1)).FILES := $(_all_release_configs.$(strip $(1)).FILES) $(strip $(2)))
 endef
 
-# Include the config map files
+# Include the config map files and populate _flag_declaration_files.
+_flag_declaration_files :=
 $(foreach f, $(config_map_files), \
+    $(eval FLAG_DECLARATION_FILES:= ) \
     $(eval _included := $(f)) \
     $(eval include $(f)) \
+    $(eval _flag_declaration_files += $(FLAG_DECLARATION_FILES)) \
+)
+FLAG_DECLARATION_FILES :=
+
+# Make sure that the flag definitions are included for vendor/google builds.
+# This decouples the change in vendor/google/release/release_config_map.mk
+# from this logic change.
+# TODO: Remove this once the vendor/google FLAG_DECLARATION_FILES change has landed.
+$(if $(wildcard vendor/google/release/release_config_map.mk),\
+  $(if $(filter vendor/google/release/build_flags.bzl,$(_flag_declaration_files)),,\
+    $(eval _flag_declaration_files := vendor/google/release/build_flags.bzl $(_flag_declaration_files)) \
+  ) \
 )
 
 # If TARGET_RELEASE is set, fail if there is no matching release config
@@ -78,7 +101,11 @@
 else
     # Choose flag files
     # Don't sort this, use it in the order they gave us.
-    flag_value_files := $(_all_release_configs.$(TARGET_RELEASE).FILES)
+    # Duplicate entries are allowed, but only the first occurrence is kept.
+    flag_value_files :=
+    $(foreach f,$(_all_release_configs.$(TARGET_RELEASE).FILES), \
+      $(if $(filter $(f),$(flag_value_files)),,$(eval flag_value_files += $(f)))\
+    )
 endif
 else
 # Useful for finding scripts etc that aren't passing or setting TARGET_RELEASE
@@ -121,21 +148,8 @@
 # that we chose from the config map above.  Then we run that, and load the
 # results of that into the make environment.
 
-# If this is a google source tree, restrict it to only the one file
-# which has OWNERS control.  If it isn't let others define their own.
-# TODO: Remove wildcard for build/release one when all branch manifests
-# have updated.
-flag_declaration_files := $(wildcard build/release/build_flags.bzl) \
-    $(if $(wildcard vendor/google/release/build_flags.bzl), \
-        vendor/google/release/build_flags.bzl, \
-        $(sort \
-            $(wildcard device/*/release/build_flags.bzl) \
-            $(wildcard device/*/*/release/build_flags.bzl) \
-            $(wildcard vendor/*/release/build_flags.bzl) \
-            $(wildcard vendor/*/*/release/build_flags.bzl) \
-        ) \
-    )
-
+# _flag_declaration_files is the combined list of FLAG_DECLARATION_FILES set by
+# release_config_map.mk files above.
 
 # Because starlark can't find files with $(wildcard), write an entrypoint starlark script that
 # contains the result of the above wildcards for the starlark code to use.
@@ -145,8 +159,8 @@
 _c+=$(newline)$(space)d = dict(d)
 _c+=$(newline)$(space)d[k] = v
 _c+=$(newline)$(space)return d
-_c+=$(foreach f,$(flag_declaration_files),$(newline)load("$(f)", flags_$(call filename_to_starlark,$(f)) = "flags"))
-_c+=$(newline)all_flags = [] $(foreach f,$(flag_declaration_files),+ [add(x, "declared_in", "$(f)") for x in flags_$(call filename_to_starlark,$(f))])
+_c+=$(foreach f,$(_flag_declaration_files),$(newline)load("$(f)", flags_$(call filename_to_starlark,$(f)) = "flags"))
+_c+=$(newline)all_flags = [] $(foreach f,$(_flag_declaration_files),+ [add(x, "declared_in", "$(f)") for x in flags_$(call filename_to_starlark,$(f))])
 _c+=$(foreach f,$(flag_value_files),$(newline)load("//$(f)", values_$(call filename_to_starlark,$(f)) = "values"))
 _c+=$(newline)all_values = [] $(foreach f,$(flag_value_files),+ [add(x, "set_in", "$(f)") for x in values_$(call filename_to_starlark,$(f))])
 _c+=$(newline)variables_to_export_to_make = release_config(all_flags, all_values)
diff --git a/core/soong_config.mk b/core/soong_config.mk
index e541c12..be6a795 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -8,6 +8,7 @@
 ifndef AFDO_PROFILES
 # Set AFDO_PROFILES
 -include vendor/google_data/pgo_profile/sampling/afdo_profiles.mk
+include toolchain/pgo-profiles/sampling/afdo_profiles.mk
 else
 $(error AFDO_PROFILES can only be set from soong_config.mk. For product-specific fdo_profiles, please use PRODUCT_AFDO_PROFILES)
 endif
@@ -15,6 +16,10 @@
 # PRODUCT_AFDO_PROFILES takes precedence over product-agnostic profiles in AFDO_PROFILES
 ALL_AFDO_PROFILES := $(PRODUCT_AFDO_PROFILES) $(AFDO_PROFILES)
 
+ifneq (,$(filter-out environment undefined,$(origin GENRULE_SANDBOXING)))
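+  # $(origin) yields "environment" or "undefined" only when GENRULE_SANDBOXING was not set in a makefile or on the command line.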
+  $(error GENRULE_SANDBOXING can only be provided via an environment variable, use BUILD_BROKEN_GENRULE_SANDBOXING to disable genrule sandboxing in board config)
+endif
+
 ifeq ($(WRITE_SOONG_VARIABLES),true)
 
 # Create soong.variables with copies of makefile settings.  Runs every build,
@@ -280,7 +285,8 @@
 $(call add_json_bool, BuildBrokenClangProperty,            $(filter true,$(BUILD_BROKEN_CLANG_PROPERTY)))
 $(call add_json_bool, BuildBrokenClangAsFlags,             $(filter true,$(BUILD_BROKEN_CLANG_ASFLAGS)))
 $(call add_json_bool, BuildBrokenClangCFlags,              $(filter true,$(BUILD_BROKEN_CLANG_CFLAGS)))
-$(call add_json_bool, GenruleSandboxing,                   $(filter true,$(GENRULE_SANDBOXING)))
+# Use the value of GENRULE_SANDBOXING if set, otherwise use the inverse of BUILD_BROKEN_GENRULE_SANDBOXING
+$(call add_json_bool, GenruleSandboxing,                   $(if $(GENRULE_SANDBOXING),$(filter true,$(GENRULE_SANDBOXING)),$(if $(filter true,$(BUILD_BROKEN_GENRULE_SANDBOXING)),,true)))
 $(call add_json_bool, BuildBrokenEnforceSyspropOwner,      $(filter true,$(BUILD_BROKEN_ENFORCE_SYSPROP_OWNER)))
 $(call add_json_bool, BuildBrokenTrebleSyspropNeverallow,  $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
 $(call add_json_bool, BuildBrokenUsesSoongPython2Modules,  $(filter true,$(BUILD_BROKEN_USES_SOONG_PYTHON2_MODULES)))
@@ -313,7 +319,7 @@
 $(call add_json_list, BuildVersionTags,    $(BUILD_VERSION_TAGS))
 
 $(call add_json_str, ReleaseVersion,    $(_RELEASE_VERSION))
-$(call add_json_str, ReleaseAconfigValueSets,    $(RELEASE_ACONFIG_VALUE_SETS))
+$(call add_json_list, ReleaseAconfigValueSets,    $(RELEASE_ACONFIG_VALUE_SETS))
 $(call add_json_str, ReleaseAconfigFlagDefaultPermission,    $(RELEASE_ACONFIG_FLAG_DEFAULT_PERMISSION))
 
 $(call add_json_bool, ReleaseDefaultModuleBuildFromSource,   $(RELEASE_DEFAULT_MODULE_BUILD_FROM_SOURCE))
@@ -380,6 +386,8 @@
   $(call add_json_bool, CopyImagesForTargetFilesZip, $(filter true,$(COPY_IMAGES_FOR_TARGET_FILES_ZIP)))
 
   $(call add_json_bool, BoardAvbEnable, $(filter true,$(BOARD_AVB_ENABLE)))
+
+  $(call add_json_list, ProductPackages, $(sort $(PRODUCT_PACKAGES)))
 $(call end_json_map)
 
 $(call add_json_bool, NextReleaseHideFlaggedApi, $(filter true,$(PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API)))
diff --git a/core/sysprop.mk b/core/sysprop.mk
index a37fd05..4e8e976 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -46,7 +46,6 @@
         echo "ro.product.$(1).manufacturer=$(PRODUCT_MANUFACTURER)" >> $(2);\
         echo "ro.product.$(1).model=$(PRODUCT_MODEL)" >> $(2);\
         echo "ro.product.$(1).name=$(TARGET_PRODUCT)" >> $(2);\
-        # Attestation specific properties for AOSP/GSI build running on device.
         if [ -n "$(strip $(PRODUCT_MODEL_FOR_ATTESTATION))" ]; then \
             echo "ro.product.model_for_attestation=$(PRODUCT_MODEL_FOR_ATTESTATION)" >> $(2);\
         fi; \
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index 593b7b6..91cb2c9 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -37,7 +37,7 @@
   cts_platform_release_path := cts/tests/tests/os/assets/platform_releases.txt
   cts_platform_release_string := $(shell cat $(cts_platform_release_path))
 
-  ifeq ($(RELEASE_PLATFORM_VERSION_CODENAME_REL),)
+  ifneq (REL,$(PLATFORM_VERSION_CODENAME))
     ifeq (,$(findstring $(PLATFORM_VERSION),$(cts_platform_version_string)))
       define error_msg
         ============================================================
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
deleted file mode 100644
index 379369e..0000000
--- a/core/version_defaults.mk
+++ /dev/null
@@ -1,111 +0,0 @@
-#
-# Copyright (C) 2008 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Handle various build version information.
-#
-# Guarantees that the following are defined:
-#     PLATFORM_VERSION
-#     PLATFORM_DISPLAY_VERSION
-#     PLATFORM_SDK_VERSION
-#     PLATFORM_VERSION_CODENAME
-#     DEFAULT_APP_TARGET_SDK
-#     BUILD_ID
-#     BUILD_NUMBER
-#     PLATFORM_SECURITY_PATCH
-#     PLATFORM_VNDK_VERSION
-#     PLATFORM_SYSTEMSDK_VERSIONS
-#
-
-# Look for an optional file containing overrides of the defaults,
-# but don't cry if we don't find it.  We could just use -include, but
-# the build.prop target also wants INTERNAL_BUILD_ID_MAKEFILE to be set
-# if the file exists.
-#
-INTERNAL_BUILD_ID_MAKEFILE := $(wildcard $(BUILD_SYSTEM)/build_id.mk)
-ifdef INTERNAL_BUILD_ID_MAKEFILE
-  include $(INTERNAL_BUILD_ID_MAKEFILE)
-endif
-
-# Set release configuration. The default resides in build/release/build_flags.mk.
-MIN_PLATFORM_VERSION := UP1A
-MAX_PLATFORM_VERSION := VP1A
-
-# The last stable version name of the platform that was released.  During
-# development, this stays at that previous version, while the codename indicates
-# further work based on the previous version.
-PLATFORM_VERSION_LAST_STABLE := 14
-.KATI_READONLY := PLATFORM_VERSION_LAST_STABLE
-
-# These are the current development codenames, if the build is not a final
-# release build.  If this is a final release build, it is simply "REL".
-# Note that this may be overridden by RELEASE_VERSION_CODENAME_REL in
-# version_util.mk.
-PLATFORM_VERSION_CODENAME.UP1A := UpsideDownCake
-PLATFORM_VERSION_CODENAME.VP1A := VanillaIceCream
-
-# This is the user-visible version.  In a final release build it should
-# be empty to use PLATFORM_VERSION as the user-visible version.  For
-# a preview release it can be set to a user-friendly value like `12 Preview 1`
-PLATFORM_DISPLAY_VERSION :=
-
-ifndef PLATFORM_SDK_VERSION
-  # This is the canonical definition of the SDK version, which defines
-  # the set of APIs and functionality available in the platform.  It
-  # is a single integer that increases monotonically as updates to
-  # the SDK are released.  It should only be incremented when the APIs for
-  # the new release are frozen (so that developers don't write apps against
-  # intermediate builds).  During development, this number remains at the
-  # SDK version the branch is based on and PLATFORM_VERSION_CODENAME holds
-  # the code-name of the new development work.
-
-  # When you increment the PLATFORM_SDK_VERSION please ensure you also
-  # clear out the following text file of all older PLATFORM_VERSION's:
-  # cts/tests/tests/os/assets/platform_versions.txt
-  PLATFORM_SDK_VERSION := 34
-endif
-.KATI_READONLY := PLATFORM_SDK_VERSION
-
-# This is the sdk extension version of this tree.
-PLATFORM_SDK_EXTENSION_VERSION := 7
-.KATI_READONLY := PLATFORM_SDK_EXTENSION_VERSION
-
-# This is the sdk extension version that PLATFORM_SDK_VERSION ships with.
-PLATFORM_BASE_SDK_EXTENSION_VERSION := $(PLATFORM_SDK_EXTENSION_VERSION)
-.KATI_READONLY := PLATFORM_BASE_SDK_EXTENSION_VERSION
-
-# This are all known codenames.
-PLATFORM_VERSION_KNOWN_CODENAMES := \
-Base Base11 Cupcake Donut Eclair Eclair01 EclairMr1 Froyo Gingerbread GingerbreadMr1 \
-Honeycomb HoneycombMr1 HoneycombMr2 IceCreamSandwich IceCreamSandwichMr1 \
-JellyBean JellyBeanMr1 JellyBeanMr2 Kitkat KitkatWatch Lollipop LollipopMr1 M N NMr1 O OMr1 P \
-Q R S Sv2 Tiramisu UpsideDownCake VanillaIceCream
-
-# Convert from space separated list to comma separated
-PLATFORM_VERSION_KNOWN_CODENAMES := \
-  $(call normalize-comma-list,$(PLATFORM_VERSION_KNOWN_CODENAMES))
-.KATI_READONLY := PLATFORM_VERSION_KNOWN_CODENAMES
-
-ifndef PLATFORM_SECURITY_PATCH
-    #  Used to indicate the security patch that has been applied to the device.
-    #  It must signify that the build includes all security patches issued up through the designated Android Public Security Bulletin.
-    #  It must be of the form "YYYY-MM-DD" on production devices.
-    #  It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
-    #  If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
-    PLATFORM_SECURITY_PATCH := 2023-10-05
-endif
-
-include $(BUILD_SYSTEM)/version_util.mk
diff --git a/core/version_util.mk b/core/version_util.mk
index 0cc3442..dfa0277 100644
--- a/core/version_util.mk
+++ b/core/version_util.mk
@@ -14,119 +14,99 @@
 # limitations under the License.
 #
 
-ALLOWED_VERSIONS := $(call allowed-platform-versions,\
-  $(MIN_PLATFORM_VERSION),\
-  $(MAX_PLATFORM_VERSION),\
-  $(RELEASE_PLATFORM_VERSION))
+
+#
+# Handle various build version information.
+#
+# Guarantees that the following are defined:
+#     PLATFORM_VERSION
+#     PLATFORM_DISPLAY_VERSION
+#     PLATFORM_SDK_VERSION
+#     PLATFORM_SDK_EXTENSION_VERSION
+#     PLATFORM_VERSION_CODENAME
+#     DEFAULT_APP_TARGET_SDK
+#     BUILD_ID
+#     BUILD_NUMBER
+#     PLATFORM_SECURITY_PATCH
+#     PLATFORM_VNDK_VERSION
+#     PLATFORM_SYSTEMSDK_VERSIONS
+#     PLATFORM_VERSION_LAST_STABLE
+#
+
+# Look for an optional file containing overrides of the defaults,
+# but don't cry if we don't find it.  We could just use -include, but
+# the build.prop target also wants INTERNAL_BUILD_ID_MAKEFILE to be set
+# if the file exists.
+#
+INTERNAL_BUILD_ID_MAKEFILE := $(wildcard $(BUILD_SYSTEM)/build_id.mk)
+ifdef INTERNAL_BUILD_ID_MAKEFILE
+  include $(INTERNAL_BUILD_ID_MAKEFILE)
+endif
 
 ifdef TARGET_PLATFORM_VERSION
   $(error Do not set TARGET_PLATFORM_VERSION directly. Use RELEASE_PLATFORM_VERSION. value: $(TARGET_PLATFORM_VERSION))
 endif
-
 TARGET_PLATFORM_VERSION := $(RELEASE_PLATFORM_VERSION)
-
-ifeq (,$(filter $(ALLOWED_VERSIONS), $(TARGET_PLATFORM_VERSION)))
-  $(warning Invalid TARGET_PLATFORM_VERSION '$(TARGET_PLATFORM_VERSION)', must be one of)
-  $(error $(ALLOWED_VERSIONS))
-endif
-ALLOWED_VERSIONS :=
-MIN_PLATFORM_VERSION :=
-MAX_PLATFORM_VERSION :=
-
 .KATI_READONLY := TARGET_PLATFORM_VERSION
 
-# Default versions for each TARGET_PLATFORM_VERSION
-# TODO: PLATFORM_VERSION, PLATFORM_SDK_VERSION, etc. should be conditional
-# on this
-
-# This is the canonical definition of the platform version,
-# which is the version that we reveal to the end user.
-# Update this value when the platform version changes (rather
-# than overriding it somewhere else).  Can be an arbitrary string.
-
-# When you change PLATFORM_VERSION for a given PLATFORM_SDK_VERSION
-# please add that PLATFORM_VERSION as well as clean up obsolete PLATFORM_VERSION's
-# in the following text file:
-# cts/tests/tests/os/assets/platform_versions.txt
-
-# Note that there should be one PLATFORM_VERSION and PLATFORM_VERSION_CODENAME
-# entry for each unreleased API level, regardless of
-# MIN_PLATFORM_VERSION/MAX_PLATFORM_VERSION. PLATFORM_VERSION is used to
-# generate the range of allowed SDK versions, so it must have an entry for every
-# unreleased API level targetable by this branch, not just those that are valid
-# lunch targets for this branch.
-
-# Release config flag to override the current version to REL.  Note that the
-# codename can also be locked to REL by setting it in versino_defaults.mk.
-ifneq ($(RELEASE_PLATFORM_VERSION_CODENAME_REL),)
-  PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION) := REL
+ifdef PLATFORM_SECURITY_PATCH
+  $(error Do not set PLATFORM_SECURITY_PATCH directly. Use RELEASE_PLATFORM_SECURITY_PATCH. value: $(PLATFORM_SECURITY_PATCH))
 endif
+PLATFORM_SECURITY_PATCH := $(RELEASE_PLATFORM_SECURITY_PATCH)
+.KATI_READONLY := PLATFORM_SECURITY_PATCH
 
-PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
-ifndef PLATFORM_VERSION_CODENAME
-  # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
-  PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
+ifdef PLATFORM_SDK_VERSION
+  $(error Do not set PLATFORM_SDK_VERSION directly. Use RELEASE_PLATFORM_SDK_VERSION. value: $(PLATFORM_SDK_VERSION))
 endif
+PLATFORM_SDK_VERSION := $(RELEASE_PLATFORM_SDK_VERSION)
+.KATI_READONLY := PLATFORM_SDK_VERSION
 
-# This is all of the *active* development codenames.
-# This confusing name is needed because
-# all_codenames has been baked into build.prop for ages.
-#
-# Should be either the same as PLATFORM_VERSION_CODENAME or a comma-separated
-# list of additional codenames after PLATFORM_VERSION_CODENAME.
-PLATFORM_VERSION_ALL_CODENAMES :=
-
-# Build a list of all active code names. Avoid duplicates, and stop when we
-# reach a codename that matches PLATFORM_VERSION_CODENAME (anything beyond
-# that is not included in our build).
-_versions_in_target := \
-  $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
-$(foreach version,$(_versions_in_target),\
-  $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
-  $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_CODENAMES)),,\
-    $(eval PLATFORM_VERSION_ALL_CODENAMES += $(_codename))))
-
-# And the list of actually all the codenames that are in preview. The
-# ALL_CODENAMES variable is sort of a lie for historical reasons and only
-# includes codenames up to and including the currently active codename, whereas
-# this variable also includes future codenames. For example, while AOSP is still
-# merging into U, but V development has started, ALL_CODENAMES will only be U,
-# but ALL_PREVIEW_CODENAMES will be U and V.
-#
-# REL is filtered out of the list. The codename of the current release is
-# replaced by "REL" when the build is configured as a release rather than a
-# preview. For example, PLATFORM_VERSION_CODENAME.UpsideDownCake will be "REL"
-# rather than UpsideDownCake in a -next target when the upcoming release is
-# UpsideDownCake. "REL" is a codename (and android.os.Build relies on this:
-# https://cs.android.com/android/platform/superproject/main/+/main:frameworks/base/core/java/android/os/Build.java;l=484-487;drc=316e3d16c9f34212f3beace7695289651d15a071),
-# so it should be in PLATFORM_VERSION_ALL_CODENAMES, but it definitely is not a
-# preview codename.
-PLATFORM_VERSION_ALL_PREVIEW_CODENAMES :=
-$(foreach version,$(ALL_VERSIONS),\
-  $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
-  $(if $(filter REL,$(_codename)),,\
-      $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_PREVIEW_CODENAMES)),,\
-        $(eval PLATFORM_VERSION_ALL_PREVIEW_CODENAMES += $(_codename)))))
-
-# And convert from space separated to comma separated.
-PLATFORM_VERSION_ALL_CODENAMES := \
-  $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_CODENAMES)))
-PLATFORM_VERSION_ALL_PREVIEW_CODENAMES := \
-  $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_PREVIEW_CODENAMES)))
-
-.KATI_READONLY := \
-  PLATFORM_VERSION_CODENAME \
-  PLATFORM_VERSION_ALL_CODENAMES \
-  PLATFORM_VERSION_ALL_PREVIEW_CODENAMES \
-
-ifneq (REL,$(PLATFORM_VERSION_CODENAME))
-  codenames := \
-    $(subst $(comma),$(space),$(strip $(PLATFORM_VERSION_KNOWN_CODENAMES)))
-  ifeq ($(filter $(PLATFORM_VERSION_CODENAME),$(codenames)),)
-    $(error '$(PLATFORM_VERSION_CODENAME)' is not in '$(codenames)'. \
-        Add PLATFORM_VERSION_CODENAME to PLATFORM_VERSION_KNOWN_CODENAMES)
-  endif
+ifdef PLATFORM_SDK_EXTENSION_VERSION
+  $(error Do not set PLATFORM_SDK_EXTENSION_VERSION directly. Use RELEASE_PLATFORM_SDK_EXTENSION_VERSION. value: $(PLATFORM_SDK_EXTENSION_VERSION))
 endif
+PLATFORM_SDK_EXTENSION_VERSION := $(RELEASE_PLATFORM_SDK_EXTENSION_VERSION)
+.KATI_READONLY := PLATFORM_SDK_EXTENSION_VERSION
+
+# This is the SDK extension version that PLATFORM_SDK_VERSION ships with.
+PLATFORM_BASE_SDK_EXTENSION_VERSION := $(PLATFORM_SDK_EXTENSION_VERSION)
+.KATI_READONLY := PLATFORM_BASE_SDK_EXTENSION_VERSION
+
+ifdef PLATFORM_VERSION_CODENAME
+  $(error Do not set PLATFORM_VERSION_CODENAME directly. Use RELEASE_PLATFORM_VERSION. value: $(PLATFORM_VERSION_CODENAME))
+endif
+PLATFORM_VERSION_CODENAME := $(RELEASE_PLATFORM_VERSION_CODENAME)
+.KATI_READONLY := PLATFORM_VERSION_CODENAME
+
+ifdef PLATFORM_VERSION_ALL_CODENAMES
+  $(error Do not set PLATFORM_VERSION_ALL_CODENAMES directly. Use RELEASE_PLATFORM_VERSION_ALL_CODENAMES. value: $(PLATFORM_VERSION_ALL_CODENAMES))
+endif
+PLATFORM_VERSION_ALL_CODENAMES := $(RELEASE_PLATFORM_VERSION_ALL_CODENAMES)
+.KATI_READONLY := PLATFORM_VERSION_ALL_CODENAMES
+
+ifdef PLATFORM_VERSION_ALL_PREVIEW_CODENAMES
+  $(error Do not set PLATFORM_VERSION_ALL_PREVIEW_CODENAMES directly. Use RELEASE_PLATFORM_VERSION_ALL_PREVIEW_CODENAMES. value: $(PLATFORM_VERSION_ALL_PREVIEW_CODENAMES))
+endif
+PLATFORM_VERSION_ALL_PREVIEW_CODENAMES := $(RELEASE_PLATFORM_VERSION_ALL_PREVIEW_CODENAMES)
+.KATI_READONLY := PLATFORM_VERSION_ALL_PREVIEW_CODENAMES
+
+ifdef PLATFORM_VERSION_LAST_STABLE
+  $(error Do not set PLATFORM_VERSION_LAST_STABLE directly. Use RELEASE_PLATFORM_VERSION_LAST_STABLE. value: $(PLATFORM_VERSION_LAST_STABLE))
+endif
+PLATFORM_VERSION_LAST_STABLE := $(RELEASE_PLATFORM_VERSION_LAST_STABLE)
+.KATI_READONLY := PLATFORM_VERSION_LAST_STABLE
+
+
+# These are all the known codenames. Should this move into the release config?
+PLATFORM_VERSION_KNOWN_CODENAMES := \
+Base Base11 Cupcake Donut Eclair Eclair01 EclairMr1 Froyo Gingerbread GingerbreadMr1 \
+Honeycomb HoneycombMr1 HoneycombMr2 IceCreamSandwich IceCreamSandwichMr1 \
+JellyBean JellyBeanMr1 JellyBeanMr2 Kitkat KitkatWatch Lollipop LollipopMr1 M N NMr1 O OMr1 P \
+Q R S Sv2 Tiramisu UpsideDownCake VanillaIceCream
+
+# Convert from space separated list to comma separated
+PLATFORM_VERSION_KNOWN_CODENAMES := \
+  $(call normalize-comma-list,$(PLATFORM_VERSION_KNOWN_CODENAMES))
+.KATI_READONLY := PLATFORM_VERSION_KNOWN_CODENAMES
 
 ifndef PLATFORM_VERSION
   ifeq (REL,$(PLATFORM_VERSION_CODENAME))
diff --git a/envsetup.sh b/envsetup.sh
index 63837ec..ee45280 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -56,7 +56,7 @@
 Run "m help" for help with the build system itself.
 
 Invoke ". build/envsetup.sh" from your shell to add the following functions to your environment:
-- lunch:      lunch <product_name>-<build_variant>
+- lunch:      lunch <product_name>-<release_type>-<build_variant>
               Selects <product_name> as the product to build, <release_type> as the release
               configuration, and <build_variant> as the variant to build, and stores those
               selections in the environment to be read by subsequent invocations of 'm' etc.
@@ -205,6 +205,7 @@
         return
     fi
         TARGET_PRODUCT=$1 \
+        TARGET_RELEASE= \
         TARGET_BUILD_VARIANT= \
         TARGET_BUILD_TYPE= \
         TARGET_BUILD_APPS= \
@@ -486,7 +487,7 @@
 
 function multitree_lunch_help()
 {
-    echo "usage: lunch PRODUCT-VARIANT" 1>&2
+    echo "usage: lunch PRODUCT-RELEASE-VARIANT" 1>&2
     echo "    Set up android build environment based on a product short name and variant" 1>&2
     echo 1>&2
     echo "lunch COMBO_FILE VARIANT" 1>&2
@@ -728,7 +729,7 @@
 {
     local uname=$(uname)
     local choices
-    choices=$(TARGET_BUILD_APPS= TARGET_PRODUCT= TARGET_BUILD_VARIANT= get_build_var COMMON_LUNCH_CHOICES 2>/dev/null)
+    choices=$(TARGET_BUILD_APPS= TARGET_PRODUCT= TARGET_RELEASE= TARGET_BUILD_VARIANT= get_build_var COMMON_LUNCH_CHOICES 2>/dev/null)
     local ret=$?
 
     echo
@@ -774,7 +775,7 @@
         answer=$1
     else
         print_lunch_menu
-        echo "Which would you like? [aosp_arm-eng]"
+        echo "Which would you like? [aosp_arm-trunk_staging-eng]"
         echo -n "Pick from common choices above (e.g. 13) or specify your own (e.g. aosp_barbet-eng): "
         read answer
         used_lunch_menu=1
@@ -784,7 +785,7 @@
 
     if [ -z "$answer" ]
     then
-        selection=aosp_arm-eng
+        selection=aosp_arm-trunk_staging-eng
     elif (echo -n $answer | grep -q -e "^[0-9][0-9]*$")
     then
         local choices=($(TARGET_BUILD_APPS= get_build_var COMMON_LUNCH_CHOICES))
@@ -804,26 +805,16 @@
 
     export TARGET_BUILD_APPS=
 
-    # Support either <product>-<variant> or <product>-<release>-<variant>
-    local product release_and_variant release variant
-    product=${selection%%-*} # Trim everything after first dash
-    release_and_variant=${selection#*-} # Trim everything up to first dash
-    if [ "$release_and_variant" != "$selection" ]; then
-        local first=${release_and_variant%%-*} # Trim everything after first dash
-        if [ "$first" != "$release_and_variant" ]; then
-            # There is a 2nd dash, split into release-variant
-            release=$first # Everything up to the dash
-            variant=${release_and_variant#*-} # Trim everything up to dash
-        else
-            # There is not a 2nd dash, default to variant as the second param
-            variant=$first
-        fi
-    fi
+    # This must be <product>-<release>-<variant>
+    local product release variant
+    # Split string on the '-' character.
+    IFS="-" read -r product release variant <<< "$selection"
 
-    if [ -z "$product" ]
+    if [[ -z "$product" ]] || [[ -z "$release" ]] || [[ -z "$variant" ]]
     then
         echo
         echo "Invalid lunch combo: $selection"
+        echo "Valid combos must be of the form <product>-<release>-<variant>"
         return 1
     fi
 
@@ -841,11 +832,8 @@
     fi
     export TARGET_PRODUCT=$(get_build_var TARGET_PRODUCT)
     export TARGET_BUILD_VARIANT=$(get_build_var TARGET_BUILD_VARIANT)
-    if [ -n "$release" ]; then
-      export TARGET_RELEASE=$release
-    else
-      unset TARGET_RELEASE
-    fi
+    export TARGET_RELEASE=$release
+    # Note this is the string "release", not the value of the variable.
     export TARGET_BUILD_TYPE=release
 
     if [ $used_lunch_menu -eq 1 ]; then
@@ -887,6 +875,8 @@
 {
     local showHelp="$(echo $* | xargs -n 1 echo | \grep -E '^(help)$' | xargs)"
     local arch="$(echo $* | xargs -n 1 echo | \grep -E '^(arm|x86|arm64|x86_64)$' | xargs)"
+    # TODO(b/307975293): Expand tapas to take release arguments (and update hmm() usage).
+    local release="trunk_staging"
     local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
     local density="$(echo $* | xargs -n 1 echo | \grep -E '^(ldpi|mdpi|tvdpi|hdpi|xhdpi|xxhdpi|xxxhdpi|alldpi)$' | xargs)"
     local keys="$(echo $* | xargs -n 1 echo | \grep -E '^(devkeys)$' | xargs)"
@@ -902,6 +892,10 @@
         echo "tapas: Error: Multiple build archs supplied: $arch"
         return
     fi
+    if [ $(echo $release | wc -w) -gt 1 ]; then
+        echo "tapas: Error: Multiple build releases supplied: $release"
+        return
+    fi
     if [ $(echo $variant | wc -w) -gt 1 ]; then
         echo "tapas: Error: Multiple build variants supplied: $variant"
         return
@@ -936,6 +930,7 @@
     fi
 
     export TARGET_PRODUCT=$product
+    export TARGET_RELEASE=$release
     export TARGET_BUILD_VARIANT=$variant
     export TARGET_BUILD_DENSITY=$density
     export TARGET_BUILD_TYPE=release
@@ -953,6 +948,8 @@
 {
     local showHelp="$(echo $* | xargs -n 1 echo | \grep -E '^(help)$' | xargs)"
     local product="$(echo $* | xargs -n 1 echo | \grep -E '^(.*_)?(arm|x86|arm64|riscv64|x86_64|arm64only|x86_64only)$' | xargs)"
+    # TODO: Expand banchan to take release arguments (and update hmm() usage).
+    local release="trunk_staging"
     local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
     local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|(.*_)?(arm|x86|arm64|riscv64|x86_64))$' | xargs)"
 
@@ -967,6 +964,10 @@
         echo "banchan: Error: Multiple build archs or products supplied: $products"
         return
     fi
+    if [ $(echo $release | wc -w) -gt 1 ]; then
+        echo "banchan: Error: Multiple build releases supplied: $release"
+        return
+    fi
     if [ $(echo $variant | wc -w) -gt 1 ]; then
         echo "banchan: Error: Multiple build variants supplied: $variant"
         return
@@ -990,6 +991,7 @@
     fi
 
     export TARGET_PRODUCT=$product
+    export TARGET_RELEASE=$release
     export TARGET_BUILD_VARIANT=$variant
     export TARGET_BUILD_DENSITY=alldpi
     export TARGET_BUILD_TYPE=release
@@ -1613,8 +1615,8 @@
 # Return the Bazel label of a Soong module if it is converted with bp2build.
 function bmod()
 (
-    if [ $# -ne 1 ]; then
-        echo "usage: bmod <module>" >&2
+    if [ $# -eq 0 ]; then
+        echo "usage: bmod <module 1> <module 2> ... <module n>" >&2
         return 1
     fi
 
@@ -1631,19 +1633,24 @@
       return 1
     fi
 
-    local target_label=$(python3 -c "import json
-module = '$1'
+    modules=()
+    for m in "$@"; do
+        modules+=("\"$m\",")
+    done
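+    # ${modules[*]} joins the quoted names into '"a", "b", ...', which is
+    # spliced into the Python list literal below.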
+    local res=$(python3 -c "import json
+modules = [${modules[*]}]
 converted_json='$converted_json'
 bp2build_converted_map = json.load(open(converted_json))
-if module not in bp2build_converted_map:
-    exit(1)
-print(bp2build_converted_map[module] + ':' + module)")
+for module in modules:
+    if module not in bp2build_converted_map:
+        print(module + ' is not converted to Bazel.')
+    else:
+        print(bp2build_converted_map[module] + ':' + module)")
 
-    if [ -z "${target_label}" ]; then
-      echo "$1 is not converted to Bazel." >&2
-      return 1
-    else
-      echo "${target_label}"
+    echo "${res}"
+    unconverted_count=$(echo "${res}" | grep -c "not converted to Bazel")
+    if [[ ${unconverted_count} -ne 0 ]]; then
+        return 1
     fi
 )
 
@@ -2049,6 +2056,16 @@
     "$ANDROID_SOONG_HOST_OUT"/bin/avbtool $@
 }
 
+function overrideflags() {
+    local T="$(gettop)"
+    (\cd "${T}" && build/make/tools/overrideflags.sh "$@")
+}
+
+function aninja() {
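+    # Runs ninja directly against the combined build graph; assumes a previous
+    # build has already generated out/combined-${TARGET_PRODUCT}.ninja.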
+    local T="$(gettop)"
+    (\cd "${T}" && prebuilts/build-tools/linux-x86/bin/ninja -f out/combined-${TARGET_PRODUCT}.ninja "$@")
+}
+
 validate_current_shell
 set_global_paths
 source_vendorsetup
diff --git a/target/board/emulator_arm/AndroidBoard.mk b/target/board/emulator_arm/AndroidBoard.mk
deleted file mode 100644
index 7911f61..0000000
--- a/target/board/emulator_arm/AndroidBoard.mk
+++ /dev/null
@@ -1 +0,0 @@
-LOCAL_PATH := $(call my-dir)
diff --git a/target/board/emulator_arm/BoardConfig.mk b/target/board/emulator_arm/BoardConfig.mk
deleted file mode 100644
index 287824f..0000000
--- a/target/board/emulator_arm/BoardConfig.mk
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# arm emulator specific definitions
-TARGET_ARCH := arm
-TARGET_ARCH_VARIANT := armv7-a-neon
-TARGET_CPU_VARIANT := generic
-TARGET_CPU_ABI := armeabi-v7a
-TARGET_CPU_ABI2 := armeabi
-
-include build/make/target/board/BoardConfigGsiCommon.mk
-include build/make/target/board/BoardConfigEmuCommon.mk
-
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-
-# Wifi.
-BOARD_WLAN_DEVICE           := emulator
-BOARD_HOSTAPD_DRIVER        := NL80211
-BOARD_WPA_SUPPLICANT_DRIVER := NL80211
-BOARD_HOSTAPD_PRIVATE_LIB   := lib_driver_cmd_simulated
-BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
-WPA_SUPPLICANT_VERSION      := VER_0_8_X
-WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
-WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
-WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
diff --git a/target/board/emulator_arm/device.mk b/target/board/emulator_arm/device.mk
deleted file mode 100644
index af023eb..0000000
--- a/target/board/emulator_arm/device.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
diff --git a/target/board/emulator_arm/system_ext.prop b/target/board/emulator_arm/system_ext.prop
deleted file mode 100644
index 64829f3..0000000
--- a/target/board/emulator_arm/system_ext.prop
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# system.prop for generic sdk
-#
-
-rild.libpath=/vendor/lib/libreference-ril.so
diff --git a/target/board/emulator_arm64/BoardConfig.mk b/target/board/emulator_arm64/BoardConfig.mk
deleted file mode 100644
index c16e61b..0000000
--- a/target/board/emulator_arm64/BoardConfig.mk
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# arm64 emulator specific definitions
-TARGET_ARCH := arm64
-TARGET_ARCH_VARIANT := armv8-a
-TARGET_CPU_VARIANT := generic
-TARGET_CPU_ABI := arm64-v8a
-
-ifneq ($(TARGET_BUILD_APPS)$(filter cts sdk,$(MAKECMDGOALS)),)
-# DO NOT USE
-# DO NOT USE
-#
-# This architecture / CPU variant must NOT be used for any 64 bit
-# platform builds. It is the lowest common denominator required
-# to build an unbundled application or cts for all supported 32 and 64 bit
-# platforms.
-#
-# If you're building a 64 bit platform (and not an application) the
-# ARM-v8 specification allows you to assume all the features available in an
-# armv7-a-neon CPU. You should set the following as 2nd arch/cpu variant:
-#
-# TARGET_2ND_ARCH_VARIANT := armv8-a
-# TARGET_2ND_CPU_VARIANT := generic
-#
-# DO NOT USE
-# DO NOT USE
-TARGET_2ND_ARCH_VARIANT := armv7-a-neon
-# DO NOT USE
-# DO NOT USE
-TARGET_2ND_CPU_VARIANT := generic
-# DO NOT USE
-# DO NOT USE
-else
-TARGET_2ND_ARCH_VARIANT := armv8-a
-TARGET_2ND_CPU_VARIANT := generic
-endif
-
-include build/make/target/board/BoardConfigGsiCommon.mk
-include build/make/target/board/BoardConfigEmuCommon.mk
-
-BOARD_BOOTIMAGE_PARTITION_SIZE := 0x02000000
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-
-# Wifi.
-BOARD_WLAN_DEVICE           := emulator
-BOARD_HOSTAPD_DRIVER        := NL80211
-BOARD_WPA_SUPPLICANT_DRIVER := NL80211
-BOARD_HOSTAPD_PRIVATE_LIB   := lib_driver_cmd_simulated
-BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
-WPA_SUPPLICANT_VERSION      := VER_0_8_X
-WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
-WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
-WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
diff --git a/target/board/emulator_arm64/device.mk b/target/board/emulator_arm64/device.mk
deleted file mode 100644
index d221e64..0000000
--- a/target/board/emulator_arm64/device.mk
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
-
diff --git a/target/board/emulator_arm64/system_ext.prop b/target/board/emulator_arm64/system_ext.prop
deleted file mode 100644
index 2f8f803..0000000
--- a/target/board/emulator_arm64/system_ext.prop
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# system.prop for emulator arm64 sdk
-#
-
-rild.libpath=/vendor/lib64/libreference-ril.so
diff --git a/target/board/emulator_x86/BoardConfig.mk b/target/board/emulator_x86/BoardConfig.mk
deleted file mode 100644
index 8f79166..0000000
--- a/target/board/emulator_x86/BoardConfig.mk
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# x86 emulator specific definitions
-TARGET_CPU_ABI := x86
-TARGET_ARCH := x86
-TARGET_ARCH_VARIANT := x86
-
-TARGET_PRELINK_MODULE := false
-
-include build/make/target/board/BoardConfigGsiCommon.mk
-include build/make/target/board/BoardConfigEmuCommon.mk
-
-# Resize to 4G to accommodate ASAN and CTS
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 4294967296
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-
-# Wifi.
-BOARD_WLAN_DEVICE           := emulator
-BOARD_HOSTAPD_DRIVER        := NL80211
-BOARD_WPA_SUPPLICANT_DRIVER := NL80211
-BOARD_HOSTAPD_PRIVATE_LIB   := lib_driver_cmd_simulated
-BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
-WPA_SUPPLICANT_VERSION      := VER_0_8_X
-WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
-WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
-WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
diff --git a/target/board/emulator_x86/device.mk b/target/board/emulator_x86/device.mk
deleted file mode 100644
index 8a9d8da..0000000
--- a/target/board/emulator_x86/device.mk
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
-
-ifdef NET_ETH0_STARTONBOOT
-  PRODUCT_VENDOR_PROPERTIES += net.eth0.startonboot=1
-endif
-
-# Ensure we package the BIOS files too.
-PRODUCT_HOST_PACKAGES += \
-	bios.bin \
-	vgabios-cirrus.bin \
diff --git a/target/board/emulator_x86/system_ext.prop b/target/board/emulator_x86/system_ext.prop
deleted file mode 100644
index 64829f3..0000000
--- a/target/board/emulator_x86/system_ext.prop
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# system.prop for generic sdk
-#
-
-rild.libpath=/vendor/lib/libreference-ril.so
diff --git a/target/board/emulator_x86_64/BoardConfig.mk b/target/board/emulator_x86_64/BoardConfig.mk
deleted file mode 100755
index b9cbd8a..0000000
--- a/target/board/emulator_x86_64/BoardConfig.mk
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# x86_64 emulator specific definitions
-TARGET_CPU_ABI := x86_64
-TARGET_ARCH := x86_64
-TARGET_ARCH_VARIANT := x86_64
-
-TARGET_2ND_CPU_ABI := x86
-TARGET_2ND_ARCH := x86
-TARGET_2ND_ARCH_VARIANT := x86_64
-
-TARGET_PRELINK_MODULE := false
-include build/make/target/board/BoardConfigGsiCommon.mk
-include build/make/target/board/BoardConfigEmuCommon.mk
-
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-
-# Wifi.
-BOARD_WLAN_DEVICE           := emulator
-BOARD_HOSTAPD_DRIVER        := NL80211
-BOARD_WPA_SUPPLICANT_DRIVER := NL80211
-BOARD_HOSTAPD_PRIVATE_LIB   := lib_driver_cmd_simulated
-BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
-WPA_SUPPLICANT_VERSION      := VER_0_8_X
-WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
-WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
-WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
diff --git a/target/board/emulator_x86_64/device.mk b/target/board/emulator_x86_64/device.mk
deleted file mode 100755
index 8a9d8da..0000000
--- a/target/board/emulator_x86_64/device.mk
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
-
-ifdef NET_ETH0_STARTONBOOT
-  PRODUCT_VENDOR_PROPERTIES += net.eth0.startonboot=1
-endif
-
-# Ensure we package the BIOS files too.
-PRODUCT_HOST_PACKAGES += \
-	bios.bin \
-	vgabios-cirrus.bin \
diff --git a/target/board/emulator_x86_64/system_ext.prop b/target/board/emulator_x86_64/system_ext.prop
deleted file mode 100644
index ed9d173..0000000
--- a/target/board/emulator_x86_64/system_ext.prop
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# system.prop for generic sdk
-#
-
-rild.libpath=/vendor/lib64/libreference-ril.so
diff --git a/target/board/emulator_x86_64_arm64/BoardConfig.mk b/target/board/emulator_x86_64_arm64/BoardConfig.mk
deleted file mode 100755
index 26b61a6..0000000
--- a/target/board/emulator_x86_64_arm64/BoardConfig.mk
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# x86_64 emulator specific definitions
-TARGET_CPU_ABI := x86_64
-TARGET_ARCH := x86_64
-TARGET_ARCH_VARIANT := x86_64
-
-TARGET_2ND_CPU_ABI := x86
-TARGET_2ND_ARCH := x86
-TARGET_2ND_ARCH_VARIANT := x86_64
-
-TARGET_NATIVE_BRIDGE_ARCH := arm64
-TARGET_NATIVE_BRIDGE_ARCH_VARIANT := armv8-a
-TARGET_NATIVE_BRIDGE_CPU_VARIANT := generic
-TARGET_NATIVE_BRIDGE_ABI := arm64-v8a
-
-TARGET_NATIVE_BRIDGE_2ND_ARCH := arm
-TARGET_NATIVE_BRIDGE_2ND_ARCH_VARIANT := armv7-a-neon
-TARGET_NATIVE_BRIDGE_2ND_CPU_VARIANT := generic
-TARGET_NATIVE_BRIDGE_2ND_ABI := armeabi-v7a armeabi
-
-BUILD_BROKEN_DUP_RULES := true
-
-TARGET_PRELINK_MODULE := false
-
-include build/make/target/board/BoardConfigMainlineCommon.mk
-include build/make/target/board/BoardConfigEmuCommon.mk
-
-# the settings differ from BoardConfigMainlineCommon.mk
-BOARD_USES_SYSTEM_OTHER_ODEX :=
-
-# Resize to 4G to accommodate ASAN and CTS
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 4294967296
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-
-# Wifi.
-BOARD_WLAN_DEVICE           := emulator
-BOARD_HOSTAPD_DRIVER        := NL80211
-BOARD_WPA_SUPPLICANT_DRIVER := NL80211
-BOARD_HOSTAPD_PRIVATE_LIB   := lib_driver_cmd_simulated
-BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
-WPA_SUPPLICANT_VERSION      := VER_0_8_X
-WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
-WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
-WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
diff --git a/target/board/emulator_x86_64_arm64/device.mk b/target/board/emulator_x86_64_arm64/device.mk
deleted file mode 100755
index af023eb..0000000
--- a/target/board/emulator_x86_64_arm64/device.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
diff --git a/target/board/emulator_x86_64_arm64/system_ext.prop b/target/board/emulator_x86_64_arm64/system_ext.prop
deleted file mode 100644
index ed9d173..0000000
--- a/target/board/emulator_x86_64_arm64/system_ext.prop
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# system.prop for generic sdk
-#
-
-rild.libpath=/vendor/lib64/libreference-ril.so
diff --git a/target/board/emulator_x86_arm/BoardConfig.mk b/target/board/emulator_x86_arm/BoardConfig.mk
deleted file mode 100644
index 21fdbc8..0000000
--- a/target/board/emulator_x86_arm/BoardConfig.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# x86 emulator specific definitions
-TARGET_CPU_ABI := x86
-TARGET_ARCH := x86
-TARGET_ARCH_VARIANT := x86
-
-TARGET_NATIVE_BRIDGE_ARCH := arm
-TARGET_NATIVE_BRIDGE_ARCH_VARIANT := armv7-a-neon
-TARGET_NATIVE_BRIDGE_CPU_VARIANT := generic
-TARGET_NATIVE_BRIDGE_ABI := armeabi-v7a armeabi
-
-BUILD_BROKEN_DUP_RULES := true
-
-#
-# The inclusion order below is important.
-# The settings in latter makefiles overwrite those in the former.
-#
-include build/make/target/board/BoardConfigMainlineCommon.mk
-include build/make/target/board/BoardConfigEmuCommon.mk
-
-# the settings differ from BoardConfigMainlineCommon.mk
-BOARD_USES_SYSTEM_OTHER_ODEX :=
-
-# Resize to 4G to accommodate ASAN and CTS
-BOARD_USERDATAIMAGE_PARTITION_SIZE := 4294967296
-
-BOARD_SEPOLICY_DIRS += device/generic/goldfish/sepolicy/x86
-
-# Wifi.
-BOARD_WLAN_DEVICE           := emulator
-BOARD_HOSTAPD_DRIVER        := NL80211
-BOARD_WPA_SUPPLICANT_DRIVER := NL80211
-BOARD_HOSTAPD_PRIVATE_LIB   := lib_driver_cmd_simulated
-BOARD_WPA_SUPPLICANT_PRIVATE_LIB := lib_driver_cmd_simulated
-WPA_SUPPLICANT_VERSION      := VER_0_8_X
-WIFI_DRIVER_FW_PATH_PARAM   := "/dev/null"
-WIFI_DRIVER_FW_PATH_STA     := "/dev/null"
-WIFI_DRIVER_FW_PATH_AP      := "/dev/null"
diff --git a/target/board/emulator_x86_arm/device.mk b/target/board/emulator_x86_arm/device.mk
deleted file mode 100644
index af023eb..0000000
--- a/target/board/emulator_x86_arm/device.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish # for libwifi-hal-emu
-PRODUCT_SOONG_NAMESPACES += device/generic/goldfish-opengl # for goldfish deps.
diff --git a/target/board/emulator_x86_arm/system_ext.prop b/target/board/emulator_x86_arm/system_ext.prop
deleted file mode 100644
index 64829f3..0000000
--- a/target/board/emulator_x86_arm/system_ext.prop
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# system.prop for generic sdk
-#
-
-rild.libpath=/vendor/lib/libreference-ril.so
diff --git a/target/product/AndroidProducts.mk b/target/product/AndroidProducts.mk
index 8787c59..76b1c58 100644
--- a/target/product/AndroidProducts.mk
+++ b/target/product/AndroidProducts.mk
@@ -67,10 +67,6 @@
     $(LOCAL_DIR)/mainline_system_x86_arm.mk \
     $(LOCAL_DIR)/ndk.mk \
     $(LOCAL_DIR)/sdk.mk \
-    $(LOCAL_DIR)/sdk_phone_arm64.mk \
-    $(LOCAL_DIR)/sdk_phone_armv7.mk \
-    $(LOCAL_DIR)/sdk_phone_x86_64.mk \
-    $(LOCAL_DIR)/sdk_phone_x86.mk \
 
 endif
 
@@ -86,7 +82,7 @@
     $(LOCAL_DIR)/module_x86_64only.mk \
 
 COMMON_LUNCH_CHOICES := \
-    aosp_arm64-eng \
-    aosp_arm-eng \
-    aosp_x86_64-eng \
-    aosp_x86-eng \
+    aosp_arm64-trunk_staging-eng \
+    aosp_arm-trunk_staging-eng \
+    aosp_x86_64-trunk_staging-eng \
+    aosp_x86-trunk_staging-eng \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index aa08002..eac3026 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -53,6 +53,7 @@
     com.android.btservices \
     com.android.configinfrastructure \
     com.android.conscrypt \
+    com.android.crashrecovery \
     com.android.devicelock \
     com.android.extservices \
     com.android.healthfitness \
@@ -94,6 +95,7 @@
     flags_health_check \
     framework-graphics \
     framework-minus-apex \
+    framework-minus-apex-install-dependencies \
     framework-res \
     framework-sysconfig.xml \
     fsck.erofs \
@@ -294,7 +296,7 @@
 # These packages are not used on Android TV
 ifneq ($(PRODUCT_IS_ATV),true)
   PRODUCT_PACKAGES += \
-      SoundPicker \
+      $(RELEASE_PACKAGE_SOUND_PICKER) \
 
 endif
 
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index 04e9748..f7c92aa 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -65,6 +65,7 @@
     com.android.btservices:framework-bluetooth \
     com.android.configinfrastructure:framework-configinfrastructure \
     com.android.conscrypt:conscrypt \
+    com.android.crashrecovery:framework-crashrecovery \
     com.android.devicelock:framework-devicelock \
     com.android.healthfitness:framework-healthfitness \
     com.android.i18n:core-icu4j \
@@ -93,6 +94,7 @@
     com.android.appsearch:service-appsearch \
     com.android.art:service-art \
     com.android.configinfrastructure:service-configinfrastructure \
+    com.android.crashrecovery:service-crashrecovery \
     com.android.healthfitness:service-healthfitness \
     com.android.media:service-media-s \
     com.android.ondevicepersonalization:service-ondevicepersonalization \
diff --git a/target/product/emulator_vendor.mk b/target/product/emulator_vendor.mk
deleted file mode 100644
index f71b275..0000000
--- a/target/product/emulator_vendor.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# This file is included by other product makefiles to add all the
-# emulator-related modules to PRODUCT_PACKAGES.
-#
-
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_vendor.mk)
-
-# need this for gles libraries to load properly
-# after moving to /vendor/lib/
-PRODUCT_PACKAGES += \
-    vndk-sp
-
-DEVICE_PACKAGE_OVERLAYS := device/generic/goldfish/overlay
-
-PRODUCT_CHARACTERISTICS := emulator
-
-PRODUCT_FULL_TREBLE_OVERRIDE := true
-
-# goldfish vendor partition configurations
-$(call inherit-product-if-exists, device/generic/goldfish/vendor.mk)
-
-#watchdog tiggers reboot because location service is not
-#responding, disble it for now.
-#still keep it on internal main (master) as it is still working
-#once it is fixed in aosp, remove this block of comment.
-#PRODUCT_VENDOR_PROPERTIES += \
-#config.disable_location=true
-
-# enable Google-specific location features,
-# like NetworkLocationProvider and LocationCollector
-PRODUCT_SYSTEM_EXT_PROPERTIES += \
-    ro.com.google.locationfeatures=1
-
-# disable setupwizard
-PRODUCT_SYSTEM_EXT_PROPERTIES += \
-    ro.setupwizard.mode?=DISABLED
diff --git a/target/product/fullmte.mk b/target/product/fullmte.mk
index d47c685..5726c06 100644
--- a/target/product/fullmte.mk
+++ b/target/product/fullmte.mk
@@ -20,7 +20,8 @@
 # For more details, see:
 # https://source.android.com/docs/security/test/memory-safety/arm-mte
 ifeq ($(filter memtag_heap,$(SANITIZE_TARGET)),)
-  SANITIZE_TARGET := $(strip $(SANITIZE_TARGET) memtag_heap memtag_stack)
+  # TODO(b/292478827): Re-enable memtag_stack when new toolchain rolls.
+  SANITIZE_TARGET := $(strip $(SANITIZE_TARGET) memtag_heap)
   SANITIZE_TARGET_DIAG := $(strip $(SANITIZE_TARGET_DIAG) memtag_heap)
 endif
 PRODUCT_PRODUCT_PROPERTIES += persist.arm64.memtag.default=sync
diff --git a/target/product/generic_system.mk b/target/product/generic_system.mk
index ab36eb1..6d40436 100644
--- a/target/product/generic_system.mk
+++ b/target/product/generic_system.mk
@@ -104,10 +104,6 @@
     libpolicy-subsystem
 
 
-ifneq ($(KEEP_VNDK),true)
-PRODUCT_PACKAGES += llndk.libraries.txt
-endif
-
 # Include all zygote init scripts. "ro.zygote" will select one of them.
 PRODUCT_COPY_FILES += \
     system/core/rootdir/init.zygote32.rc:system/etc/init/hw/init.zygote32.rc \
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index 655a666..007aabd 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -7,7 +7,7 @@
 #####################################################################
 # This is the up-to-date list of vndk libs.
 LATEST_VNDK_LIB_LIST := $(LOCAL_PATH)/current.txt
-UNFROZEN_VNDK :=
+UNFROZEN_VNDK := true
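+# Default to unfrozen; cleared below when a REL build uses a frozen (<= 34) VNDK.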
 ifeq (REL,$(PLATFORM_VERSION_CODENAME))
     # Use frozen vndk lib list only if "34 >= PLATFORM_VNDK_VERSION"
     ifeq ($(call math_gt_or_eq,34,$(PLATFORM_VNDK_VERSION)),true)
@@ -15,8 +15,7 @@
         ifeq ($(wildcard $(LATEST_VNDK_LIB_LIST)),)
             $(error $(LATEST_VNDK_LIB_LIST) file not found. Please copy "$(LOCAL_PATH)/current.txt" to "$(LATEST_VNDK_LIB_LIST)" and commit a CL for release branch)
         endif
-    else
-        UNFROZEN_VNDK := true
+        UNFROZEN_VNDK :=
     endif
 endif
 
@@ -230,7 +229,9 @@
 
 # Filter LLNDK libs moved to APEX to avoid pulling them into /system/LIB
 LOCAL_REQUIRED_MODULES := \
-    $(filter-out $(LLNDK_MOVED_TO_APEX_LIBRARIES),$(LLNDK_LIBRARIES))
+    $(filter-out $(LLNDK_MOVED_TO_APEX_LIBRARIES),$(LLNDK_LIBRARIES)) \
+    llndk.libraries.txt
+
 
 include $(BUILD_PHONY_PACKAGE)
 
diff --git a/target/product/handheld_system.mk b/target/product/handheld_system.mk
index 6c93dd7..00b62bc 100644
--- a/target/product/handheld_system.mk
+++ b/target/product/handheld_system.mk
@@ -40,6 +40,7 @@
     BuiltInPrintService \
     CalendarProvider \
     cameraserver \
+    com.android.nfcservices \
     CameraExtensionsProxy \
     CaptivePortalLogin \
     CertInstaller \
@@ -56,7 +57,6 @@
     MmsService \
     MtpService \
     MusicFX \
-    NfcNci \
     PacProcessor \
     preinstalled-packages-platform-handheld-system.xml \
     PrintRecommendationService \
diff --git a/target/product/sdk_phone_arm64.mk b/target/product/sdk_phone_arm64.mk
deleted file mode 100644
index c16c403..0000000
--- a/target/product/sdk_phone_arm64.mk
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-PRODUCT_USE_DYNAMIC_PARTITIONS := true
-
-# This is a build configuration for a full-featured build of the
-# Open-Source part of the tree. It's geared toward a US-centric
-# build quite specifically for the emulator, and might not be
-# entirely appropriate to inherit from for on-device configurations.
-
-# Enable mainline checking for exact this product name
-ifeq (sdk_phone_arm64,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# All components inherited here go to vendor or vendor_boot image
-#
-$(call inherit-product-if-exists, device/generic/goldfish/arm64-vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/emulator_arm64/device.mk)
-
-# keep this apk for sdk targets for now
-PRODUCT_PACKAGES += \
-    EmulatorSmokeTests
-
-# Overrides
-PRODUCT_BRAND := Android
-PRODUCT_NAME := sdk_phone_arm64
-PRODUCT_DEVICE := emulator_arm64
-PRODUCT_MODEL := Android SDK built for arm64
-# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
-# RadioConfigLib), which makes it impossible to translate their module names to
-# library name, so the check fails.
-PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
-
-PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API := true
diff --git a/target/product/sdk_phone_armv7.mk b/target/product/sdk_phone_armv7.mk
deleted file mode 100644
index 293b1ea..0000000
--- a/target/product/sdk_phone_armv7.mk
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Copyright (C) 2007 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-PRODUCT_USE_DYNAMIC_PARTITIONS := true
-
-# This is a build configuration for a full-featured build of the
-# Open-Source part of the tree. It's geared toward a US-centric
-# build quite specifically for the emulator, and might not be
-# entirely appropriate to inherit from for on-device configurations.
-
-# Enable mainline checking for exact this product name
-ifeq (sdk_phone_armv7,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# All components inherited here go to vendor image
-#
-$(call inherit-product-if-exists, build/make/target/product/ramdisk_stub.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/emulator_arm/device.mk)
-
-# keep this apk for sdk targets for now
-PRODUCT_PACKAGES += \
-    EmulatorSmokeTests
-
-
-# Overrides
-PRODUCT_BRAND := Android
-PRODUCT_NAME := sdk_phone_armv7
-PRODUCT_DEVICE := emulator_arm
-PRODUCT_MODEL := Android SDK built for arm
-# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
-# RadioConfigLib), which makes it impossible to translate their module names to
-# library name, so the check fails.
-PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
-
-PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API := true
diff --git a/target/product/sdk_phone_x86.mk b/target/product/sdk_phone_x86.mk
deleted file mode 100644
index 90cd8d5..0000000
--- a/target/product/sdk_phone_x86.mk
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-PRODUCT_USE_DYNAMIC_PARTITIONS := true
-
-# This is a build configuration for a full-featured build of the
-# Open-Source part of the tree. It's geared toward a US-centric
-# build quite specifically for the emulator, and might not be
-# entirely appropriate to inherit from for on-device configurations.
-
-#
-# All components inherited here go to system image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking for exact this product name
-ifeq (sdk_phone_x86,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# All components inherited here go to vendor image
-#
-$(call inherit-product-if-exists, device/generic/goldfish/x86-vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/emulator_x86/device.mk)
-
-# Overrides
-PRODUCT_BRAND := Android
-PRODUCT_NAME := sdk_phone_x86
-PRODUCT_DEVICE := emulator_x86
-PRODUCT_MODEL := Android SDK built for x86
-# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
-# RadioConfigLib), which makes it impossible to translate their module names to
-# library name, so the check fails.
-PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
-
-PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API := true
diff --git a/target/product/sdk_phone_x86_64.mk b/target/product/sdk_phone_x86_64.mk
deleted file mode 100644
index b2e14a5..0000000
--- a/target/product/sdk_phone_x86_64.mk
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-# Copyright (C) 2009 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-PRODUCT_USE_DYNAMIC_PARTITIONS := true
-
-# This is a build configuration for a full-featured build of the
-# Open-Source part of the tree. It's geared toward a US-centric
-# build quite specifically for the emulator, and might not be
-# entirely appropriate to inherit from for on-device configurations.
-
-#
-# All components inherited here go to system image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking for exact this product name
-ifeq (sdk_phone_x86_64,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# All components inherited here go to vendor image
-#
-$(call inherit-product-if-exists, device/generic/goldfish/x86_64-vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/emulator_vendor.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/board/emulator_x86_64/device.mk)
-
-# Overrides
-PRODUCT_BRAND := Android
-PRODUCT_NAME := sdk_phone_x86_64
-PRODUCT_DEVICE := emulator_x86_64
-PRODUCT_MODEL := Android SDK built for x86_64
-# Disable <uses-library> checks for SDK product. It lacks some libraries (e.g.
-# RadioConfigLib), which makes it impossible to translate their module names to
-# library name, so the check fails.
-PRODUCT_BROKEN_VERIFY_USES_LIBRARIES := true
-
-PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API := true
diff --git a/tools/aconfig/Android.bp b/tools/aconfig/Android.bp
index 02fc57c..425d8a9 100644
--- a/tools/aconfig/Android.bp
+++ b/tools/aconfig/Android.bp
@@ -13,6 +13,11 @@
         type: "lite",
     },
     sdk_version: "current",
+    min_sdk_version: "UpsideDownCake",
+    apex_available: [
+        "com.android.configinfrastructure",
+        "//apex_available:platform",
+    ],
 }
 
 java_library_host {
diff --git a/tools/aconfig/fake_device_config/Android.bp b/tools/aconfig/fake_device_config/Android.bp
index 5f62ae9..7420aa8 100644
--- a/tools/aconfig/fake_device_config/Android.bp
+++ b/tools/aconfig/fake_device_config/Android.bp
@@ -15,7 +15,7 @@
 java_library {
 	name: "fake_device_config",
 	srcs: ["src/**/*.java"],
-	sdk_version: "core_platform",
-    host_supported: true,
+	sdk_version: "core_current",
+	host_supported: true,
 }
 
diff --git a/tools/aconfig/overrideflags/overrideflags.py b/tools/aconfig/overrideflags/overrideflags.py
new file mode 100644
index 0000000..e355c21
--- /dev/null
+++ b/tools/aconfig/overrideflags/overrideflags.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Create Aconfig value building rules.
+
+This script will help to create Aconfig flag value building rules. It will
+parse necessary information in the value file to create the building rules, but
+it will not validate the value file. The validation will defer to the building
+system.
+"""
+
+import argparse
+import pathlib
+import re
+import sys
+
+
+_VALUE_LIST_TEMPLATE: str = """
+ACONFIG_VALUES_LIST_LOCAL = [{}]
+"""
+
+_ACONFIG_VALUES_TEMPLATE: str = """
+aconfig_values {{
+    name: "{}",
+    package: "{}",
+    srcs: [
+        "{}",
+    ]
+}}
+"""
+
+_ACONFIG_VALUES_NAME_SUFFIX: str = "aconfig-local-override-{}"
+
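+# Matches value-file lines of the form: package: "com.example.flags"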
+_PACKAGE_REGEX = re.compile(r"^package:\s*\"([\w.]+)\"")
+_ANDROID_BP_FILE_NAME = r"Android.bp"
+
+
+def _parse_packages(file: pathlib.Path) -> set[str]:
+  packages = set()
+  with open(file) as f:
+    for line in f:
+      line = line.strip()
+      package_match = _PACKAGE_REGEX.match(line)
+      if package_match is None:
+        continue
+      package_name = package_match.group(1)
+      packages.add(package_name)
+
+  return packages
+
+
+def _create_android_bp(packages: set[str], file_name: str) -> str:
+  android_bp = ""
+  value_list = ",\n    ".join(
+      map(f'"{_ACONFIG_VALUES_NAME_SUFFIX}"'.format, packages)
+  )
+  if value_list:
+    value_list = "\n    " + value_list + "\n"
+  android_bp += _VALUE_LIST_TEMPLATE.format(value_list) + "\n"
+
+  for package in packages:
+    android_bp += _ACONFIG_VALUES_TEMPLATE.format(
+        _ACONFIG_VALUES_NAME_SUFFIX.format(package), package, file_name
+    )
+    android_bp += "\n"
+
+  return android_bp
+
+
+def _write_android_bp(new_android_bp: str, out: pathlib.Path) -> None:
+  if not out.is_dir():
+    out.mkdir(parents=True, exist_ok=True)
+
+  output = out.joinpath(_ANDROID_BP_FILE_NAME)
+  with open(output, "r+", encoding="utf8") as file:
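+    # Keep everything before any previously generated
+    # ACONFIG_VALUES_LIST_LOCAL block; it is regenerated below.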
+    lines = []
+    for line in file:
+      line = line.rstrip("\n")
+      if line.startswith("ACONFIG_VALUES_LIST_LOCAL"):
+        break
+      lines.append(line)
+    # Overwrite the file with the updated contents.
+    file.seek(0)
+    file.truncate()
+    file.write("\n".join(lines))
+    file.write(new_android_bp)
+
+
+def main(args):
+  """Program entry point."""
+  args_parser = argparse.ArgumentParser()
+  args_parser.add_argument(
+      "--overrides",
+      required=True,
+      help="The path to override file.",
+  )
+  args_parser.add_argument(
+      "--out",
+      required=True,
+      help="The path to output directory.",
+  )
+
+  args = args_parser.parse_args(args)
+  file = pathlib.Path(args.overrides)
+  out = pathlib.Path(args.out)
+  if not file.is_file():
+    raise FileNotFoundError(f"File '{file}' was not found")
+
+  packages = _parse_packages(file)
+  new_android_bp = _create_android_bp(packages, file.name)
+  _write_android_bp(new_android_bp, out)
+
+
+if __name__ == "__main__":
+  main(sys.argv[1:])
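
For orientation, a minimal sketch (not part of the change) of what `_create_android_bp` above emits; the package and file names are invented:

```python
# Hypothetical input: one package parsed from an override file.
packages = {"com.android.aconfig.test"}
print(_create_android_bp(packages, "override_values.textproto"))
```

which prints, roughly:

```
ACONFIG_VALUES_LIST_LOCAL = [
    "aconfig-local-override-com.android.aconfig.test"
]

aconfig_values {
    name: "aconfig-local-override-com.android.aconfig.test",
    package: "com.android.aconfig.test",
    srcs: [
        "override_values.textproto",
    ]
}
```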
diff --git a/tools/aconfig/src/codegen_cpp.rs b/tools/aconfig/src/codegen_cpp.rs
index cf0abb9..aeb57a3 100644
--- a/tools/aconfig/src/codegen_cpp.rs
+++ b/tools/aconfig/src/codegen_cpp.rs
@@ -34,13 +34,17 @@
     let class_elements: Vec<ClassElement> =
         parsed_flags_iter.map(|pf| create_class_element(package, pf)).collect();
     let readwrite = class_elements.iter().any(|item| item.readwrite);
+    let has_fixed_read_only = class_elements.iter().any(|item| item.is_fixed_read_only);
     let header = package.replace('.', "_");
+    let package_macro = header.to_uppercase();
     let cpp_namespace = package.replace('.', "::");
     ensure!(codegen::is_valid_name_ident(&header));
     let context = Context {
         header: &header,
+        package_macro: &package_macro,
         cpp_namespace: &cpp_namespace,
         package,
+        has_fixed_read_only,
         readwrite,
         for_test: codegen_mode == CodegenMode::Test,
         class_elements,
@@ -79,8 +83,10 @@
 #[derive(Serialize)]
 pub struct Context<'a> {
     pub header: &'a str,
+    pub package_macro: &'a str,
     pub cpp_namespace: &'a str,
     pub package: &'a str,
+    pub has_fixed_read_only: bool,
     pub readwrite: bool,
     pub for_test: bool,
     pub class_elements: Vec<ClassElement>,
@@ -89,8 +95,10 @@
 #[derive(Serialize)]
 pub struct ClassElement {
     pub readwrite: bool,
+    pub is_fixed_read_only: bool,
     pub default_value: String,
     pub flag_name: String,
+    pub flag_macro: String,
     pub device_config_namespace: String,
     pub device_config_flag: String,
 }
@@ -98,12 +106,14 @@
 fn create_class_element(package: &str, pf: &ProtoParsedFlag) -> ClassElement {
     ClassElement {
         readwrite: pf.permission() == ProtoFlagPermission::READ_WRITE,
+        is_fixed_read_only: pf.is_fixed_read_only(),
         default_value: if pf.state() == ProtoFlagState::ENABLED {
             "true".to_string()
         } else {
             "false".to_string()
         },
         flag_name: pf.name().to_string(),
+        flag_macro: pf.name().to_uppercase(),
         device_config_namespace: pf.namespace().to_string(),
         device_config_flag: codegen::create_device_config_ident(package, pf.name())
             .expect("values checked at flag parse time"),
@@ -118,6 +128,14 @@
     const EXPORTED_PROD_HEADER_EXPECTED: &str = r#"
 #pragma once
 
+#ifndef COM_ANDROID_ACONFIG_TEST
+#define COM_ANDROID_ACONFIG_TEST(FLAG) COM_ANDROID_ACONFIG_TEST_##FLAG
+#endif
+
+#ifndef COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO
+#define COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO true
+#endif
+
 #ifdef __cplusplus
 
 #include <memory>
@@ -149,7 +167,7 @@
 }
 
 inline bool enabled_fixed_ro() {
-    return true;
+    return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
 }
 
 inline bool enabled_ro() {
@@ -319,7 +337,7 @@
             }
 
             virtual bool enabled_fixed_ro() override {
-                return true;
+                return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
             }
 
             virtual bool enabled_ro() override {
@@ -348,7 +366,7 @@
 }
 
 bool com_android_aconfig_test_enabled_fixed_ro() {
-    return true;
+    return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
 }
 
 bool com_android_aconfig_test_enabled_ro() {
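
Because the generated header wraps each fixed read-only flag in an `#ifndef` guard, a build can pin such a flag to a different value by predefining the macro before the header is compiled. A hypothetical sketch (module and file names invented) of doing so from a consumer's Android.bp:

```
cc_library {
    name: "libaconfig_test_consumer",  // hypothetical module
    srcs: ["consumer.cpp"],
    cflags: [
        // Predefine the macro so the generated #ifndef default is skipped.
        "-DCOM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO=false",
    ],
}
```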
diff --git a/tools/aconfig/templates/cpp_exported_header.template b/tools/aconfig/templates/cpp_exported_header.template
index 4d56dbc..6413699 100644
--- a/tools/aconfig/templates/cpp_exported_header.template
+++ b/tools/aconfig/templates/cpp_exported_header.template
@@ -1,9 +1,25 @@
 #pragma once
 
+{{ if not for_test- }}
+{{ if has_fixed_read_only- }}
+#ifndef {package_macro}
+#define {package_macro}(FLAG) {package_macro}_##FLAG
+#endif
+{{ for item in class_elements- }}
+{{ if item.is_fixed_read_only- }}
+#ifndef {package_macro}_{item.flag_macro}
+#define {package_macro}_{item.flag_macro} {item.default_value}
+#endif
+{{ endif }}
+{{ -endfor }}
+{{ -endif }}
+{{ -endif }}
+
 #ifdef __cplusplus
 
 #include <memory>
 
+
 namespace {cpp_namespace} \{
 
 class flag_provider_interface \{
@@ -15,7 +31,7 @@
     {{ if for_test }}
     virtual void {item.flag_name}(bool val) = 0;
     {{ -endif }}
-    {{ endfor }}
+    {{ -endfor }}
 
     {{ if for_test }}
     virtual void reset_flags() \{}
@@ -29,10 +45,14 @@
     {{ if for_test }}
     return provider_->{item.flag_name}();
     {{ -else- }}
-    {{ if not item.readwrite- }}
-    return {item.default_value};
-    {{ -else- }}
+    {{ if item.readwrite- }}
     return provider_->{item.flag_name}();
+    {{ -else- }}
+    {{ if item.is_fixed_read_only }}
+    return {package_macro}_{item.flag_macro};
+    {{ -else- }}
+    return {item.default_value};
+    {{ -endif }}
     {{ -endif }}
     {{ -endif }}
 }
@@ -42,7 +62,7 @@
     provider_->{item.flag_name}(val);
 }
 {{ -endif }}
-{{ endfor }}
+{{ -endfor }}
 
 {{ if for_test }}
 inline void reset_flags() \{
@@ -61,7 +81,7 @@
 {{ if for_test }}
 void set_{header}_{item.flag_name}(bool val);
 {{ -endif }}
-{{ endfor - }}
+{{ -endfor }}
 
 {{ if for_test }}
 void {header}_reset_flags();
diff --git a/tools/aconfig/templates/cpp_source_file.template b/tools/aconfig/templates/cpp_source_file.template
index 5b71b31..0f1b845 100644
--- a/tools/aconfig/templates/cpp_source_file.template
+++ b/tools/aconfig/templates/cpp_source_file.template
@@ -58,7 +58,11 @@
                 "{item.device_config_flag}",
                 "{item.default_value}") == "true";
             {{ -else- }}
-                return {item.default_value};
+            {{ if item.is_fixed_read_only }}
+            return {package_macro}_{item.flag_macro};
+            {{ -else- }}
+            return {item.default_value};
+            {{ -endif }}
             {{ -endif }}
         }
         {{ endfor }}
@@ -79,10 +83,14 @@
     {{ if for_test }}
     return {cpp_namespace}::{item.flag_name}();
     {{ -else- }}
-    {{ if not item.readwrite- }}
-    return {item.default_value};
-    {{ -else- }}
+    {{ if item.readwrite- }}
     return {cpp_namespace}::{item.flag_name}();
+    {{ -else- }}
+    {{ if item.is_fixed_read_only }}
+    return {package_macro}_{item.flag_macro};
+    {{ -else- }}
+    return {item.default_value};
+    {{ -endif }}
     {{ -endif }}
     {{ -endif }}
 }
diff --git a/tools/auto_gen_test_config_test.py b/tools/auto_gen_test_config_test.py
index ce97723..b7ef0b0 100644
--- a/tools/auto_gen_test_config_test.py
+++ b/tools/auto_gen_test_config_test.py
@@ -93,7 +93,7 @@
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
-        {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="com.android.my.tests.x" />
+        <option name="package" value="com.android.my.tests.x" />
         <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
     </test>
 </configuration>
@@ -125,7 +125,7 @@
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.InstrumentationTest" >
-        {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="com.android.my.tests.x" />
+        <option name="package" value="com.android.my.tests.x" />
         <option name="runner" value="android.test.InstrumentationTestRunner" />
     </test>
 </configuration>
@@ -178,7 +178,7 @@
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.{TEST_TYPE}" >
-        {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="{PACKAGE}" />
+        <option name="package" value="{PACKAGE}" />
         <option name="runner" value="{RUNNER}" />
     </test>
 </configuration>
diff --git a/tools/overrideflags.sh b/tools/overrideflags.sh
new file mode 100755
index 0000000..b8605dc
--- /dev/null
+++ b/tools/overrideflags.sh
@@ -0,0 +1,97 @@
+#!/bin/bash -e
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+source $(cd $(dirname $BASH_SOURCE) &> /dev/null && pwd)/../shell_utils.sh
+require_top
+
+function print_help() {
+    echo -e "overrideflags is used to set default value for local build."
+    echo -e "\nOptions:"
+    echo -e "\t--release-config  \tPath to release configuration directory. Required"
+    echo -e "\t--no-edit         \tIf present, skip editing flag value file."
+    echo -e "\t-h/--help         \tShow this help."
+}
+
+function main() {
+    while (($# > 0)); do
+        case $1 in
+        --release-config)
+            if [[ $# -le 1 ]]; then
+                echo "--release-config requires a path"
+                return 1
+            fi
+            local release_config_dir="$2"
+            shift 2
+            ;;
+        --no-edit)
+            local no_edit="true"
+            shift 1
+            ;;
+        -h|--help)
+            print_help
+            return
+            ;;
+        *)
+            echo "$1 is unrecognized"
+            print_help
+            return 1
+            ;;
+        esac
+    done
+
+
+    case $(uname -s) in
+        Darwin)
+            local host_arch=darwin-x86
+            ;;
+        Linux)
+            local host_arch=linux-x86
+            ;;
+        *)
+            >&2 echo Unknown host $(uname -s)
+            return
+            ;;
+    esac
+
+    if [[ -z "${release_config_dir}" ]]; then
+        echo "Please provide release configuration path by --release-config"
+        exit 1
+    elif [ ! -d "${release_config_dir}" ]; then
+        echo "${release_config_dir} is an invalid directory"
+        exit 1
+    fi
+    local T="$(gettop)"
+    local aconfig_dir="${T}"/build/make/tools/aconfig/
+    local overrideflag_py="${aconfig_dir}"/overrideflags/overrideflags.py
+    local overridefile="${release_config_dir}/aconfig/override_values.textproto"
+
+    # Edit override file
+    if [[ -z "${no_edit}" ]]; then
+        editor="${EDITOR:-$(which vim)}"
+
+        eval "${editor} ${overridefile}"
+        if [ $? -ne 0 ]; then
+            echo "Fail to set override values"
+            return 1
+        fi
+    fi
+
+    ${T}/prebuilts/build-tools/${host_arch}/bin/py3-cmd -u "${overrideflag_py}" \
+        --overrides "${overridefile}" \
+        --out "${release_config_dir}/aconfig"
+}
+
+
+main "$@"
diff --git a/tools/protos/metadata_file.proto b/tools/protos/metadata_file.proto
index ac1129a..47562c5 100644
--- a/tools/protos/metadata_file.proto
+++ b/tools/protos/metadata_file.proto
@@ -92,6 +92,8 @@
     SBOMRef sbom_ref = 10;
   }
 
+  // Identifiers for the package.
+  repeated Identifier identifier = 11;
 }
 
 // URL associated with a third-party package.
@@ -278,4 +280,136 @@
   // https://spdx.github.io/spdx-spec/v2.3/package-information/#72-package-spdx-identifier-field or
   // https://spdx.github.io/spdx-spec/v2.3/file-information/#82-file-spdx-identifier-field
   optional string element_id = 3;
+}
+
+// Identifier for a third-party package.
+// See go/tp-metadata-id.
+message Identifier {
+  // The type of the identifier. Either an "ecosystem" value from
+  // https://ossf.github.io/osv-schema/#affectedpackage-field such as "Go",
+  // "npm" or "PyPI". The "value" and "version" fields follow the same rules as
+  // defined in the OSV spec.
+  //
+  // Or one of:
+  //  - "Git": The "value" field is the URL of the upstream git repository this
+  //  package is retrieved from.
+  //  For example:
+  //   - https://github.com/git/git
+  //   - git://git.kernel.org/pub/scm/git/git
+  //
+  //  Use of a git URL requires that the package "version" value specify a
+  //  specific git tag or revision. This must not be a branch name.
+  //
+  //  - "SVN": The "value" field is the URL of the upstream SVN repository this
+  //  package is retrieved from.
+  //  For example:
+  //   - http://llvm.org/svn/llvm-project/llvm/
+  //
+  //  Use of an SVN URL requires that the package "version" value specify
+  //  a specific SVN tag or revision. This must not be a branch name.
+  //
+  //  - "Hg": The "value" field is the URL of the upstream mercurial repository
+  //  this package is retrieved from.
+  //  For example:
+  //   - https://mercurial-scm.org/repo/evolve
+  //
+  //  Use of a mercurial URL requires that the package "version" value
+  //  specify a specific tag or revision. This must not be a branch name.
+  //
+  //  - "Darcs": the "value" field is the URL of the upstream darcs repository
+  //  this package is retrieved from.
+  //  For example:
+  //   - https://hub.darcs.net/hu.dwim/hu.dwim.util
+  //
+  //  Use of a Darcs URL requires that the package "version" value
+  //  specify a specific tag or revision. This must not be a branch name.
+  //
+  //  - "Piper": The "value" field is the URL of the upstream piper location.
+  //  This is primarily used when a package is being migrated into third_party
+  //  from elsewhere in Piper, or when a package is being newly developed in
+  //  third_party.
+  //
+  //  - "VCS": This is a generic fallback for an unlisted VCS system. The
+  // "value" field is the URL of the repository for this VCS.
+  //
+  //  - "Archive": The "value" field is the URL of the archive containing the
+  //  source code for the package, for example a zip or tgz file.
+  //
+  //  - "PrebuiltByAlphabet": This type should be used for archives of primarily
+  //  Google-owned source code (may contain non-Google-owned dependencies),
+  //  which have been built using production Google infrastructure and copied
+  //  into third_party.
+  //
+  //  - "LocalSource": The "value" field is the URL identifying where the local
+  //  copy of the package source code can be found.
+  //  Examples:
+  //   - https://android.googlesource.com/platform/external/apache-http/
+  //
+  //  Typically, the metadata files describing a package reside in the same
+  //  directory as the source code for the package. In a few rare cases where
+  //  they are separate, the LocalSource URL identifies where to find the
+  //  source code. This only describes where to find the local copy of the
+  //  source; there should always be an additional URL describing where the
+  //  package was retrieved from.
+  //
+  //  - "Other": An identifier that does not fit any other type. This may also
+  //  indicate that the source code was received via email or some other
+  //  out-of-band way. This is most commonly used with commercial software
+  //  received directly from the Vendor. In the case of email, the "value" field
+  //  can be used to provide additional information about how it was received.
+  optional string type = 1;
+
+  // A human-readable string indicating why a third-party package does not
+  // have this identifier type set.
+  // Example:
+  //   identifier {
+  //     type: "PyPI"
+  //     omission_reason: "Only on Git. Not published to PyPI."
+  //   }
+  optional string omission_reason = 2;
+
+  // The value of the package identifier as defined by the "type".
+  // Example:
+  //  identifier {
+  //    type: "PyPI"
+  //    value: "django"
+  //    version: "3.2.8"
+  //  }
+  optional string value = 3;
+
+  // The version associated with this package as defined by the "type".
+  // Example:
+  //  identifier {
+  //    type: "PyPI"
+  //    value: "django"
+  //    version: "3.2.8"
+  //  }
+  optional string version = 4;
+
+  // The closest version associated with this package as defined by the "type".
+  // This should only be set by automated infrastructure by applying automated
+  // heuristics, such as the closest git tag or package version from a package
+  // manifest file (e.g. pom.xml).
+  //
+  // For most identifier types, only one of `version` or `closest_version`
+  // should be set (not both). The exception is source repository types such as
+  // "Git", where `version` will refer to a git commit, and `closest_version`
+  // refers to a git tag.
+  // Example:
+  //  identifier {
+  //    type: "Git",
+  //    value: "https://github.com/my/repo"
+  //    version: "e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e"
+  //    closest_version: "v1.4"
+  //  }
+  optional string closest_version = 5;
+
+  // When `true`, this Identifier represents the location from which the source
+  // code for this package was originally obtained. This should only be set for
+  // *one* Identifier in a third_party package's METADATA.
+  //
+  // For external packages, this is typically for the Identifier associated
+  // with the version control system or package manager that was used to
+  // check out or download the code.
+  optional bool primary_source = 6;
 }
\ No newline at end of file
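
Putting the new message to work, a hypothetical METADATA entry (values borrowed from the Git example in the comments above, with primary_source added):

```
third_party {
  identifier {
    type: "Git"
    value: "https://github.com/my/repo"
    version: "e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e"
    closest_version: "v1.4"
    primary_source: true
  }
}
```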
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 971518a..ad014af 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -235,6 +235,9 @@
         "rangelib.py",
         "sparse_img.py",
     ],
+    data: [
+        ":zip2zip",
+    ],
     // Only the tools that are referenced directly are listed as required modules. For example,
     // `avbtool` is not here, as the script always uses the one from info_dict['avb_avbtool'].
     required: [
@@ -249,7 +252,6 @@
         "signapk",
         "toybox",
         "unpack_bootimg",
-        "zip2zip",
     ],
 }
 
@@ -481,8 +483,13 @@
     defaults: ["releasetools_binary_defaults"],
     srcs: [
         "make_recovery_patch.py",
+        "non_ab_ota.py",
+        "edify_generator.py",
+        "check_target_files_vintf.py",
     ],
     libs: [
+        "ota_utils_lib",
+        "ota_metadata_proto",
         "releasetools_common",
     ],
 }
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index bfc87b8..1ddffc1 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -68,7 +68,7 @@
     self.avbtool = avbtool if avbtool else "avbtool"
     self.sign_tool = sign_tool
 
-  def ProcessApexFile(self, apk_keys, payload_key, signing_args=None, is_sepolicy=False):
+  def ProcessApexFile(self, apk_keys, payload_key, signing_args=None):
     """Scans and signs the payload files and repack the apex
 
     Args:
@@ -86,13 +86,9 @@
                 'list', self.apex_path]
     entries_names = common.RunAndCheckOutput(list_cmd).split()
     apk_entries = [name for name in entries_names if name.endswith('.apk')]
-    sepolicy_entries = []
-    if is_sepolicy:
-      sepolicy_entries = [name for name in entries_names if
-          name.startswith('./etc/SEPolicy') and name.endswith('.zip')]
 
     # No need to sign and repack, return the original apex path.
-    if not apk_entries and not sepolicy_entries and self.sign_tool is None:
+    if not apk_entries and self.sign_tool is None:
       logger.info('No apk file to sign in %s', self.apex_path)
       return self.apex_path
 
@@ -108,14 +104,14 @@
                        ' %s', entry)
 
     payload_dir, has_signed_content = self.ExtractApexPayloadAndSignContents(
-        apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args)
+        apk_entries, apk_keys, payload_key, signing_args)
     if not has_signed_content:
-      logger.info('No contents have been signed in %s', self.apex_path)
+      logger.info('No contents have been signed in %s', self.apex_path)
       return self.apex_path
 
     return self.RepackApexPayload(payload_dir, payload_key, signing_args)
 
-  def ExtractApexPayloadAndSignContents(self, apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args):
+  def ExtractApexPayloadAndSignContents(self, apk_entries, apk_keys, payload_key, signing_args):
     """Extracts the payload image and signs the containing apk files."""
     if not os.path.exists(self.debugfs_path):
       raise ApexSigningError(
@@ -133,11 +129,11 @@
                    'extract',
                    self.apex_path, payload_dir]
     common.RunAndCheckOutput(extract_cmd)
-    assert os.path.exists(self.apex_path)
 
     has_signed_content = False
     for entry in apk_entries:
       apk_path = os.path.join(payload_dir, entry)
+      assert os.path.exists(apk_path)
 
       key_name = apk_keys.get(os.path.basename(entry))
       if key_name in common.SPECIAL_CERT_STRINGS:
@@ -154,37 +150,6 @@
           codename_to_api_level_map=self.codename_to_api_level_map)
       has_signed_content = True
 
-    for entry in sepolicy_entries:
-      sepolicy_path = os.path.join(payload_dir, entry)
-
-      if not 'etc' in entry:
-        logger.warning('Sepolicy path does not contain the intended directory name etc:'
-                       ' %s', entry)
-
-      key_name = apk_keys.get(os.path.basename(entry))
-      if key_name is None:
-        logger.warning('Failed to find signing keys for {} in'
-                       ' apex {}, payload key will be used instead.'
-                       ' Use "-e <name>=" to specify a key'
-                       .format(entry, self.apex_path))
-        key_name = payload_key
-
-      if key_name in common.SPECIAL_CERT_STRINGS:
-        logger.info('Not signing: %s due to special cert string', sepolicy_path)
-        continue
-
-      if OPTIONS.sign_sepolicy_path is not None:
-        sig_path = os.path.join(payload_dir, sepolicy_path + '.sig')
-        fsv_sig_path = os.path.join(payload_dir, sepolicy_path + '.fsv_sig')
-        old_sig = common.MakeTempFile()
-        old_fsv_sig = common.MakeTempFile()
-        os.rename(sig_path, old_sig)
-        os.rename(fsv_sig_path, old_fsv_sig)
-
-      logger.info('Signing sepolicy file %s in apex %s', sepolicy_path, self.apex_path)
-      if common.SignSePolicy(sepolicy_path, key_name, self.key_passwords.get(key_name)):
-        has_signed_content = True
-
     if self.sign_tool:
       logger.info('Signing payload contents in apex %s with %s', self.apex_path, self.sign_tool)
       # Pass avbtool to the custom signing tool
@@ -368,8 +333,7 @@
 
 def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
                          container_pw, apk_keys, codename_to_api_level_map,
-                         no_hashtree, signing_args=None, sign_tool=None,
-                         is_sepolicy=False):
+                         no_hashtree, signing_args=None, sign_tool=None):
   """Signs the current uncompressed APEX with the given payload/container keys.
 
   Args:
@@ -382,7 +346,6 @@
     no_hashtree: Don't include hashtree in the signed APEX.
     signing_args: Additional args to be passed to the payload signer.
     sign_tool: A tool to sign the contents of the APEX.
-    is_sepolicy: Indicates if the apex is a sepolicy.apex
 
   Returns:
     The path to the signed APEX file.
@@ -392,8 +355,7 @@
   apk_signer = ApexApkSigner(apex_file, container_pw,
                              codename_to_api_level_map,
                              avbtool, sign_tool)
-  apex_file = apk_signer.ProcessApexFile(
-      apk_keys, payload_key, signing_args, is_sepolicy)
+  apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key, signing_args)
 
   # 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
   # payload_key.
@@ -447,8 +409,7 @@
 
 def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
                        container_pw, apk_keys, codename_to_api_level_map,
-                       no_hashtree, signing_args=None, sign_tool=None,
-                       is_sepolicy=False):
+                       no_hashtree, signing_args=None, sign_tool=None):
   """Signs the current compressed APEX with the given payload/container keys.
 
   Args:
@@ -460,7 +421,6 @@
     codename_to_api_level_map: A dict that maps from codename to API level.
     no_hashtree: Don't include hashtree in the signed APEX.
     signing_args: Additional args to be passed to the payload signer.
-    is_sepolicy: Indicates if the apex is a sepolicy.apex
 
   Returns:
     The path to the signed APEX file.
@@ -487,8 +447,7 @@
       codename_to_api_level_map,
       no_hashtree,
       signing_args,
-      sign_tool,
-      is_sepolicy)
+      sign_tool)
 
   # 3. Compress signed original apex.
   compressed_apex_file = common.MakeTempFile(prefix='apex-container-',
@@ -515,8 +474,8 @@
 
 
 def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
-             apk_keys, codename_to_api_level_map, no_hashtree,
-             signing_args=None, sign_tool=None, is_sepolicy=False):
+             apk_keys, codename_to_api_level_map,
+             no_hashtree, signing_args=None, sign_tool=None):
   """Signs the current APEX with the given payload/container keys.
 
   Args:
@@ -528,7 +487,6 @@
     codename_to_api_level_map: A dict that maps from codename to API level.
     no_hashtree: Don't include hashtree in the signed APEX.
     signing_args: Additional args to be passed to the payload signer.
-    is_sepolicy: Indicates if the apex is a sepolicy.apex
 
   Returns:
     The path to the signed APEX file.
@@ -554,8 +512,7 @@
           no_hashtree=no_hashtree,
           apk_keys=apk_keys,
           signing_args=signing_args,
-          sign_tool=sign_tool,
-          is_sepolicy=is_sepolicy)
+          sign_tool=sign_tool)
     elif apex_type == 'COMPRESSED':
       return SignCompressedApex(
           avbtool,
@@ -567,8 +524,7 @@
           no_hashtree=no_hashtree,
           apk_keys=apk_keys,
           signing_args=signing_args,
-          sign_tool=sign_tool,
-          is_sepolicy=is_sepolicy)
+          sign_tool=sign_tool)
     else:
       # TODO(b/172912232): support signing compressed apex
       raise ApexInfoError('Unsupported apex type {}'.format(apex_type))
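
With the sepolicy path gone, the entry point is simpler. A minimal sketch of calling it (all values are placeholders, not taken from this change):

```python
# Hypothetical invocation; apex_bytes would be the raw contents of the
# unsigned APEX read from the target files.
signed_path = SignApex(
    avbtool="avbtool",
    apex_data=apex_bytes,
    payload_key="testkey.pem",
    container_key="testkey",
    container_pw=None,
    apk_keys={},
    codename_to_api_level_map={},
    no_hashtree=False,
)
```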
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 5e4130c..34b7172 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -353,6 +353,8 @@
     if compress_hints:
       build_command.extend(["--compress-hints", compress_hints])
 
+    build_command.extend(["-b", prop_dict.get("erofs_blocksize", "4096")])
+
     build_command.extend(["--mount-point", prop_dict["mount_point"]])
     if target_out:
       build_command.extend(["--product-out", target_out])
@@ -711,6 +713,7 @@
       "erofs_default_compressor",
       "erofs_default_compress_hints",
       "erofs_pcluster_size",
+      "erofs_blocksize",
       "erofs_share_dup_blocks",
       "erofs_sparse_flag",
       "erofs_use_legacy_compression",
@@ -762,6 +765,7 @@
       (True, "{}_erofs_compressor", "erofs_compressor"),
       (True, "{}_erofs_compress_hints", "erofs_compress_hints"),
       (True, "{}_erofs_pcluster_size", "erofs_pcluster_size"),
+      (True, "{}_erofs_blocksize", "erofs_blocksize"),
       (True, "{}_erofs_share_dup_blocks", "erofs_share_dup_blocks"),
       (True, "{}_extfs_inode_count", "extfs_inode_count"),
       (True, "{}_f2fs_compress", "f2fs_compress"),
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index 33624f5..e7d3a18 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -31,7 +31,6 @@
 import zipfile
 
 import common
-from apex_manifest import ParseApexManifest
 
 logger = logging.getLogger(__name__)
 
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 0f3c430..d97611c 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -15,7 +15,6 @@
 from __future__ import print_function
 
 import base64
-import collections
 import copy
 import datetime
 import errno
@@ -23,7 +22,6 @@
 import getopt
 import getpass
 import gzip
-import imp
 import json
 import logging
 import logging.config
@@ -36,17 +34,13 @@
 import stat
 import sys
 import tempfile
-import threading
-import time
 import zipfile
 from dataclasses import dataclass
-from genericpath import isdir
 from hashlib import sha1, sha256
 
 import images
-import rangelib
 import sparse_img
-from blockimgdiff import BlockImageDiff
+
 
 logger = logging.getLogger(__name__)
 
@@ -75,9 +69,7 @@
       if "ANDROID_HOST_OUT" in os.environ:
         self.search_path = os.environ["ANDROID_HOST_OUT"]
     self.signapk_shared_library_path = "lib64"   # Relative to search_path
-    self.sign_sepolicy_path = None
     self.extra_signapk_args = []
-    self.extra_sign_sepolicy_args = []
     self.aapt2_path = "aapt2"
     self.java_path = "java"  # Use the one on the path by default.
     self.java_args = ["-Xmx4096m"]  # The default JVM args.
@@ -97,7 +89,6 @@
     self.cache_size = None
     self.stash_threshold = 0.8
     self.logfile = None
-    self.sepolicy_name = 'sepolicy.apex'
 
 
 OPTIONS = Options()
@@ -158,35 +149,6 @@
         self.partition, self.rollback_index_location, self.pubkey_path)
 
 
-class ErrorCode(object):
-  """Define error_codes for failures that happen during the actual
-  update package installation.
-
-  Error codes 0-999 are reserved for failures before the package
-  installation (i.e. low battery, package verification failure).
-  Detailed code in 'bootable/recovery/error_code.h' """
-
-  SYSTEM_VERIFICATION_FAILURE = 1000
-  SYSTEM_UPDATE_FAILURE = 1001
-  SYSTEM_UNEXPECTED_CONTENTS = 1002
-  SYSTEM_NONZERO_CONTENTS = 1003
-  SYSTEM_RECOVER_FAILURE = 1004
-  VENDOR_VERIFICATION_FAILURE = 2000
-  VENDOR_UPDATE_FAILURE = 2001
-  VENDOR_UNEXPECTED_CONTENTS = 2002
-  VENDOR_NONZERO_CONTENTS = 2003
-  VENDOR_RECOVER_FAILURE = 2004
-  OEM_PROP_MISMATCH = 3000
-  FINGERPRINT_MISMATCH = 3001
-  THUMBPRINT_MISMATCH = 3002
-  OLDER_BUILD = 3003
-  DEVICE_MISMATCH = 3004
-  BAD_PATCH_FILE = 3005
-  INSUFFICIENT_CACHE_SPACE = 3006
-  TUNE_PARTITION_FAILURE = 3007
-  APPLY_PATCH_FAILURE = 3008
-
-
 class ExternalError(RuntimeError):
   pass
 
@@ -213,7 +175,7 @@
           '': {
               'handlers': ['default'],
               'propagate': True,
-              'level': 'WARNING',
+              'level': 'NOTSET',
           }
       }
   }
@@ -1238,26 +1200,16 @@
   system_root_image = info_dict.get('system_root_image') == 'true'
   if info_dict.get('no_recovery') != 'true':
     recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
-    if isinstance(input_file, zipfile.ZipFile):
-      if recovery_fstab_path not in input_file.namelist():
-        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
-    else:
-      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
-      if not os.path.exists(path):
-        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
+    if not DoesInputFileContain(input_file, recovery_fstab_path):
+      recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
     return LoadRecoveryFSTab(
         read_helper, info_dict['fstab_version'], recovery_fstab_path,
         system_root_image)
 
   if info_dict.get('recovery_as_boot') == 'true':
     recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
-    if isinstance(input_file, zipfile.ZipFile):
-      if recovery_fstab_path not in input_file.namelist():
-        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
-    else:
-      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
-      if not os.path.exists(path):
-        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
+    if not DoesInputFileContain(input_file, recovery_fstab_path):
+      recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
     return LoadRecoveryFSTab(
         read_helper, info_dict['fstab_version'], recovery_fstab_path,
         system_root_image)
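
The refactor folds the zip-vs-directory probing into a `DoesInputFileContain` helper. Its definition is outside this hunk; a sketch of what it plausibly does, reconstructed from the two inline branches removed above:

```python
import os
import zipfile

def does_input_file_contain(input_file, path):
    """Reconstructed sketch; the real helper lives elsewhere in common.py."""
    if isinstance(input_file, zipfile.ZipFile):
        return path in input_file.namelist()
    return os.path.exists(os.path.join(input_file, *path.split("/")))
```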
@@ -1950,7 +1902,17 @@
     cmd = [avbtool, "add_hash_footer", "--image", image_path,
            "--partition_size", str(part_size), "--partition_name",
            partition_name]
-    AppendAVBSigningArgs(cmd, partition_name)
+    # Use sha256 of the kernel (or ramdisk) as salt for reproducible builds.
+    salt = None  # Left as None (assumed: default salt) if no component exists.
+    with tempfile.TemporaryDirectory() as tmpdir:
+      RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir])
+      for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]:
+        path = os.path.join(tmpdir, filename)
+        if os.path.exists(path) and os.path.getsize(path):
+          with open(path, "rb") as fp:
+            # The last component present wins as the salt source.
+            salt = sha256(fp.read()).hexdigest()
+    AppendAVBSigningArgs(cmd, partition_name, salt)
     args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
     if args and args.strip():
       split_args = ResolveAVBSigningPathArgs(shlex.split(args))
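
The point of the hunk above is determinism: the same kernel or ramdisk bytes always produce the same AVB salt, so rebuilding an unchanged image yields a bit-identical hash footer. The core idea, as a standalone sketch:

```python
import hashlib

def deterministic_salt(component: bytes) -> str:
    # Same input bytes -> same salt -> reproducible add_hash_footer output.
    return hashlib.sha256(component).hexdigest()
```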
@@ -2629,38 +2589,6 @@
                                                        proc.returncode, stdoutdata))
 
 
-def SignSePolicy(sepolicy, key, password):
-  """Sign the sepolicy zip, producing an fsverity .fsv_sig and
-  an RSA .sig signature files.
-  """
-
-  if OPTIONS.sign_sepolicy_path is None:
-    logger.info("No sign_sepolicy_path specified, %s was not signed", sepolicy)
-    return False
-
-  java_library_path = os.path.join(
-      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
-
-  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
-         ["-Djava.library.path=" + java_library_path,
-          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.sign_sepolicy_path)] +
-         OPTIONS.extra_sign_sepolicy_args)
-
-  cmd.extend([key + OPTIONS.public_key_suffix,
-              key + OPTIONS.private_key_suffix,
-              sepolicy, os.path.dirname(sepolicy)])
-
-  proc = Run(cmd, stdin=subprocess.PIPE)
-  if password is not None:
-    password += "\n"
-  stdoutdata, _ = proc.communicate(password)
-  if proc.returncode != 0:
-    raise ExternalError(
-        "Failed to run sign sepolicy: return code {}:\n{}".format(
-            proc.returncode, stdoutdata))
-  return True
-
-
 def CheckSize(data, target, info_dict):
   """Checks the data string passed against the max size limit.
 
@@ -2836,8 +2764,7 @@
     opts, args = getopt.getopt(
         argv, "hvp:s:x:" + extra_opts,
         ["help", "verbose", "path=", "signapk_path=",
-         "signapk_shared_library_path=", "extra_signapk_args=",
-         "sign_sepolicy_path=", "extra_sign_sepolicy_args=", "aapt2_path=",
+         "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
          "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
          "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
          "verity_signer_path=", "verity_signer_args=", "device_specific=",
@@ -2861,10 +2788,6 @@
       OPTIONS.signapk_shared_library_path = a
     elif o in ("--extra_signapk_args",):
       OPTIONS.extra_signapk_args = shlex.split(a)
-    elif o in ("--sign_sepolicy_path",):
-      OPTIONS.sign_sepolicy_path = a
-    elif o in ("--extra_sign_sepolicy_args",):
-      OPTIONS.extra_sign_sepolicy_args = shlex.split(a)
     elif o in ("--aapt2_path",):
       OPTIONS.aapt2_path = a
     elif o in ("--java_path",):
@@ -3176,107 +3099,6 @@
   zipfile.ZIP64_LIMIT = saved_zip64_limit
 
 
-class DeviceSpecificParams(object):
-  module = None
-
-  def __init__(self, **kwargs):
-    """Keyword arguments to the constructor become attributes of this
-    object, which is passed to all functions in the device-specific
-    module."""
-    for k, v in kwargs.items():
-      setattr(self, k, v)
-    self.extras = OPTIONS.extras
-
-    if self.module is None:
-      path = OPTIONS.device_specific
-      if not path:
-        return
-      try:
-        if os.path.isdir(path):
-          info = imp.find_module("releasetools", [path])
-        else:
-          d, f = os.path.split(path)
-          b, x = os.path.splitext(f)
-          if x == ".py":
-            f = b
-          info = imp.find_module(f, [d])
-        logger.info("loaded device-specific extensions from %s", path)
-        self.module = imp.load_module("device_specific", *info)
-      except ImportError:
-        logger.info("unable to load device-specific module; assuming none")
-
-  def _DoCall(self, function_name, *args, **kwargs):
-    """Call the named function in the device-specific module, passing
-    the given args and kwargs.  The first argument to the call will be
-    the DeviceSpecific object itself.  If there is no module, or the
-    module does not define the function, return the value of the
-    'default' kwarg (which itself defaults to None)."""
-    if self.module is None or not hasattr(self.module, function_name):
-      return kwargs.get("default")
-    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
-
-  def FullOTA_Assertions(self):
-    """Called after emitting the block of assertions at the top of a
-    full OTA package.  Implementations can add whatever additional
-    assertions they like."""
-    return self._DoCall("FullOTA_Assertions")
-
-  def FullOTA_InstallBegin(self):
-    """Called at the start of full OTA installation."""
-    return self._DoCall("FullOTA_InstallBegin")
-
-  def FullOTA_GetBlockDifferences(self):
-    """Called during full OTA installation and verification.
-    Implementation should return a list of BlockDifference objects describing
-    the update on each additional partitions.
-    """
-    return self._DoCall("FullOTA_GetBlockDifferences")
-
-  def FullOTA_InstallEnd(self):
-    """Called at the end of full OTA installation; typically this is
-    used to install the image for the device's baseband processor."""
-    return self._DoCall("FullOTA_InstallEnd")
-
-  def IncrementalOTA_Assertions(self):
-    """Called after emitting the block of assertions at the top of an
-    incremental OTA package.  Implementations can add whatever
-    additional assertions they like."""
-    return self._DoCall("IncrementalOTA_Assertions")
-
-  def IncrementalOTA_VerifyBegin(self):
-    """Called at the start of the verification phase of incremental
-    OTA installation; additional checks can be placed here to abort
-    the script before any changes are made."""
-    return self._DoCall("IncrementalOTA_VerifyBegin")
-
-  def IncrementalOTA_VerifyEnd(self):
-    """Called at the end of the verification phase of incremental OTA
-    installation; additional checks can be placed here to abort the
-    script before any changes are made."""
-    return self._DoCall("IncrementalOTA_VerifyEnd")
-
-  def IncrementalOTA_InstallBegin(self):
-    """Called at the start of incremental OTA installation (after
-    verification is complete)."""
-    return self._DoCall("IncrementalOTA_InstallBegin")
-
-  def IncrementalOTA_GetBlockDifferences(self):
-    """Called during incremental OTA installation and verification.
-    Implementation should return a list of BlockDifference objects describing
-    the update on each additional partitions.
-    """
-    return self._DoCall("IncrementalOTA_GetBlockDifferences")
-
-  def IncrementalOTA_InstallEnd(self):
-    """Called at the end of incremental OTA installation; typically
-    this is used to install the image for the device's baseband
-    processor."""
-    return self._DoCall("IncrementalOTA_InstallEnd")
-
-  def VerifyOTA_Assertions(self):
-    return self._DoCall("VerifyOTA_Assertions")
-
-
 class File(object):
   def __init__(self, name, data, compress_size=None):
     self.name = name
@@ -3306,454 +3128,11 @@
     ZipWriteStr(z, self.name, self.data, compress_type=compression)
 
 
-DIFF_PROGRAM_BY_EXT = {
-    ".gz": "imgdiff",
-    ".zip": ["imgdiff", "-z"],
-    ".jar": ["imgdiff", "-z"],
-    ".apk": ["imgdiff", "-z"],
-    ".img": "imgdiff",
-}
-
-
-class Difference(object):
-  def __init__(self, tf, sf, diff_program=None):
-    self.tf = tf
-    self.sf = sf
-    self.patch = None
-    self.diff_program = diff_program
-
-  def ComputePatch(self):
-    """Compute the patch (as a string of data) needed to turn sf into
-    tf.  Returns the same tuple as GetPatch()."""
-
-    tf = self.tf
-    sf = self.sf
-
-    if self.diff_program:
-      diff_program = self.diff_program
-    else:
-      ext = os.path.splitext(tf.name)[1]
-      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
-
-    ttemp = tf.WriteToTemp()
-    stemp = sf.WriteToTemp()
-
-    ext = os.path.splitext(tf.name)[1]
-
-    try:
-      ptemp = tempfile.NamedTemporaryFile()
-      if isinstance(diff_program, list):
-        cmd = copy.copy(diff_program)
-      else:
-        cmd = [diff_program]
-      cmd.append(stemp.name)
-      cmd.append(ttemp.name)
-      cmd.append(ptemp.name)
-      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      err = []
-
-      def run():
-        _, e = p.communicate()
-        if e:
-          err.append(e)
-      th = threading.Thread(target=run)
-      th.start()
-      th.join(timeout=300)   # 5 mins
-      if th.is_alive():
-        logger.warning("diff command timed out")
-        p.terminate()
-        th.join(5)
-        if th.is_alive():
-          p.kill()
-          th.join()
-
-      if p.returncode != 0:
-        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
-        self.patch = None
-        return None, None, None
-      diff = ptemp.read()
-    finally:
-      ptemp.close()
-      stemp.close()
-      ttemp.close()
-
-    self.patch = diff
-    return self.tf, self.sf, self.patch
-
-  def GetPatch(self):
-    """Returns a tuple of (target_file, source_file, patch_data).
-
-    patch_data may be None if ComputePatch hasn't been called, or if
-    computing the patch failed.
-    """
-    return self.tf, self.sf, self.patch
-
-
-def ComputeDifferences(diffs):
-  """Call ComputePatch on all the Difference objects in 'diffs'."""
-  logger.info("%d diffs to compute", len(diffs))
-
-  # Do the largest files first, to try and reduce the long-pole effect.
-  by_size = [(i.tf.size, i) for i in diffs]
-  by_size.sort(reverse=True)
-  by_size = [i[1] for i in by_size]
-
-  lock = threading.Lock()
-  diff_iter = iter(by_size)   # accessed under lock
-
-  def worker():
-    try:
-      lock.acquire()
-      for d in diff_iter:
-        lock.release()
-        start = time.time()
-        d.ComputePatch()
-        dur = time.time() - start
-        lock.acquire()
-
-        tf, sf, patch = d.GetPatch()
-        if sf.name == tf.name:
-          name = tf.name
-        else:
-          name = "%s (%s)" % (tf.name, sf.name)
-        if patch is None:
-          logger.error("patching failed! %40s", name)
-        else:
-          logger.info(
-              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
-              tf.size, 100.0 * len(patch) / tf.size, name)
-      lock.release()
-    except Exception:
-      logger.exception("Failed to compute diff from worker")
-      raise
-
-  # start worker threads; wait for them all to finish.
-  threads = [threading.Thread(target=worker)
-             for i in range(OPTIONS.worker_threads)]
-  for th in threads:
-    th.start()
-  while threads:
-    threads.pop().join()
-
-
-class BlockDifference(object):
-  def __init__(self, partition, tgt, src=None, check_first_block=False,
-               version=None, disable_imgdiff=False):
-    self.tgt = tgt
-    self.src = src
-    self.partition = partition
-    self.check_first_block = check_first_block
-    self.disable_imgdiff = disable_imgdiff
-
-    if version is None:
-      version = max(
-          int(i) for i in
-          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
-    assert version >= 3
-    self.version = version
-
-    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
-                       version=self.version,
-                       disable_imgdiff=self.disable_imgdiff)
-    self.path = os.path.join(MakeTempDir(), partition)
-    b.Compute(self.path)
-    self._required_cache = b.max_stashed_size
-    self.touched_src_ranges = b.touched_src_ranges
-    self.touched_src_sha1 = b.touched_src_sha1
-
-    # On devices with dynamic partitions, for new partitions,
-    # src is None but OPTIONS.source_info_dict is not.
-    if OPTIONS.source_info_dict is None:
-      is_dynamic_build = OPTIONS.info_dict.get(
-          "use_dynamic_partitions") == "true"
-      is_dynamic_source = False
-    else:
-      is_dynamic_build = OPTIONS.source_info_dict.get(
-          "use_dynamic_partitions") == "true"
-      is_dynamic_source = partition in shlex.split(
-          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
-
-    is_dynamic_target = partition in shlex.split(
-        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
-
-    # For dynamic partitions builds, check partition list in both source
-    # and target build because new partitions may be added, and existing
-    # partitions may be removed.
-    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
-
-    if is_dynamic:
-      self.device = 'map_partition("%s")' % partition
-    else:
-      if OPTIONS.source_info_dict is None:
-        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
-                                              OPTIONS.info_dict)
-      else:
-        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
-                                              OPTIONS.source_info_dict)
-      self.device = device_expr
-
-  @property
-  def required_cache(self):
-    return self._required_cache
-
-  def WriteScript(self, script, output_zip, progress=None,
-                  write_verify_script=False):
-    if not self.src:
-      # write the output unconditionally
-      script.Print("Patching %s image unconditionally..." % (self.partition,))
-    else:
-      script.Print("Patching %s image after verification." % (self.partition,))
-
-    if progress:
-      script.ShowProgress(progress, 0)
-    self._WriteUpdate(script, output_zip)
-
-    if write_verify_script:
-      self.WritePostInstallVerifyScript(script)
-
-  def WriteStrictVerifyScript(self, script):
-    """Verify all the blocks in the care_map, including clobbered blocks.
-
-    This differs from the WriteVerifyScript() function: a) it prints different
-    error messages; b) it doesn't allow half-way updated images to pass the
-    verification."""
-
-    partition = self.partition
-    script.Print("Verifying %s..." % (partition,))
-    ranges = self.tgt.care_map
-    ranges_str = ranges.to_string_raw()
-    script.AppendExtra(
-        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
-        'ui_print("%s has unexpected contents.");' % (
-            self.device, ranges_str,
-            self.tgt.TotalSha1(include_clobbered_blocks=True),
-            self.partition))
-    script.AppendExtra("")
-
-  def WriteVerifyScript(self, script, touched_blocks_only=False):
-    partition = self.partition
-
-    # full OTA
-    if not self.src:
-      script.Print("Image %s will be patched unconditionally." % (partition,))
-
-    # incremental OTA
-    else:
-      if touched_blocks_only:
-        ranges = self.touched_src_ranges
-        expected_sha1 = self.touched_src_sha1
-      else:
-        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
-        expected_sha1 = self.src.TotalSha1()
-
-      # No blocks to be checked, skipping.
-      if not ranges:
-        return
-
-      ranges_str = ranges.to_string_raw()
-      script.AppendExtra(
-          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
-          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
-          '"%s.patch.dat")) then' % (
-              self.device, ranges_str, expected_sha1,
-              self.device, partition, partition, partition))
-      script.Print('Verified %s image...' % (partition,))
-      script.AppendExtra('else')
-
-      if self.version >= 4:
-
-        # Bug: 21124327
-        # When generating incrementals for the system and vendor partitions in
-        # version 4 or newer, explicitly check the first block (which contains
-        # the superblock) of the partition to see if it's what we expect. If
-        # this check fails, give an explicit log message about the partition
-        # having been remounted R/W (the most likely explanation).
-        if self.check_first_block:
-          script.AppendExtra('check_first_block(%s);' % (self.device,))
-
-        # If version >= 4, try block recovery before abort update
-        if partition == "system":
-          code = ErrorCode.SYSTEM_RECOVER_FAILURE
-        else:
-          code = ErrorCode.VENDOR_RECOVER_FAILURE
-        script.AppendExtra((
-            'ifelse (block_image_recover({device}, "{ranges}") && '
-            'block_image_verify({device}, '
-            'package_extract_file("{partition}.transfer.list"), '
-            '"{partition}.new.dat", "{partition}.patch.dat"), '
-            'ui_print("{partition} recovered successfully."), '
-            'abort("E{code}: {partition} partition fails to recover"));\n'
-            'endif;').format(device=self.device, ranges=ranges_str,
-                             partition=partition, code=code))
-
-      # Abort the OTA update. Note that the incremental OTA cannot be applied
-      # even if it may match the checksum of the target partition.
-      # a) If version < 3, operations like move and erase will make changes
-      #    unconditionally and damage the partition.
-      # b) If version >= 3, it won't even reach here.
-      else:
-        if partition == "system":
-          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
-        else:
-          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
-        script.AppendExtra((
-            'abort("E%d: %s partition has unexpected contents");\n'
-            'endif;') % (code, partition))
-
-  def WritePostInstallVerifyScript(self, script):
-    partition = self.partition
-    script.Print('Verifying the updated %s image...' % (partition,))
-    # Unlike pre-install verification, clobbered_blocks should not be ignored.
-    ranges = self.tgt.care_map
-    ranges_str = ranges.to_string_raw()
-    script.AppendExtra(
-        'if range_sha1(%s, "%s") == "%s" then' % (
-            self.device, ranges_str,
-            self.tgt.TotalSha1(include_clobbered_blocks=True)))
-
-    # Bug: 20881595
-    # Verify that extended blocks are really zeroed out.
-    if self.tgt.extended:
-      ranges_str = self.tgt.extended.to_string_raw()
-      script.AppendExtra(
-          'if range_sha1(%s, "%s") == "%s" then' % (
-              self.device, ranges_str,
-              self._HashZeroBlocks(self.tgt.extended.size())))
-      script.Print('Verified the updated %s image.' % (partition,))
-      if partition == "system":
-        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
-      else:
-        code = ErrorCode.VENDOR_NONZERO_CONTENTS
-      script.AppendExtra(
-          'else\n'
-          '  abort("E%d: %s partition has unexpected non-zero contents after '
-          'OTA update");\n'
-          'endif;' % (code, partition))
-    else:
-      script.Print('Verified the updated %s image.' % (partition,))
-
-    if partition == "system":
-      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
-    else:
-      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
-
-    script.AppendExtra(
-        'else\n'
-        '  abort("E%d: %s partition has unexpected contents after OTA '
-        'update");\n'
-        'endif;' % (code, partition))
-
-  def _WriteUpdate(self, script, output_zip):
-    ZipWrite(output_zip,
-             '{}.transfer.list'.format(self.path),
-             '{}.transfer.list'.format(self.partition))
-
-    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
-    # its size. Quailty 9 almost triples the compression time but doesn't
-    # further reduce the size too much. For a typical 1.8G system.new.dat
-    #                       zip  | brotli(quality 6)  | brotli(quality 9)
-    #   compressed_size:    942M | 869M (~8% reduced) | 854M
-    #   compression_time:   75s  | 265s               | 719s
-    #   decompression_time: 15s  | 25s                | 25s
-
-    if not self.src:
-      brotli_cmd = ['brotli', '--quality=6',
-                    '--output={}.new.dat.br'.format(self.path),
-                    '{}.new.dat'.format(self.path)]
-      print("Compressing {}.new.dat with brotli".format(self.partition))
-      RunAndCheckOutput(brotli_cmd)
-
-      new_data_name = '{}.new.dat.br'.format(self.partition)
-      ZipWrite(output_zip,
-               '{}.new.dat.br'.format(self.path),
-               new_data_name,
-               compress_type=zipfile.ZIP_STORED)
-    else:
-      new_data_name = '{}.new.dat'.format(self.partition)
-      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
-
-    ZipWrite(output_zip,
-             '{}.patch.dat'.format(self.path),
-             '{}.patch.dat'.format(self.partition),
-             compress_type=zipfile.ZIP_STORED)
-
-    if self.partition == "system":
-      code = ErrorCode.SYSTEM_UPDATE_FAILURE
-    else:
-      code = ErrorCode.VENDOR_UPDATE_FAILURE
-
-    call = ('block_image_update({device}, '
-            'package_extract_file("{partition}.transfer.list"), '
-            '"{new_data_name}", "{partition}.patch.dat") ||\n'
-            '  abort("E{code}: Failed to update {partition} image.");'.format(
-                device=self.device, partition=self.partition,
-                new_data_name=new_data_name, code=code))
-    script.AppendExtra(script.WordWrap(call))
-
-  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
-    data = source.ReadRangeSet(ranges)
-    ctx = sha1()
-
-    for p in data:
-      ctx.update(p)
-
-    return ctx.hexdigest()
-
-  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
-    """Return the hash value for all zero blocks."""
-    zero_block = '\x00' * 4096
-    ctx = sha1()
-    for _ in range(num_blocks):
-      ctx.update(zero_block)
-
-    return ctx.hexdigest()
-
-
 # Expose these two classes to support vendor-specific scripts
 DataImage = images.DataImage
 EmptyImage = images.EmptyImage
 
 
-# map recovery.fstab's fs_types to mount/format "partition types"
-PARTITION_TYPES = {
-    "ext4": "EMMC",
-    "emmc": "EMMC",
-    "f2fs": "EMMC",
-    "squashfs": "EMMC",
-    "erofs": "EMMC"
-}
-
-
-def GetTypeAndDevice(mount_point, info, check_no_slot=True):
-  """
-  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
-  backwards compatibility. It aborts if the fstab entry has slotselect option
-  (unless check_no_slot is explicitly set to False).
-  """
-  fstab = info["fstab"]
-  if fstab:
-    if check_no_slot:
-      assert not fstab[mount_point].slotselect, \
-          "Use GetTypeAndDeviceExpr instead"
-    return (PARTITION_TYPES[fstab[mount_point].fs_type],
-            fstab[mount_point].device)
-  raise KeyError
-
-
-def GetTypeAndDeviceExpr(mount_point, info):
-  """
-  Return the filesystem of the partition, and an edify expression that evaluates
-  to the device at runtime.
-  """
-  fstab = info["fstab"]
-  if fstab:
-    p = fstab[mount_point]
-    device_expr = '"%s"' % fstab[mount_point].device
-    if p.slotselect:
-      device_expr = 'add_slot_suffix(%s)' % device_expr
-    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
-  raise KeyError
-
 
 def GetEntryForDevice(fstab, device):
   """
@@ -3829,349 +3208,6 @@
   return output
 
 
-def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
-                      info_dict=None):
-  """Generates the recovery-from-boot patch and writes the script to output.
-
-  Most of the space in the boot and recovery images is just the kernel, which is
-  identical for the two, so the resulting patch should be efficient. Add it to
-  the output zip, along with a shell script that is run from init.rc on first
-  boot to actually do the patching and install the new recovery image.
-
-  Args:
-    input_dir: The top-level input directory of the target-files.zip.
-    output_sink: The callback function that writes the result.
-    recovery_img: File object for the recovery image.
-    boot_img: File objects for the boot image.
-    info_dict: A dict returned by common.LoadInfoDict() on the input
-        target_files. Will use OPTIONS.info_dict if None has been given.
-  """
-  if info_dict is None:
-    info_dict = OPTIONS.info_dict
-
-  full_recovery_image = info_dict.get("full_recovery_image") == "true"
-  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
-
-  if board_uses_vendorimage:
-    # In this case, the output sink is rooted at VENDOR
-    recovery_img_path = "etc/recovery.img"
-    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
-    sh_dir = "bin"
-  else:
-    # In this case the output sink is rooted at SYSTEM
-    recovery_img_path = "vendor/etc/recovery.img"
-    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
-    sh_dir = "vendor/bin"
-
-  if full_recovery_image:
-    output_sink(recovery_img_path, recovery_img.data)
-
-  else:
-    system_root_image = info_dict.get("system_root_image") == "true"
-    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
-    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
-    path = os.path.join(input_dir, recovery_resource_dat_path)
-    # With system-root-image, boot and recovery images will have mismatching
-    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
-    # to handle such a case.
-    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
-      diff_program = ["bsdiff"]
-      bonus_args = ""
-      assert not os.path.exists(path)
-    else:
-      diff_program = ["imgdiff"]
-      if os.path.exists(path):
-        diff_program.append("-b")
-        diff_program.append(path)
-        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
-      else:
-        bonus_args = ""
-
-    d = Difference(recovery_img, boot_img, diff_program=diff_program)
-    _, _, patch = d.ComputePatch()
-    output_sink("recovery-from-boot.p", patch)
-
-  try:
-    # The following GetTypeAndDevice()s need to use the path in the target
-    # info_dict instead of source_info_dict.
-    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
-                                              check_no_slot=False)
-    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
-                                                      check_no_slot=False)
-  except KeyError:
-    return
-
-  if full_recovery_image:
-
-    # Note that we use /vendor to refer to the recovery resources. This will
-    # work for a separate vendor partition mounted at /vendor or a
-    # /system/vendor subdirectory on the system partition, for which init will
-    # create a symlink from /vendor to /system/vendor.
-
-    sh = """#!/vendor/bin/sh
-if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
-  applypatch \\
-          --flash /vendor/etc/recovery.img \\
-          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
-      log -t recovery "Installing new recovery image: succeeded" || \\
-      log -t recovery "Installing new recovery image: failed"
-else
-  log -t recovery "Recovery image already installed"
-fi
-""" % {'type': recovery_type,
-       'device': recovery_device,
-       'sha1': recovery_img.sha1,
-       'size': recovery_img.size}
-  else:
-    sh = """#!/vendor/bin/sh
-if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
-  applypatch %(bonus_args)s \\
-          --patch /vendor/recovery-from-boot.p \\
-          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
-          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
-      log -t recovery "Installing new recovery image: succeeded" || \\
-      log -t recovery "Installing new recovery image: failed"
-else
-  log -t recovery "Recovery image already installed"
-fi
-""" % {'boot_size': boot_img.size,
-       'boot_sha1': boot_img.sha1,
-       'recovery_size': recovery_img.size,
-       'recovery_sha1': recovery_img.sha1,
-       'boot_type': boot_type,
-       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
-       'recovery_type': recovery_type,
-       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
-       'bonus_args': bonus_args}
-
-  # The install script location moved from /system/etc to /system/bin in the L
-  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
-  sh_location = os.path.join(sh_dir, "install-recovery.sh")
-
-  logger.info("putting script in %s", sh_location)
-
-  output_sink(sh_location, sh.encode())
-
-
-class DynamicPartitionUpdate(object):
-  def __init__(self, src_group=None, tgt_group=None, progress=None,
-               block_difference=None):
-    self.src_group = src_group
-    self.tgt_group = tgt_group
-    self.progress = progress
-    self.block_difference = block_difference
-
-  @property
-  def src_size(self):
-    if not self.block_difference:
-      return 0
-    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
-
-  @property
-  def tgt_size(self):
-    if not self.block_difference:
-      return 0
-    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
-
-  @staticmethod
-  def _GetSparseImageSize(img):
-    if not img:
-      return 0
-    return img.blocksize * img.total_blocks
-
-
-class DynamicGroupUpdate(object):
-  def __init__(self, src_size=None, tgt_size=None):
-    # None: group does not exist. 0: no size limits.
-    self.src_size = src_size
-    self.tgt_size = tgt_size
-
-
-class DynamicPartitionsDifference(object):
-  def __init__(self, info_dict, block_diffs, progress_dict=None,
-               source_info_dict=None):
-    if progress_dict is None:
-      progress_dict = {}
-
-    self._remove_all_before_apply = False
-    if source_info_dict is None:
-      self._remove_all_before_apply = True
-      source_info_dict = {}
-
-    block_diff_dict = collections.OrderedDict(
-        [(e.partition, e) for e in block_diffs])
-
-    assert len(block_diff_dict) == len(block_diffs), \
-        "Duplicated BlockDifference object for {}".format(
-            [partition for partition, count in
-             collections.Counter(e.partition for e in block_diffs).items()
-             if count > 1])
-
-    self._partition_updates = collections.OrderedDict()
-
-    for p, block_diff in block_diff_dict.items():
-      self._partition_updates[p] = DynamicPartitionUpdate()
-      self._partition_updates[p].block_difference = block_diff
-
-    for p, progress in progress_dict.items():
-      if p in self._partition_updates:
-        self._partition_updates[p].progress = progress
-
-    tgt_groups = shlex.split(info_dict.get(
-        "super_partition_groups", "").strip())
-    src_groups = shlex.split(source_info_dict.get(
-        "super_partition_groups", "").strip())
-
-    for g in tgt_groups:
-      for p in shlex.split(info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
-        assert p in self._partition_updates, \
-            "{} is in target super_{}_partition_list but no BlockDifference " \
-            "object is provided.".format(p, g)
-        self._partition_updates[p].tgt_group = g
-
-    for g in src_groups:
-      for p in shlex.split(source_info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
-        assert p in self._partition_updates, \
-            "{} is in source super_{}_partition_list but no BlockDifference " \
-            "object is provided.".format(p, g)
-        self._partition_updates[p].src_group = g
-
-    target_dynamic_partitions = set(shlex.split(info_dict.get(
-        "dynamic_partition_list", "").strip()))
-    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
-                                  if u.tgt_size)
-    assert block_diffs_with_target == target_dynamic_partitions, \
-        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
-            list(target_dynamic_partitions), list(block_diffs_with_target))
-
-    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
-        "dynamic_partition_list", "").strip()))
-    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
-                                  if u.src_size)
-    assert block_diffs_with_source == source_dynamic_partitions, \
-        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
-            list(source_dynamic_partitions), list(block_diffs_with_source))
-
-    if self._partition_updates:
-      logger.info("Updating dynamic partitions %s",
-                  self._partition_updates.keys())
-
-    self._group_updates = collections.OrderedDict()
-
-    for g in tgt_groups:
-      self._group_updates[g] = DynamicGroupUpdate()
-      self._group_updates[g].tgt_size = int(info_dict.get(
-          "super_%s_group_size" % g, "0").strip())
-
-    for g in src_groups:
-      if g not in self._group_updates:
-        self._group_updates[g] = DynamicGroupUpdate()
-      self._group_updates[g].src_size = int(source_info_dict.get(
-          "super_%s_group_size" % g, "0").strip())
-
-    self._Compute()
-
-  def WriteScript(self, script, output_zip, write_verify_script=False):
-    script.Comment('--- Start patching dynamic partitions ---')
-    for p, u in self._partition_updates.items():
-      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-        script.Comment('Patch partition %s' % p)
-        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
-                                       write_verify_script=False)
-
-    op_list_path = MakeTempFile()
-    with open(op_list_path, 'w') as f:
-      for line in self._op_list:
-        f.write('{}\n'.format(line))
-
-    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
-
-    script.Comment('Update dynamic partition metadata')
-    script.AppendExtra('assert(update_dynamic_partitions('
-                       'package_extract_file("dynamic_partitions_op_list")));')
-
-    if write_verify_script:
-      for p, u in self._partition_updates.items():
-        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-          u.block_difference.WritePostInstallVerifyScript(script)
-          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_size and u.src_size <= u.tgt_size:
-        script.Comment('Patch partition %s' % p)
-        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
-                                       write_verify_script=write_verify_script)
-        if write_verify_script:
-          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
-
-    script.Comment('--- End patching dynamic partitions ---')
-
-  def _Compute(self):
-    self._op_list = list()
-
-    def append(line):
-      self._op_list.append(line)
-
-    def comment(line):
-      self._op_list.append("# %s" % line)
-
-    if self._remove_all_before_apply:
-      comment('Remove all existing dynamic partitions and groups before '
-              'applying full OTA')
-      append('remove_all_groups')
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and not u.tgt_group:
-        append('remove %s' % p)
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
-        comment('Move partition %s from %s to default' % (p, u.src_group))
-        append('move %s default' % p)
-
-    for p, u in self._partition_updates.items():
-      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-        comment('Shrink partition %s from %d to %d' %
-                (p, u.src_size, u.tgt_size))
-        append('resize %s %s' % (p, u.tgt_size))
-
-    for g, u in self._group_updates.items():
-      if u.src_size is not None and u.tgt_size is None:
-        append('remove_group %s' % g)
-      if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size > u.tgt_size):
-        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
-        append('resize_group %s %d' % (g, u.tgt_size))
-
-    for g, u in self._group_updates.items():
-      if u.src_size is None and u.tgt_size is not None:
-        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
-        append('add_group %s %d' % (g, u.tgt_size))
-      if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size < u.tgt_size):
-        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
-        append('resize_group %s %d' % (g, u.tgt_size))
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_group and not u.src_group:
-        comment('Add partition %s to group %s' % (p, u.tgt_group))
-        append('add %s %s' % (p, u.tgt_group))
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_size and u.src_size < u.tgt_size:
-        comment('Grow partition %s from %d to %d' %
-                (p, u.src_size, u.tgt_size))
-        append('resize %s %d' % (p, u.tgt_size))
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
-        comment('Move partition %s from default to %s' %
-                (p, u.tgt_group))
-        append('move %s %s' % (p, u.tgt_group))
-
-
 def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
   """
   Get build.prop from ramdisk within the boot image
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 033c02e..0a7653c 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -16,6 +16,45 @@
 
 import common
 
+# map recovery.fstab's fs_types to mount/format "partition types"
+PARTITION_TYPES = {
+    "ext4": "EMMC",
+    "emmc": "EMMC",
+    "f2fs": "EMMC",
+    "squashfs": "EMMC",
+    "erofs": "EMMC"
+}
+
+
+class ErrorCode(object):
+  """Define error_codes for failures that happen during the actual
+  update package installation.
+
+  Error codes 0-999 are reserved for failures before the package
+  installation (e.g. low battery, package verification failure).
+  Detailed codes are defined in 'bootable/recovery/error_code.h'."""
+
+  SYSTEM_VERIFICATION_FAILURE = 1000
+  SYSTEM_UPDATE_FAILURE = 1001
+  SYSTEM_UNEXPECTED_CONTENTS = 1002
+  SYSTEM_NONZERO_CONTENTS = 1003
+  SYSTEM_RECOVER_FAILURE = 1004
+  VENDOR_VERIFICATION_FAILURE = 2000
+  VENDOR_UPDATE_FAILURE = 2001
+  VENDOR_UNEXPECTED_CONTENTS = 2002
+  VENDOR_NONZERO_CONTENTS = 2003
+  VENDOR_RECOVER_FAILURE = 2004
+  OEM_PROP_MISMATCH = 3000
+  FINGERPRINT_MISMATCH = 3001
+  THUMBPRINT_MISMATCH = 3002
+  OLDER_BUILD = 3003
+  DEVICE_MISMATCH = 3004
+  BAD_PATCH_FILE = 3005
+  INSUFFICIENT_CACHE_SPACE = 3006
+  TUNE_PARTITION_FAILURE = 3007
+  APPLY_PATCH_FAILURE = 3008
+
+
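(Editorial note: these codes surface downstream as an "E<code>" prefix on every abort() message emitted by this generator, which identifies the failure. A minimal, illustrative sketch, mirroring the call sites in this file:)

cmd = 'abort("E{code}: Failed to update {partition} image.");'.format(
    code=ErrorCode.SYSTEM_UPDATE_FAILURE, partition="system")
assert cmd == 'abort("E1001: Failed to update system image.");'
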
 class EdifyGenerator(object):
   """Class to generate scripts in the 'edify' recovery script language
   used from donut onwards."""
@@ -88,7 +127,7 @@
         'abort("E{code}: This package expects the value \\"{values}\\" for '
         '\\"{name}\\"; this has value \\"" + '
         '{get_prop_command} + "\\".");').format(
-            code=common.ErrorCode.OEM_PROP_MISMATCH,
+            code=ErrorCode.OEM_PROP_MISMATCH,
             get_prop_command=get_prop_command, name=name,
             values='\\" or \\"'.join(values))
     self.script.append(cmd)
@@ -101,7 +140,7 @@
                              for i in fp]) +
            ' ||\n    abort("E%d: Package expects build fingerprint of %s; '
            'this device has " + getprop("ro.build.fingerprint") + ".");') % (
-               common.ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
+               ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
     self.script.append(cmd)
 
   def AssertSomeThumbprint(self, *fp):
@@ -112,7 +151,7 @@
                              for i in fp]) +
            ' ||\n    abort("E%d: Package expects build thumbprint of %s; this '
            'device has " + getprop("ro.build.thumbprint") + ".");') % (
-               common.ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
+               ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
     self.script.append(cmd)
 
   def AssertFingerprintOrThumbprint(self, fp, tp):
@@ -133,14 +172,14 @@
         ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
          'abort("E%d: Can\'t install this package (%s) over newer '
          'build (" + getprop("ro.build.date") + ").");') % (
-             timestamp, common.ErrorCode.OLDER_BUILD, timestamp_text))
+             timestamp, ErrorCode.OLDER_BUILD, timestamp_text))
 
   def AssertDevice(self, device):
     """Assert that the device identifier is the given string."""
     cmd = ('getprop("ro.product.device") == "%s" || '
            'abort("E%d: This package is for \\"%s\\" devices; '
            'this is a \\"" + getprop("ro.product.device") + "\\".");') % (
-               device, common.ErrorCode.DEVICE_MISMATCH, device)
+               device, ErrorCode.DEVICE_MISMATCH, device)
     self.script.append(cmd)
 
   def AssertSomeBootloader(self, *bootloaders):
@@ -207,7 +246,7 @@
         'unexpected contents."));').format(
             target=target_expr,
             source=source_expr,
-            code=common.ErrorCode.BAD_PATCH_FILE)))
+            code=ErrorCode.BAD_PATCH_FILE)))
 
   def CacheFreeSpaceCheck(self, amount):
     """Check that there's at least 'amount' space that can be made
@@ -216,7 +255,7 @@
     self.script.append(('apply_patch_space(%d) || abort("E%d: Not enough free '
                         'space on /cache to apply patches.");') % (
                             amount,
-                            common.ErrorCode.INSUFFICIENT_CACHE_SPACE))
+                            ErrorCode.INSUFFICIENT_CACHE_SPACE))
 
   def Mount(self, mount_point, mount_options_by_format=""):
     """Mount the partition with the given mount_point.
@@ -238,7 +277,7 @@
       if p.context is not None:
         mount_flags = p.context + ("," + mount_flags if mount_flags else "")
       self.script.append('mount("%s", "%s", %s, "%s", "%s");' % (
-          p.fs_type, common.PARTITION_TYPES[p.fs_type],
+          p.fs_type, PARTITION_TYPES[p.fs_type],
           self._GetSlotSuffixDeviceForEntry(p),
           p.mount_point, mount_flags))
       self.mounts.add(p.mount_point)
@@ -264,7 +303,7 @@
         'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
         '%s) || abort("E%d: Failed to tune partition %s");' % (
             self._GetSlotSuffixDeviceForEntry(p),
-            common.ErrorCode.TUNE_PARTITION_FAILURE, partition))
+            ErrorCode.TUNE_PARTITION_FAILURE, partition))
 
   def FormatPartition(self, partition):
     """Format the given partition, specified by its mount point (eg,
@@ -274,7 +313,7 @@
     if fstab:
       p = fstab[partition]
       self.script.append('format("%s", "%s", %s, "%s", "%s");' %
-                         (p.fs_type, common.PARTITION_TYPES[p.fs_type],
+                         (p.fs_type, PARTITION_TYPES[p.fs_type],
                           self._GetSlotSuffixDeviceForEntry(p),
                           p.length, p.mount_point))
 
@@ -354,7 +393,7 @@
             target=target_expr,
             source=source_expr,
             patch=patch_expr,
-            code=common.ErrorCode.APPLY_PATCH_FAILURE)))
+            code=ErrorCode.APPLY_PATCH_FAILURE)))
 
   def _GetSlotSuffixDeviceForEntry(self, entry=None):
     """
@@ -388,7 +427,7 @@
     fstab = self.fstab
     if fstab:
       p = fstab[mount_point]
-      partition_type = common.PARTITION_TYPES[p.fs_type]
+      partition_type = PARTITION_TYPES[p.fs_type]
       device = self._GetSlotSuffixDeviceForEntry(p)
       args = {'device': device, 'fn': fn}
       if partition_type == "EMMC":
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index a3e3681..b7a5ad8 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -67,6 +67,7 @@
 OPTIONS.use_fastboot_info = True
 OPTIONS.build_super_image = None
 
+
 def LoadOptions(input_file):
   """Loads information from input_file to OPTIONS.
 
@@ -105,6 +106,13 @@
   common.RunAndCheckOutput(cmd)
 
 
+def LocatePartitionEntry(partition_name, namelist):
+  """Returns the zip entry path for the named partition's image, or None."""
+  for subdir in ["IMAGES", "PREBUILT_IMAGES", "RADIO"]:
+    entry_name = os.path.join(subdir, partition_name + ".img")
+    if entry_name in namelist:
+      return entry_name
+
+
 def EntriesForUserImages(input_file):
   """Returns the user images entries to be copied.
 
@@ -122,13 +130,18 @@
   ]
   if OPTIONS.use_fastboot_info:
     entries.append('META/fastboot-info.txt:fastboot-info.txt')
+  ab_partitions = []
   with zipfile.ZipFile(input_file) as input_zip:
     namelist = input_zip.namelist()
+    if "META/ab_partitions.txt" in namelist:
+      ab_partitions = input_zip.read(
+          "META/ab_partitions.txt").decode().strip().split()
   if 'PREBUILT_IMAGES/kernel_16k' in namelist:
     entries.append('PREBUILT_IMAGES/kernel_16k:kernel_16k')
   if 'PREBUILT_IMAGES/ramdisk_16k.img' in namelist:
     entries.append('PREBUILT_IMAGES/ramdisk_16k.img:ramdisk_16k.img')
 
+  visited_partitions = set(OPTIONS.dynamic_partition_list)
   for image_path in [name for name in namelist if name.startswith('IMAGES/')]:
     image = os.path.basename(image_path)
     if OPTIONS.bootable_only and image not in ('boot.img', 'recovery.img', 'bootloader', 'init_boot.img'):
@@ -143,7 +156,14 @@
         continue
       if image in dynamic_images:
         continue
+    # Note: rstrip(".img") would strip any trailing '.', 'i', 'm', 'g'
+    # characters (e.g. "gki.img" -> "gk"), not the ".img" suffix.
+    partition_name = image[:-len(".img")] if image.endswith(".img") else image
+    visited_partitions.add(partition_name)
     entries.append('{}:{}'.format(image_path, image))
+  for part in [part for part in ab_partitions if part not in visited_partitions]:
+    entry = LocatePartitionEntry(part, namelist)
+    if entry is None:
+      continue
+    image = os.path.basename(entry)
+    entries.append('{}:{}'.format(entry, image))
   return entries
 
 
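(Editorial note: a runnable sketch of the new fallback, POSIX path joining assumed — partitions named in META/ab_partitions.txt but not covered by the IMAGES/ scan are looked up across the known image directories:)

namelist = ["IMAGES/boot.img", "RADIO/modem.img"]
assert LocatePartitionEntry("modem", namelist) == "RADIO/modem.img"
assert LocatePartitionEntry("boot", namelist) == "IMAGES/boot.img"
assert LocatePartitionEntry("dtbo", namelist) is None
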
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 1497d69..397bf23 100644
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -21,6 +21,7 @@
 import sys
 
 import common
+from non_ab_ota import MakeRecoveryPatch
 
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -60,7 +61,7 @@
                            *fn.split("/")), "wb") as f:
       f.write(data)
 
-  common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
+  MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
 
 
 if __name__ == '__main__':
diff --git a/tools/releasetools/non_ab_ota.py b/tools/releasetools/non_ab_ota.py
index 667891c..80c3083 100644
--- a/tools/releasetools/non_ab_ota.py
+++ b/tools/releasetools/non_ab_ota.py
@@ -13,17 +13,25 @@
 # limitations under the License.
 
 import collections
+import copy
+import imp
 import logging
 import os
+import shlex
+import subprocess
+import tempfile
+import threading
+import time
 import zipfile
 
 import common
 import edify_generator
-import verity_utils
+from edify_generator import ErrorCode, PARTITION_TYPES
 from check_target_files_vintf import CheckVintfIfTrebleEnabled, HasPartition
-from common import OPTIONS
+from common import OPTIONS, Run, MakeTempDir, RunAndCheckOutput, ZipWrite, MakeTempFile
 from ota_utils import UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata, PropertyFiles
-import subprocess
+from blockimgdiff import BlockImageDiff
+from hashlib import sha1
 
 logger = logging.getLogger(__name__)
 
@@ -51,10 +59,10 @@
     check_first_block = partition_source_info.fs_type == "ext4"
     # Disable imgdiff because it relies on zlib to produce stable output
     # across different versions, which is often not the case.
-    return common.BlockDifference(name, partition_tgt, partition_src,
-                                  check_first_block,
-                                  version=blockimgdiff_version,
-                                  disable_imgdiff=True)
+    return BlockDifference(name, partition_tgt, partition_src,
+                           check_first_block,
+                           version=blockimgdiff_version,
+                           disable_imgdiff=True)
 
   if source_zip:
     # See notes in common.GetUserImage()
@@ -76,8 +84,8 @@
       tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
                                 info_dict=target_info,
                                 reset_file_map=True)
-      block_diff_dict[partition] = common.BlockDifference(partition, tgt,
-                                                          src=None)
+      block_diff_dict[partition] = BlockDifference(partition, tgt,
+                                                   src=None)
     # Incremental OTA update.
     else:
       block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
@@ -95,7 +103,7 @@
     function_name = "FullOTA_GetBlockDifferences"
 
   if device_specific_diffs:
-    assert all(isinstance(diff, common.BlockDifference)
+    assert all(isinstance(diff, BlockDifference)
                for diff in device_specific_diffs), \
         "{} is not returning a list of BlockDifference objects".format(
             function_name)
@@ -131,7 +139,7 @@
   output_zip = zipfile.ZipFile(
       staging_file, "w", compression=zipfile.ZIP_DEFLATED)
 
-  device_specific = common.DeviceSpecificParams(
+  device_specific = DeviceSpecificParams(
       input_zip=input_zip,
       input_version=target_api_version,
       output_zip=output_zip,
@@ -217,7 +225,7 @@
   if target_info.get('use_dynamic_partitions') == "true":
     # Use empty source_info_dict to indicate that all partitions / groups must
     # be re-added.
-    dynamic_partitions_diff = common.DynamicPartitionsDifference(
+    dynamic_partitions_diff = DynamicPartitionsDifference(
         info_dict=OPTIONS.info_dict,
         block_diffs=block_diff_dict.values(),
         progress_dict=progress_dict)
@@ -309,7 +317,7 @@
   output_zip = zipfile.ZipFile(
       staging_file, "w", compression=zipfile.ZIP_DEFLATED)
 
-  device_specific = common.DeviceSpecificParams(
+  device_specific = DeviceSpecificParams(
       source_zip=source_zip,
       source_version=source_api_version,
       source_tmp=OPTIONS.source_tmp,
@@ -404,9 +412,9 @@
   required_cache_sizes = [diff.required_cache for diff in
                           block_diff_dict.values()]
   if updating_boot:
-    boot_type, boot_device_expr = common.GetTypeAndDeviceExpr("/boot",
-                                                              source_info)
-    d = common.Difference(target_boot, source_boot, "bsdiff")
+    boot_type, boot_device_expr = GetTypeAndDeviceExpr("/boot",
+                                                       source_info)
+    d = Difference(target_boot, source_boot, "bsdiff")
     _, _, d = d.ComputePatch()
     if d is None:
       include_full_boot = True
@@ -461,7 +469,7 @@
     if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
       raise RuntimeError(
           "can't generate incremental that disables dynamic partitions")
-    dynamic_partitions_diff = common.DynamicPartitionsDifference(
+    dynamic_partitions_diff = DynamicPartitionsDifference(
         info_dict=OPTIONS.target_info_dict,
         source_info_dict=OPTIONS.source_info_dict,
         block_diffs=block_diff_dict.values(),
@@ -687,3 +695,881 @@
 
   namelist = target_files_zip.namelist()
   return patch in namelist or img in namelist
+
+
+class DeviceSpecificParams(object):
+  module = None
+
+  def __init__(self, **kwargs):
+    """Keyword arguments to the constructor become attributes of this
+    object, which is passed to all functions in the device-specific
+    module."""
+    for k, v in kwargs.items():
+      setattr(self, k, v)
+    self.extras = OPTIONS.extras
+
+    if self.module is None:
+      path = OPTIONS.device_specific
+      if not path:
+        return
+      try:
+        if os.path.isdir(path):
+          info = imp.find_module("releasetools", [path])
+        else:
+          d, f = os.path.split(path)
+          b, x = os.path.splitext(f)
+          if x == ".py":
+            f = b
+          info = imp.find_module(f, [d])
+        logger.info("loaded device-specific extensions from %s", path)
+        self.module = imp.load_module("device_specific", *info)
+      except ImportError:
+        logger.info("unable to load device-specific module; assuming none")
+
+  def _DoCall(self, function_name, *args, **kwargs):
+    """Call the named function in the device-specific module, passing
+    the given args and kwargs.  The first argument to the call will be
+    the DeviceSpecificParams object itself.  If there is no module, or the
+    module does not define the function, return the value of the
+    'default' kwarg (which itself defaults to None)."""
+    if self.module is None or not hasattr(self.module, function_name):
+      return kwargs.get("default")
+    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
+
+  def FullOTA_Assertions(self):
+    """Called after emitting the block of assertions at the top of a
+    full OTA package.  Implementations can add whatever additional
+    assertions they like."""
+    return self._DoCall("FullOTA_Assertions")
+
+  def FullOTA_InstallBegin(self):
+    """Called at the start of full OTA installation."""
+    return self._DoCall("FullOTA_InstallBegin")
+
+  def FullOTA_GetBlockDifferences(self):
+    """Called during full OTA installation and verification.
+    Implementations should return a list of BlockDifference objects
+    describing the update of each additional partition.
+    """
+    return self._DoCall("FullOTA_GetBlockDifferences")
+
+  def FullOTA_InstallEnd(self):
+    """Called at the end of full OTA installation; typically this is
+    used to install the image for the device's baseband processor."""
+    return self._DoCall("FullOTA_InstallEnd")
+
+  def IncrementalOTA_Assertions(self):
+    """Called after emitting the block of assertions at the top of an
+    incremental OTA package.  Implementations can add whatever
+    additional assertions they like."""
+    return self._DoCall("IncrementalOTA_Assertions")
+
+  def IncrementalOTA_VerifyBegin(self):
+    """Called at the start of the verification phase of incremental
+    OTA installation; additional checks can be placed here to abort
+    the script before any changes are made."""
+    return self._DoCall("IncrementalOTA_VerifyBegin")
+
+  def IncrementalOTA_VerifyEnd(self):
+    """Called at the end of the verification phase of incremental OTA
+    installation; additional checks can be placed here to abort the
+    script before any changes are made."""
+    return self._DoCall("IncrementalOTA_VerifyEnd")
+
+  def IncrementalOTA_InstallBegin(self):
+    """Called at the start of incremental OTA installation (after
+    verification is complete)."""
+    return self._DoCall("IncrementalOTA_InstallBegin")
+
+  def IncrementalOTA_GetBlockDifferences(self):
+    """Called during incremental OTA installation and verification.
+    Implementations should return a list of BlockDifference objects
+    describing the update of each additional partition.
+    """
+    return self._DoCall("IncrementalOTA_GetBlockDifferences")
+
+  def IncrementalOTA_InstallEnd(self):
+    """Called at the end of incremental OTA installation; typically
+    this is used to install the image for the device's baseband
+    processor."""
+    return self._DoCall("IncrementalOTA_InstallEnd")
+
+  def VerifyOTA_Assertions(self):
+    return self._DoCall("VerifyOTA_Assertions")
+
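(Editorial note: the device-specific module these hooks dispatch to is plain Python loaded via imp; each hook receives this DeviceSpecificParams instance as its first argument. A hypothetical sketch of such a module:)

# releasetools.py in the device directory (illustrative names only):
def FullOTA_InstallEnd(info):
  # Constructor kwargs (e.g. info.script, info.output_zip) are attributes.
  info.script.Print("Installing vendor firmware...")

def FullOTA_GetBlockDifferences(info):
  # Must return a list of BlockDifference objects (enforced by the
  # assertion in GetBlockDifferences above).
  return []
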
+
+DIFF_PROGRAM_BY_EXT = {
+    ".gz": "imgdiff",
+    ".zip": ["imgdiff", "-z"],
+    ".jar": ["imgdiff", "-z"],
+    ".apk": ["imgdiff", "-z"],
+    ".img": "imgdiff",
+}
+
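(Editorial note: this table keys ComputePatch()'s default choice of diff program off the target file's extension, with bsdiff as the fallback:)

import os
ext = os.path.splitext("framework.jar")[1]            # ".jar"
assert DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff") == ["imgdiff", "-z"]
assert DIFF_PROGRAM_BY_EXT.get(".bin", "bsdiff") == "bsdiff"
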
+
+class Difference(object):
+  def __init__(self, tf, sf, diff_program=None):
+    self.tf = tf
+    self.sf = sf
+    self.patch = None
+    self.diff_program = diff_program
+
+  def ComputePatch(self):
+    """Compute the patch (as a string of data) needed to turn sf into
+    tf.  Returns the same tuple as GetPatch()."""
+
+    tf = self.tf
+    sf = self.sf
+
+    if self.diff_program:
+      diff_program = self.diff_program
+    else:
+      ext = os.path.splitext(tf.name)[1]
+      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
+
+    ttemp = tf.WriteToTemp()
+    stemp = sf.WriteToTemp()
+
+    try:
+      ptemp = tempfile.NamedTemporaryFile()
+      if isinstance(diff_program, list):
+        cmd = copy.copy(diff_program)
+      else:
+        cmd = [diff_program]
+      cmd.append(stemp.name)
+      cmd.append(ttemp.name)
+      cmd.append(ptemp.name)
+      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+      err = []
+
+      def run():
+        _, e = p.communicate()
+        if e:
+          err.append(e)
+      th = threading.Thread(target=run)
+      th.start()
+      th.join(timeout=300)   # 5 mins
+      if th.is_alive():
+        logger.warning("diff command timed out")
+        p.terminate()
+        th.join(5)
+        if th.is_alive():
+          p.kill()
+          th.join()
+
+      if p.returncode != 0:
+        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
+        self.patch = None
+        return None, None, None
+      diff = ptemp.read()
+    finally:
+      ptemp.close()
+      stemp.close()
+      ttemp.close()
+
+    self.patch = diff
+    return self.tf, self.sf, self.patch
+
+  def GetPatch(self):
+    """Returns a tuple of (target_file, source_file, patch_data).
+
+    patch_data may be None if ComputePatch hasn't been called, or if
+    computing the patch failed.
+    """
+    return self.tf, self.sf, self.patch
+
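(Editorial note: ComputePatch() bounds the external diff program with a watchdog — join the reader thread with a 5-minute timeout, terminate() the process, then kill() if it still won't die. The same pattern as a self-contained sketch using subprocess directly, assumed equivalent to the Run() wrapper:)

import subprocess

proc = subprocess.Popen(["sleep", "600"])  # stand-in for the diff program
try:
  proc.wait(timeout=300)   # 5 mins, matching th.join(timeout=300) above
except subprocess.TimeoutExpired:
  proc.terminate()         # ask politely first
  try:
    proc.wait(timeout=5)
  except subprocess.TimeoutExpired:
    proc.kill()            # then force
    proc.wait()
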
+
+def ComputeDifferences(diffs):
+  """Call ComputePatch on all the Difference objects in 'diffs'."""
+  logger.info("%d diffs to compute", len(diffs))
+
+  # Do the largest files first, to try and reduce the long-pole effect.
+  by_size = [(i.tf.size, i) for i in diffs]
+  by_size.sort(reverse=True)
+  by_size = [i[1] for i in by_size]
+
+  lock = threading.Lock()
+  diff_iter = iter(by_size)   # accessed under lock
+
+  def worker():
+    try:
+      lock.acquire()
+      for d in diff_iter:
+        lock.release()
+        start = time.time()
+        d.ComputePatch()
+        dur = time.time() - start
+        lock.acquire()
+
+        tf, sf, patch = d.GetPatch()
+        if sf.name == tf.name:
+          name = tf.name
+        else:
+          name = "%s (%s)" % (tf.name, sf.name)
+        if patch is None:
+          logger.error("patching failed! %40s", name)
+        else:
+          logger.info(
+              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+              tf.size, 100.0 * len(patch) / tf.size, name)
+      lock.release()
+    except Exception:
+      logger.exception("Failed to compute diff from worker")
+      raise
+
+  # start worker threads; wait for them all to finish.
+  threads = [threading.Thread(target=worker)
+             for i in range(OPTIONS.worker_threads)]
+  for th in threads:
+    th.start()
+  while threads:
+    threads.pop().join()
+
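(Editorial note: ComputeDifferences() is a shared-iterator worker pool — sort the work largest-first, then let N threads drain a single iterator under a lock so the biggest patches start earliest. A self-contained sketch of the pattern:)

import threading

def run_largest_first(items, fn, num_threads=4):
  work = iter(sorted(items, reverse=True))   # largest first
  lock = threading.Lock()

  def worker():
    while True:
      with lock:                             # the iterator is shared state
        item = next(work, None)
      if item is None:
        return
      fn(item)

  threads = [threading.Thread(target=worker) for _ in range(num_threads)]
  for th in threads:
    th.start()
  for th in threads:
    th.join()

run_largest_first([3, 1, 2], print)          # 3 tends to be picked up first
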
+
+class BlockDifference(object):
+  def __init__(self, partition, tgt, src=None, check_first_block=False,
+               version=None, disable_imgdiff=False):
+    self.tgt = tgt
+    self.src = src
+    self.partition = partition
+    self.check_first_block = check_first_block
+    self.disable_imgdiff = disable_imgdiff
+
+    if version is None:
+      version = max(
+          int(i) for i in
+          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    assert version >= 3
+    self.version = version
+
+    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+                       version=self.version,
+                       disable_imgdiff=self.disable_imgdiff)
+    self.path = os.path.join(MakeTempDir(), partition)
+    b.Compute(self.path)
+    self._required_cache = b.max_stashed_size
+    self.touched_src_ranges = b.touched_src_ranges
+    self.touched_src_sha1 = b.touched_src_sha1
+
+    # On devices with dynamic partitions, for new partitions,
+    # src is None but OPTIONS.source_info_dict is not.
+    if OPTIONS.source_info_dict is None:
+      is_dynamic_build = OPTIONS.info_dict.get(
+          "use_dynamic_partitions") == "true"
+      is_dynamic_source = False
+    else:
+      is_dynamic_build = OPTIONS.source_info_dict.get(
+          "use_dynamic_partitions") == "true"
+      is_dynamic_source = partition in shlex.split(
+          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
+
+    is_dynamic_target = partition in shlex.split(
+        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
+
+    # For dynamic partition builds, check the partition list in both the
+    # source and the target build, because new partitions may be added and
+    # existing partitions may be removed.
+    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
+
+    if is_dynamic:
+      self.device = 'map_partition("%s")' % partition
+    else:
+      if OPTIONS.source_info_dict is None:
+        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
+                                              OPTIONS.info_dict)
+      else:
+        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
+                                              OPTIONS.source_info_dict)
+      self.device = device_expr
+
+  @property
+  def required_cache(self):
+    return self._required_cache
+
+  def WriteScript(self, script, output_zip, progress=None,
+                  write_verify_script=False):
+    if not self.src:
+      # write the output unconditionally
+      script.Print("Patching %s image unconditionally..." % (self.partition,))
+    else:
+      script.Print("Patching %s image after verification." % (self.partition,))
+
+    if progress:
+      script.ShowProgress(progress, 0)
+    self._WriteUpdate(script, output_zip)
+
+    if write_verify_script:
+      self.WritePostInstallVerifyScript(script)
+
+  def WriteStrictVerifyScript(self, script):
+    """Verify all the blocks in the care_map, including clobbered blocks.
+
+    This differs from the WriteVerifyScript() function: a) it prints different
+    error messages; b) it doesn't allow half-way updated images to pass the
+    verification."""
+
+    partition = self.partition
+    script.Print("Verifying %s..." % (partition,))
+    ranges = self.tgt.care_map
+    ranges_str = ranges.to_string_raw()
+    script.AppendExtra(
+        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
+        'ui_print("%s has unexpected contents.");' % (
+            self.device, ranges_str,
+            self.tgt.TotalSha1(include_clobbered_blocks=True),
+            self.partition))
+    script.AppendExtra("")
+
+  def WriteVerifyScript(self, script, touched_blocks_only=False):
+    partition = self.partition
+
+    # full OTA
+    if not self.src:
+      script.Print("Image %s will be patched unconditionally." % (partition,))
+
+    # incremental OTA
+    else:
+      if touched_blocks_only:
+        ranges = self.touched_src_ranges
+        expected_sha1 = self.touched_src_sha1
+      else:
+        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+        expected_sha1 = self.src.TotalSha1()
+
+      # No blocks to be checked, skipping.
+      if not ranges:
+        return
+
+      ranges_str = ranges.to_string_raw()
+      script.AppendExtra(
+          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
+          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
+          '"%s.patch.dat")) then' % (
+              self.device, ranges_str, expected_sha1,
+              self.device, partition, partition, partition))
+      script.Print('Verified %s image...' % (partition,))
+      script.AppendExtra('else')
+
+      if self.version >= 4:
+
+        # Bug: 21124327
+        # When generating incrementals for the system and vendor partitions in
+        # version 4 or newer, explicitly check the first block (which contains
+        # the superblock) of the partition to see if it's what we expect. If
+        # this check fails, give an explicit log message about the partition
+        # having been remounted R/W (the most likely explanation).
+        if self.check_first_block:
+          script.AppendExtra('check_first_block(%s);' % (self.device,))
+
+        # If version >= 4, try block recovery before abort update
+        if partition == "system":
+          code = ErrorCode.SYSTEM_RECOVER_FAILURE
+        else:
+          code = ErrorCode.VENDOR_RECOVER_FAILURE
+        script.AppendExtra((
+            'ifelse (block_image_recover({device}, "{ranges}") && '
+            'block_image_verify({device}, '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{partition}.new.dat", "{partition}.patch.dat"), '
+            'ui_print("{partition} recovered successfully."), '
+            'abort("E{code}: {partition} partition fails to recover"));\n'
+            'endif;').format(device=self.device, ranges=ranges_str,
+                             partition=partition, code=code))
+
+      # Abort the OTA update. Note that the incremental OTA cannot be applied
+      # even if it may match the checksum of the target partition.
+      # a) If version < 3, operations like move and erase will make changes
+      #    unconditionally and damage the partition.
+      # b) If version >= 3, it won't even reach here.
+      else:
+        if partition == "system":
+          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
+        else:
+          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
+        script.AppendExtra((
+            'abort("E%d: %s partition has unexpected contents");\n'
+            'endif;') % (code, partition))
+
+  def WritePostInstallVerifyScript(self, script):
+    partition = self.partition
+    script.Print('Verifying the updated %s image...' % (partition,))
+    # Unlike pre-install verification, clobbered_blocks should not be ignored.
+    ranges = self.tgt.care_map
+    ranges_str = ranges.to_string_raw()
+    script.AppendExtra(
+        'if range_sha1(%s, "%s") == "%s" then' % (
+            self.device, ranges_str,
+            self.tgt.TotalSha1(include_clobbered_blocks=True)))
+
+    # Bug: 20881595
+    # Verify that extended blocks are really zeroed out.
+    if self.tgt.extended:
+      ranges_str = self.tgt.extended.to_string_raw()
+      script.AppendExtra(
+          'if range_sha1(%s, "%s") == "%s" then' % (
+              self.device, ranges_str,
+              self._HashZeroBlocks(self.tgt.extended.size())))
+      script.Print('Verified the updated %s image.' % (partition,))
+      if partition == "system":
+        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
+      else:
+        code = ErrorCode.VENDOR_NONZERO_CONTENTS
+      script.AppendExtra(
+          'else\n'
+          '  abort("E%d: %s partition has unexpected non-zero contents after '
+          'OTA update");\n'
+          'endif;' % (code, partition))
+    else:
+      script.Print('Verified the updated %s image.' % (partition,))
+
+    if partition == "system":
+      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
+    else:
+      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
+
+    script.AppendExtra(
+        'else\n'
+        '  abort("E%d: %s partition has unexpected contents after OTA '
+        'update");\n'
+        'endif;' % (code, partition))
+
+  def _WriteUpdate(self, script, output_zip):
+    ZipWrite(output_zip,
+             '{}.transfer.list'.format(self.path),
+             '{}.transfer.list'.format(self.partition))
+
+    # For full OTA, compress the new.dat with brotli at quality 6 to reduce
+    # its size. Quality 9 almost triples the compression time but doesn't
+    # reduce the size much further. For a typical 1.8G system.new.dat
+    #                       zip  | brotli(quality 6)  | brotli(quality 9)
+    #   compressed_size:    942M | 869M (~8% reduced) | 854M
+    #   compression_time:   75s  | 265s               | 719s
+    #   decompression_time: 15s  | 25s                | 25s
+
+    if not self.src:
+      brotli_cmd = ['brotli', '--quality=6',
+                    '--output={}.new.dat.br'.format(self.path),
+                    '{}.new.dat'.format(self.path)]
+      print("Compressing {}.new.dat with brotli".format(self.partition))
+      RunAndCheckOutput(brotli_cmd)
+
+      new_data_name = '{}.new.dat.br'.format(self.partition)
+      ZipWrite(output_zip,
+               '{}.new.dat.br'.format(self.path),
+               new_data_name,
+               compress_type=zipfile.ZIP_STORED)
+    else:
+      new_data_name = '{}.new.dat'.format(self.partition)
+      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
+
+    ZipWrite(output_zip,
+             '{}.patch.dat'.format(self.path),
+             '{}.patch.dat'.format(self.partition),
+             compress_type=zipfile.ZIP_STORED)
+
+    if self.partition == "system":
+      code = ErrorCode.SYSTEM_UPDATE_FAILURE
+    else:
+      code = ErrorCode.VENDOR_UPDATE_FAILURE
+
+    call = ('block_image_update({device}, '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{new_data_name}", "{partition}.patch.dat") ||\n'
+            '  abort("E{code}: Failed to update {partition} image.");'.format(
+                device=self.device, partition=self.partition,
+                new_data_name=new_data_name, code=code))
+    script.AppendExtra(script.WordWrap(call))
+
+  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
+    data = source.ReadRangeSet(ranges)
+    ctx = sha1()
+
+    for p in data:
+      ctx.update(p)
+
+    return ctx.hexdigest()
+
+  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
+    """Return the hash value for all zero blocks."""
+    zero_block = b'\x00' * 4096  # hashlib digests require bytes, not str
+    ctx = sha1()
+    for _ in range(num_blocks):
+      ctx.update(zero_block)
+
+    return ctx.hexdigest()
+
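(Editorial note: concretely, for a partition staged at a hypothetical self.path of /tmp/work/system, the full-OTA branch of _WriteUpdate() above builds:)

brotli_cmd = ['brotli', '--quality=6',
              '--output=/tmp/work/system.new.dat.br',   # hypothetical path
              '/tmp/work/system.new.dat']
# The resulting .br file is added with ZIP_STORED: deflating data that is
# already brotli-compressed would cost CPU for essentially no size win.
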
+
+def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
+                      info_dict=None):
+  """Generates the recovery-from-boot patch and writes the script to output.
+
+  Most of the space in the boot and recovery images is just the kernel, which is
+  identical for the two, so the resulting patch should be efficient. Add it to
+  the output zip, along with a shell script that is run from init.rc on first
+  boot to actually do the patching and install the new recovery image.
+
+  Args:
+    input_dir: The top-level input directory of the target-files.zip.
+    output_sink: The callback function that writes the result.
+    recovery_img: File object for the recovery image.
+    boot_img: File object for the boot image.
+    info_dict: A dict returned by common.LoadInfoDict() on the input
+        target_files. Will use OPTIONS.info_dict if None has been given.
+  """
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  full_recovery_image = info_dict.get("full_recovery_image") == "true"
+  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
+
+  if board_uses_vendorimage:
+    # In this case, the output sink is rooted at VENDOR
+    recovery_img_path = "etc/recovery.img"
+    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
+    sh_dir = "bin"
+  else:
+    # In this case the output sink is rooted at SYSTEM
+    recovery_img_path = "vendor/etc/recovery.img"
+    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
+    sh_dir = "vendor/bin"
+
+  if full_recovery_image:
+    output_sink(recovery_img_path, recovery_img.data)
+
+  else:
+    system_root_image = info_dict.get("system_root_image") == "true"
+    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
+    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
+    path = os.path.join(input_dir, recovery_resource_dat_path)
+    # With system-root-image, boot and recovery images will have mismatching
+    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
+    # to handle such a case.
+    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
+      diff_program = ["bsdiff"]
+      bonus_args = ""
+      assert not os.path.exists(path)
+    else:
+      diff_program = ["imgdiff"]
+      if os.path.exists(path):
+        diff_program.append("-b")
+        diff_program.append(path)
+        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
+      else:
+        bonus_args = ""
+
+    d = Difference(recovery_img, boot_img, diff_program=diff_program)
+    _, _, patch = d.ComputePatch()
+    output_sink("recovery-from-boot.p", patch)
+
+  try:
+    # The following GetTypeAndDevice()s need to use the path in the target
+    # info_dict instead of source_info_dict.
+    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
+                                              check_no_slot=False)
+    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
+                                                      check_no_slot=False)
+  except KeyError:
+    return
+
+  if full_recovery_image:
+
+    # Note that we use /vendor to refer to the recovery resources. This will
+    # work for a separate vendor partition mounted at /vendor or a
+    # /system/vendor subdirectory on the system partition, for which init will
+    # create a symlink from /vendor to /system/vendor.
+
+    sh = """#!/vendor/bin/sh
+if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
+  applypatch \\
+          --flash /vendor/etc/recovery.img \\
+          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
+      log -t recovery "Installing new recovery image: succeeded" || \\
+      log -t recovery "Installing new recovery image: failed"
+else
+  log -t recovery "Recovery image already installed"
+fi
+""" % {'type': recovery_type,
+       'device': recovery_device,
+       'sha1': recovery_img.sha1,
+       'size': recovery_img.size}
+  else:
+    sh = """#!/vendor/bin/sh
+if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
+  applypatch %(bonus_args)s \\
+          --patch /vendor/recovery-from-boot.p \\
+          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
+          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
+      log -t recovery "Installing new recovery image: succeeded" || \\
+      log -t recovery "Installing new recovery image: failed"
+else
+  log -t recovery "Recovery image already installed"
+fi
+""" % {'boot_size': boot_img.size,
+       'boot_sha1': boot_img.sha1,
+       'recovery_size': recovery_img.size,
+       'recovery_sha1': recovery_img.sha1,
+       'boot_type': boot_type,
+       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
+       'recovery_type': recovery_type,
+       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
+       'bonus_args': bonus_args}
+
+  # The install script location moved from /system/etc to /system/bin in the L
+  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
+  sh_location = os.path.join(sh_dir, "install-recovery.sh")
+
+  logger.info("putting script in %s", sh_location)
+
+  output_sink(sh_location, sh.encode())
+
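(Editorial note: substituting hypothetical values into the full_recovery_image template above gives a feel for the generated install-recovery.sh; the device path and sha1 are placeholders:)

values = {'type': 'EMMC',
          'device': '/dev/block/by-name/recovery',               # placeholder
          'sha1': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',    # placeholder
          'size': 34603008}
# "sh % values" then begins:
#   #!/vendor/bin/sh
#   if ! applypatch --check EMMC:/dev/block/by-name/recovery:34603008:da39a3ee...; then
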
+
+class DynamicPartitionUpdate(object):
+  def __init__(self, src_group=None, tgt_group=None, progress=None,
+               block_difference=None):
+    self.src_group = src_group
+    self.tgt_group = tgt_group
+    self.progress = progress
+    self.block_difference = block_difference
+
+  @property
+  def src_size(self):
+    if not self.block_difference:
+      return 0
+    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
+
+  @property
+  def tgt_size(self):
+    if not self.block_difference:
+      return 0
+    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
+
+  @staticmethod
+  def _GetSparseImageSize(img):
+    if not img:
+      return 0
+    return img.blocksize * img.total_blocks
+
+
+class DynamicGroupUpdate(object):
+  def __init__(self, src_size=None, tgt_size=None):
+    # None: group does not exist. 0: no size limits.
+    self.src_size = src_size
+    self.tgt_size = tgt_size
+
+
+class DynamicPartitionsDifference(object):
+  def __init__(self, info_dict, block_diffs, progress_dict=None,
+               source_info_dict=None):
+    if progress_dict is None:
+      progress_dict = {}
+
+    self._remove_all_before_apply = False
+    if source_info_dict is None:
+      self._remove_all_before_apply = True
+      source_info_dict = {}
+
+    block_diff_dict = collections.OrderedDict(
+        [(e.partition, e) for e in block_diffs])
+
+    assert len(block_diff_dict) == len(block_diffs), \
+        "Duplicated BlockDifference object for {}".format(
+            [partition for partition, count in
+             collections.Counter(e.partition for e in block_diffs).items()
+             if count > 1])
+
+    self._partition_updates = collections.OrderedDict()
+
+    for p, block_diff in block_diff_dict.items():
+      self._partition_updates[p] = DynamicPartitionUpdate()
+      self._partition_updates[p].block_difference = block_diff
+
+    for p, progress in progress_dict.items():
+      if p in self._partition_updates:
+        self._partition_updates[p].progress = progress
+
+    tgt_groups = shlex.split(info_dict.get(
+        "super_partition_groups", "").strip())
+    src_groups = shlex.split(source_info_dict.get(
+        "super_partition_groups", "").strip())
+
+    for g in tgt_groups:
+      for p in shlex.split(info_dict.get(
+              "super_%s_partition_list" % g, "").strip()):
+        assert p in self._partition_updates, \
+            "{} is in target super_{}_partition_list but no BlockDifference " \
+            "object is provided.".format(p, g)
+        self._partition_updates[p].tgt_group = g
+
+    for g in src_groups:
+      for p in shlex.split(source_info_dict.get(
+              "super_%s_partition_list" % g, "").strip()):
+        assert p in self._partition_updates, \
+            "{} is in source super_{}_partition_list but no BlockDifference " \
+            "object is provided.".format(p, g)
+        self._partition_updates[p].src_group = g
+
+    target_dynamic_partitions = set(shlex.split(info_dict.get(
+        "dynamic_partition_list", "").strip()))
+    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
+                                  if u.tgt_size)
+    assert block_diffs_with_target == target_dynamic_partitions, \
+        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
+            list(target_dynamic_partitions), list(block_diffs_with_target))
+
+    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
+        "dynamic_partition_list", "").strip()))
+    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
+                                  if u.src_size)
+    assert block_diffs_with_source == source_dynamic_partitions, \
+        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
+            list(source_dynamic_partitions), list(block_diffs_with_source))
+
+    if self._partition_updates:
+      logger.info("Updating dynamic partitions %s",
+                  self._partition_updates.keys())
+
+    self._group_updates = collections.OrderedDict()
+
+    for g in tgt_groups:
+      self._group_updates[g] = DynamicGroupUpdate()
+      self._group_updates[g].tgt_size = int(info_dict.get(
+          "super_%s_group_size" % g, "0").strip())
+
+    for g in src_groups:
+      if g not in self._group_updates:
+        self._group_updates[g] = DynamicGroupUpdate()
+      self._group_updates[g].src_size = int(source_info_dict.get(
+          "super_%s_group_size" % g, "0").strip())
+
+    self._Compute()
+
+  def WriteScript(self, script, output_zip, write_verify_script=False):
+    script.Comment('--- Start patching dynamic partitions ---')
+    for p, u in self._partition_updates.items():
+      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+        script.Comment('Patch partition %s' % p)
+        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
+                                       write_verify_script=False)
+
+    op_list_path = MakeTempFile()
+    with open(op_list_path, 'w') as f:
+      for line in self._op_list:
+        f.write('{}\n'.format(line))
+
+    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
+
+    script.Comment('Update dynamic partition metadata')
+    script.AppendExtra('assert(update_dynamic_partitions('
+                       'package_extract_file("dynamic_partitions_op_list")));')
+
+    if write_verify_script:
+      for p, u in self._partition_updates.items():
+        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+          u.block_difference.WritePostInstallVerifyScript(script)
+          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
+
+    for p, u in self._partition_updates.items():
+      if u.tgt_size and u.src_size <= u.tgt_size:
+        script.Comment('Patch partition %s' % p)
+        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
+                                       write_verify_script=write_verify_script)
+        if write_verify_script:
+          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
+
+    script.Comment('--- End patching dynamic partitions ---')
+
+  def _Compute(self):
+    self._op_list = list()
+
+    def append(line):
+      self._op_list.append(line)
+
+    def comment(line):
+      self._op_list.append("# %s" % line)
+
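+    # Order matters: ops that release space (remove / move out / shrink)
+    # must come before ops that consume it (add / move in / grow).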
+    if self._remove_all_before_apply:
+      comment('Remove all existing dynamic partitions and groups before '
+              'applying full OTA')
+      append('remove_all_groups')
+
+    for p, u in self._partition_updates.items():
+      if u.src_group and not u.tgt_group:
+        append('remove %s' % p)
+
+    for p, u in self._partition_updates.items():
+      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
+        comment('Move partition %s from %s to default' % (p, u.src_group))
+        append('move %s default' % p)
+
+    for p, u in self._partition_updates.items():
+      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+        comment('Shrink partition %s from %d to %d' %
+                (p, u.src_size, u.tgt_size))
+        append('resize %s %d' % (p, u.tgt_size))
+
+    for g, u in self._group_updates.items():
+      if u.src_size is not None and u.tgt_size is None:
+        append('remove_group %s' % g)
+      if (u.src_size is not None and u.tgt_size is not None and
+              u.src_size > u.tgt_size):
+        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
+        append('resize_group %s %d' % (g, u.tgt_size))
+
+    for g, u in self._group_updates.items():
+      if u.src_size is None and u.tgt_size is not None:
+        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
+        append('add_group %s %d' % (g, u.tgt_size))
+      if (u.src_size is not None and u.tgt_size is not None and
+              u.src_size < u.tgt_size):
+        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
+        append('resize_group %s %d' % (g, u.tgt_size))
+
+    for p, u in self._partition_updates.items():
+      if u.tgt_group and not u.src_group:
+        comment('Add partition %s to group %s' % (p, u.tgt_group))
+        append('add %s %s' % (p, u.tgt_group))
+
+    for p, u in self._partition_updates.items():
+      if u.tgt_size and u.src_size < u.tgt_size:
+        comment('Grow partition %s from %d to %d' %
+                (p, u.src_size, u.tgt_size))
+        append('resize %s %d' % (p, u.tgt_size))
+
+    for p, u in self._partition_updates.items():
+      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
+        comment('Move partition %s from default to %s' %
+                (p, u.tgt_group))
+        append('move %s %s' % (p, u.tgt_group))
+
+
+def GetTypeAndDevice(mount_point, info, check_no_slot=True):
+  """
+  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
+  backwards compatibility. It aborts if the fstab entry has the slotselect
+  option (unless check_no_slot is explicitly set to False).
+  """
+  fstab = info["fstab"]
+  if fstab:
+    if check_no_slot:
+      assert not fstab[mount_point].slotselect, \
+          "Use GetTypeAndDeviceExpr instead"
+    return (PARTITION_TYPES[fstab[mount_point].fs_type],
+            fstab[mount_point].device)
+  raise KeyError
+
+
+def GetTypeAndDeviceExpr(mount_point, info):
+  """
+  Return the partition type of the partition, and an edify expression that
+  evaluates to the device path at runtime.
+  """
+  fstab = info["fstab"]
+  if fstab:
+    p = fstab[mount_point]
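+    # For A/B devices the fstab entry carries the slotselect flag; wrapping
+    # the device in add_slot_suffix() makes the updater append the active
+    # slot suffix (e.g. "_a") at install time.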
+    device_expr = '"%s"' % fstab[mount_point].device
+    if p.slotselect:
+      device_expr = 'add_slot_suffix(%s)' % device_expr
+    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
+  raise KeyError
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 7be9876..de0e187 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -995,7 +995,7 @@
   metadata = GetPackageMetadata(target_info, source_info)
   # Generate payload.
   payload = PayloadGenerator(
-      wipe_user_data=OPTIONS.wipe_user_data, minor_version=OPTIONS.force_minor_version, is_partial_update=OPTIONS.partial)
+      wipe_user_data=OPTIONS.wipe_user_data, minor_version=OPTIONS.force_minor_version, is_partial_update=OPTIONS.partial, spl_downgrade=OPTIONS.spl_downgrade)
 
   partition_timestamps_flags = []
   # Enforce a max timestamp this payload can be applied on top of.
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 5c70223..6ca9d64 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -791,7 +791,7 @@
   SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
   SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
 
-  def __init__(self, secondary=False, wipe_user_data=False, minor_version=None, is_partial_update=False):
+  def __init__(self, secondary=False, wipe_user_data=False, minor_version=None, is_partial_update=False, spl_downgrade=False):
     """Initializes a Payload instance.
 
     Args:
@@ -803,6 +803,7 @@
     self.wipe_user_data = wipe_user_data
     self.minor_version = minor_version
     self.is_partial_update = is_partial_update
+    self.spl_downgrade = spl_downgrade
 
   def _Run(self, cmd):  # pylint: disable=no-self-use
     # Don't pipe (buffer) the output if verbose is set. Let
@@ -912,13 +913,15 @@
            "--properties_file=" + properties_file]
     self._Run(cmd)
 
-    if self.secondary:
-      with open(properties_file, "a") as f:
-        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
 
-    if self.wipe_user_data:
-      with open(properties_file, "a") as f:
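+    # These properties steer the device-side updater: POWERWASH=1 wipes
+    # userdata, SWITCH_SLOT_ON_REBOOT=0 keeps the current slot (secondary
+    # payloads), and SPL_DOWNGRADE=1 permits a security-patch-level downgrade.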
+    with open(properties_file, "a") as f:
+      if self.wipe_user_data:
         f.write("POWERWASH=1\n")
+      if self.secondary:
+        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+      if self.spl_downgrade:
+        f.write("SPL_DOWNGRADE=1\n")
 
     self.payload_properties = properties_file
 
diff --git a/tools/releasetools/sign_apex.py b/tools/releasetools/sign_apex.py
index d739982..a0a94f6 100755
--- a/tools/releasetools/sign_apex.py
+++ b/tools/releasetools/sign_apex.py
@@ -56,7 +56,6 @@
 import common
 
 logger = logging.getLogger(__name__)
-OPTIONS = common.OPTIONS
 
 
 def SignApexFile(avbtool, apex_file, payload_key, container_key, no_hashtree,
@@ -75,8 +74,7 @@
       no_hashtree=no_hashtree,
       apk_keys=apk_keys,
       signing_args=signing_args,
-      sign_tool=sign_tool,
-      is_sepolicy=apex_file.endswith(OPTIONS.sepolicy_name))
+      sign_tool=sign_tool)
 
 
 def main(argv):
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 14f0e88..8052821 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -26,7 +26,6 @@
 import common
 import test_utils
 import validate_target_files
-from images import EmptyImage, DataImage
 from rangelib import RangeSet
 
 
@@ -1671,292 +1670,6 @@
                       test_file.name, 'generic_kernel')
 
 
-class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
-  """Checks the format of install-recovery.sh.
-
-  Its format should match between common.py and validate_target_files.py.
-  """
-
-  def setUp(self):
-    self._tempdir = common.MakeTempDir()
-    # Create a fake dict that contains the fstab info for boot&recovery.
-    self._info = {"fstab": {}}
-    fake_fstab = [
-        "/dev/soc.0/by-name/boot /boot emmc defaults defaults",
-        "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"]
-    self._info["fstab"] = common.LoadRecoveryFSTab("\n".join, 2, fake_fstab)
-    # Construct the gzipped recovery.img and boot.img
-    self.recovery_data = bytearray([
-        0x1f, 0x8b, 0x08, 0x00, 0x81, 0x11, 0x02, 0x5a, 0x00, 0x03, 0x2b, 0x4a,
-        0x4d, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x04, 0x00, 0xc9, 0x93, 0x43, 0xf3,
-        0x08, 0x00, 0x00, 0x00
-    ])
-    # echo -n "boot" | gzip -f | hd
-    self.boot_data = bytearray([
-        0x1f, 0x8b, 0x08, 0x00, 0x8c, 0x12, 0x02, 0x5a, 0x00, 0x03, 0x4b, 0xca,
-        0xcf, 0x2f, 0x01, 0x00, 0xc4, 0xae, 0xed, 0x46, 0x04, 0x00, 0x00, 0x00
-    ])
-
-  def _out_tmp_sink(self, name, data, prefix="SYSTEM"):
-    loc = os.path.join(self._tempdir, prefix, name)
-    if not os.path.exists(os.path.dirname(loc)):
-      os.makedirs(os.path.dirname(loc))
-    with open(loc, "wb") as f:
-      f.write(data)
-
-  def test_full_recovery(self):
-    recovery_image = common.File("recovery.img", self.recovery_data)
-    boot_image = common.File("boot.img", self.boot_data)
-    self._info["full_recovery_image"] = "true"
-
-    common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
-                             recovery_image, boot_image, self._info)
-    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
-                                                        self._info)
-
-  @test_utils.SkipIfExternalToolsUnavailable()
-  def test_recovery_from_boot(self):
-    recovery_image = common.File("recovery.img", self.recovery_data)
-    self._out_tmp_sink("recovery.img", recovery_image.data, "IMAGES")
-    boot_image = common.File("boot.img", self.boot_data)
-    self._out_tmp_sink("boot.img", boot_image.data, "IMAGES")
-
-    common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
-                             recovery_image, boot_image, self._info)
-    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
-                                                        self._info)
-    # Validate 'recovery-from-boot' with bonus argument.
-    self._out_tmp_sink("etc/recovery-resource.dat", b"bonus", "SYSTEM")
-    common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
-                             recovery_image, boot_image, self._info)
-    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
-                                                        self._info)
-
-
-class MockBlockDifference(object):
-
-  def __init__(self, partition, tgt, src=None):
-    self.partition = partition
-    self.tgt = tgt
-    self.src = src
-
-  def WriteScript(self, script, _, progress=None,
-                  write_verify_script=False):
-    if progress:
-      script.AppendExtra("progress({})".format(progress))
-    script.AppendExtra("patch({});".format(self.partition))
-    if write_verify_script:
-      self.WritePostInstallVerifyScript(script)
-
-  def WritePostInstallVerifyScript(self, script):
-    script.AppendExtra("verify({});".format(self.partition))
-
-
-class FakeSparseImage(object):
-
-  def __init__(self, size):
-    self.blocksize = 4096
-    self.total_blocks = size // 4096
-    assert size % 4096 == 0, "{} is not a multiple of 4096".format(size)
-
-
-class DynamicPartitionsDifferenceTest(test_utils.ReleaseToolsTestCase):
-
-  @staticmethod
-  def get_op_list(output_path):
-    with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
-      with output_zip.open('dynamic_partitions_op_list') as op_list:
-        return [line.decode().strip() for line in op_list.readlines()
-                if not line.startswith(b'#')]
-
-  def setUp(self):
-    self.script = test_utils.MockScriptWriter()
-    self.output_path = common.MakeTempFile(suffix='.zip')
-
-  def test_full(self):
-    target_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor
-super_partition_groups=group_foo
-super_group_foo_group_size={group_size}
-super_group_foo_partition_list=system vendor
-""".format(group_size=4 * GiB).split("\n"))
-    block_diffs = [MockBlockDifference("system", FakeSparseImage(3 * GiB)),
-                   MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
-
-    dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    self.assertEqual(str(self.script).strip(), """
-assert(update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")));
-patch(system);
-verify(system);
-unmap_partition("system");
-patch(vendor);
-verify(vendor);
-unmap_partition("vendor");
-""".strip())
-
-    lines = self.get_op_list(self.output_path)
-
-    remove_all_groups = lines.index("remove_all_groups")
-    add_group = lines.index("add_group group_foo 4294967296")
-    add_vendor = lines.index("add vendor group_foo")
-    add_system = lines.index("add system group_foo")
-    resize_vendor = lines.index("resize vendor 1073741824")
-    resize_system = lines.index("resize system 3221225472")
-
-    self.assertLess(remove_all_groups, add_group,
-                    "Should add groups after removing all groups")
-    self.assertLess(add_group, min(add_vendor, add_system),
-                    "Should add partitions after adding group")
-    self.assertLess(add_system, resize_system,
-                    "Should resize system after adding it")
-    self.assertLess(add_vendor, resize_vendor,
-                    "Should resize vendor after adding it")
-
-  def test_inc_groups(self):
-    source_info = common.LoadDictionaryFromLines("""
-super_partition_groups=group_foo group_bar group_baz
-super_group_foo_group_size={group_foo_size}
-super_group_bar_group_size={group_bar_size}
-""".format(group_foo_size=4 * GiB, group_bar_size=3 * GiB).split("\n"))
-    target_info = common.LoadDictionaryFromLines("""
-super_partition_groups=group_foo group_baz group_qux
-super_group_foo_group_size={group_foo_size}
-super_group_baz_group_size={group_baz_size}
-super_group_qux_group_size={group_qux_size}
-""".format(group_foo_size=3 * GiB, group_baz_size=4 * GiB,
-           group_qux_size=1 * GiB).split("\n"))
-
-    dp_diff = common.DynamicPartitionsDifference(target_info,
-                                                 block_diffs=[],
-                                                 source_info_dict=source_info)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    lines = self.get_op_list(self.output_path)
-
-    removed = lines.index("remove_group group_bar")
-    shrunk = lines.index("resize_group group_foo 3221225472")
-    grown = lines.index("resize_group group_baz 4294967296")
-    added = lines.index("add_group group_qux 1073741824")
-
-    self.assertLess(max(removed, shrunk),
-                    min(grown, added),
-                    "ops that remove / shrink partitions must precede ops that "
-                    "grow / add partitions")
-
-  def test_incremental(self):
-    source_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product system_ext
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor product system_ext
-""".format(group_foo_size=4 * GiB).split("\n"))
-    target_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product odm
-super_partition_groups=group_foo group_bar
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor odm
-super_group_bar_group_size={group_bar_size}
-super_group_bar_partition_list=product
-""".format(group_foo_size=3 * GiB, group_bar_size=1 * GiB).split("\n"))
-
-    block_diffs = [MockBlockDifference("system", FakeSparseImage(1536 * MiB),
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("vendor", FakeSparseImage(512 * MiB),
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("product", FakeSparseImage(1024 * MiB),
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("system_ext", None,
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
-                                       src=None)]
-
-    dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
-                                                 source_info_dict=source_info)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    metadata_idx = self.script.lines.index(
-        'assert(update_dynamic_partitions(package_extract_file('
-        '"dynamic_partitions_op_list")));')
-    self.assertLess(self.script.lines.index('patch(vendor);'), metadata_idx)
-    self.assertLess(metadata_idx, self.script.lines.index('verify(vendor);'))
-    for p in ("product", "system", "odm"):
-      patch_idx = self.script.lines.index("patch({});".format(p))
-      verify_idx = self.script.lines.index("verify({});".format(p))
-      self.assertLess(metadata_idx, patch_idx,
-                      "Should patch {} after updating metadata".format(p))
-      self.assertLess(patch_idx, verify_idx,
-                      "Should verify {} after patching".format(p))
-
-    self.assertNotIn("patch(system_ext);", self.script.lines)
-
-    lines = self.get_op_list(self.output_path)
-
-    remove = lines.index("remove system_ext")
-    move_product_out = lines.index("move product default")
-    shrink = lines.index("resize vendor 536870912")
-    shrink_group = lines.index("resize_group group_foo 3221225472")
-    add_group_bar = lines.index("add_group group_bar 1073741824")
-    add_odm = lines.index("add odm group_foo")
-    grow_existing = lines.index("resize system 1610612736")
-    grow_added = lines.index("resize odm 1073741824")
-    move_product_in = lines.index("move product group_bar")
-
-    max_idx_move_partition_out_foo = max(remove, move_product_out, shrink)
-    min_idx_move_partition_in_foo = min(add_odm, grow_existing, grow_added)
-
-    self.assertLess(max_idx_move_partition_out_foo, shrink_group,
-                    "Must shrink group after partitions inside group are shrunk"
-                    " / removed")
-
-    self.assertLess(add_group_bar, move_product_in,
-                    "Must add partitions to group after group is added")
-
-    self.assertLess(max_idx_move_partition_out_foo,
-                    min_idx_move_partition_in_foo,
-                    "Must shrink partitions / remove partitions from group"
-                    "before adding / moving partitions into group")
-
-  def test_remove_partition(self):
-    source_info = common.LoadDictionaryFromLines("""
-blockimgdiff_versions=3,4
-use_dynamic_partitions=true
-dynamic_partition_list=foo
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=foo
-""".format(group_foo_size=4 * GiB).split("\n"))
-    target_info = common.LoadDictionaryFromLines("""
-blockimgdiff_versions=3,4
-use_dynamic_partitions=true
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-""".format(group_foo_size=4 * GiB).split("\n"))
-
-    common.OPTIONS.info_dict = target_info
-    common.OPTIONS.target_info_dict = target_info
-    common.OPTIONS.source_info_dict = source_info
-    common.OPTIONS.cache_size = 4 * 4096
-
-    block_diffs = [common.BlockDifference("foo", EmptyImage(),
-                                          src=DataImage("source", pad=True))]
-
-    dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
-                                                 source_info_dict=source_info)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    self.assertNotIn("block_image_update", str(self.script),
-                     "Removed partition should not be patched.")
-
-    lines = self.get_op_list(self.output_path)
-    self.assertEqual(lines, ["remove foo"])
-
-
 class PartitionBuildPropsTest(test_utils.ReleaseToolsTestCase):
   def setUp(self):
     self.odm_build_prop = [
diff --git a/tools/releasetools/test_non_ab_ota.py b/tools/releasetools/test_non_ab_ota.py
index 5207e2f..7a5ccd3 100644
--- a/tools/releasetools/test_non_ab_ota.py
+++ b/tools/releasetools/test_non_ab_ota.py
@@ -15,19 +15,24 @@
 #
 
 import copy
+import os
 import zipfile
 
 import common
 import test_utils
+import validate_target_files
 
-from non_ab_ota import NonAbOtaPropertyFiles, WriteFingerprintAssertion
+from images import EmptyImage, DataImage
+from non_ab_ota import (NonAbOtaPropertyFiles, WriteFingerprintAssertion,
+                        BlockDifference, DynamicPartitionsDifference,
+                        MakeRecoveryPatch)
 from test_utils import PropertyFilesTestCase
 
 
 class NonAbOtaPropertyFilesTest(PropertyFilesTestCase):
   """Additional validity checks specialized for NonAbOtaPropertyFiles."""
+
   def setUp(self):
-     common.OPTIONS.no_signing = False
+    common.OPTIONS.no_signing = False
+
   def test_init(self):
     property_files = NonAbOtaPropertyFiles()
     self.assertEqual('ota-property-files', property_files.name)
@@ -55,7 +60,8 @@
     with zipfile.ZipFile(zip_file) as zip_fp:
       raw_metadata = property_files.GetPropertyFilesString(
           zip_fp, reserve_space=False)
-      property_files_string = property_files.Finalize(zip_fp, len(raw_metadata))
+      property_files_string = property_files.Finalize(
+          zip_fp, len(raw_metadata))
     tokens = self._parse_property_files_string(property_files_string)
 
     self.assertEqual(2, len(tokens))
@@ -77,6 +83,7 @@
 
       property_files.Verify(zip_fp, raw_metadata)
 
+
 class NonAbOTATest(test_utils.ReleaseToolsTestCase):
   TEST_TARGET_INFO_DICT = {
       'build.prop': common.PartitionBuildProps.FromDictionary(
@@ -98,7 +105,7 @@
       ),
       'vendor.build.prop': common.PartitionBuildProps.FromDictionary(
           'vendor', {
-               'ro.vendor.build.fingerprint': 'vendor-build-fingerprint'}
+              'ro.vendor.build.fingerprint': 'vendor-build-fingerprint'}
       ),
       'property1': 'value1',
       'property2': 4096,
@@ -118,6 +125,7 @@
           'ro.product.device': 'device3',
       },
   ]
+
   def test_WriteFingerprintAssertion_without_oem_props(self):
     target_info = common.BuildInfo(self.TEST_TARGET_INFO_DICT, None)
     source_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
@@ -170,3 +178,296 @@
         [('AssertSomeThumbprint', 'build-thumbprint',
           'source-build-thumbprint')],
         script_writer.lines)
+
+
+KiB = 1024
+MiB = 1024 * KiB
+GiB = 1024 * MiB
+
+
+class MockBlockDifference(object):
+
+  def __init__(self, partition, tgt, src=None):
+    self.partition = partition
+    self.tgt = tgt
+    self.src = src
+
+  def WriteScript(self, script, _, progress=None,
+                  write_verify_script=False):
+    if progress:
+      script.AppendExtra("progress({})".format(progress))
+    script.AppendExtra("patch({});".format(self.partition))
+    if write_verify_script:
+      self.WritePostInstallVerifyScript(script)
+
+  def WritePostInstallVerifyScript(self, script):
+    script.AppendExtra("verify({});".format(self.partition))
+
+
+class FakeSparseImage(object):
+
+  def __init__(self, size):
+    self.blocksize = 4096
+    self.total_blocks = size // 4096
+    assert size % 4096 == 0, "{} is not a multiple of 4096".format(size)
+
+
+class DynamicPartitionsDifferenceTest(test_utils.ReleaseToolsTestCase):
+
+  @staticmethod
+  def get_op_list(output_path):
+    with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
+      with output_zip.open('dynamic_partitions_op_list') as op_list:
+        return [line.decode().strip() for line in op_list.readlines()
+                if not line.startswith(b'#')]
+
+  def setUp(self):
+    self.script = test_utils.MockScriptWriter()
+    self.output_path = common.MakeTempFile(suffix='.zip')
+
+  def test_full(self):
+    target_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor
+super_partition_groups=group_foo
+super_group_foo_group_size={group_size}
+super_group_foo_partition_list=system vendor
+""".format(group_size=4 * GiB).split("\n"))
+    block_diffs = [MockBlockDifference("system", FakeSparseImage(3 * GiB)),
+                   MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
+
+    dp_diff = DynamicPartitionsDifference(target_info, block_diffs)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    self.assertEqual(str(self.script).strip(), """
+assert(update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")));
+patch(system);
+verify(system);
+unmap_partition("system");
+patch(vendor);
+verify(vendor);
+unmap_partition("vendor");
+""".strip())
+
+    lines = self.get_op_list(self.output_path)
+
+    remove_all_groups = lines.index("remove_all_groups")
+    add_group = lines.index("add_group group_foo 4294967296")
+    add_vendor = lines.index("add vendor group_foo")
+    add_system = lines.index("add system group_foo")
+    resize_vendor = lines.index("resize vendor 1073741824")
+    resize_system = lines.index("resize system 3221225472")
+
+    self.assertLess(remove_all_groups, add_group,
+                    "Should add groups after removing all groups")
+    self.assertLess(add_group, min(add_vendor, add_system),
+                    "Should add partitions after adding group")
+    self.assertLess(add_system, resize_system,
+                    "Should resize system after adding it")
+    self.assertLess(add_vendor, resize_vendor,
+                    "Should resize vendor after adding it")
+
+  def test_inc_groups(self):
+    source_info = common.LoadDictionaryFromLines("""
+super_partition_groups=group_foo group_bar group_baz
+super_group_foo_group_size={group_foo_size}
+super_group_bar_group_size={group_bar_size}
+""".format(group_foo_size=4 * GiB, group_bar_size=3 * GiB).split("\n"))
+    target_info = common.LoadDictionaryFromLines("""
+super_partition_groups=group_foo group_baz group_qux
+super_group_foo_group_size={group_foo_size}
+super_group_baz_group_size={group_baz_size}
+super_group_qux_group_size={group_qux_size}
+""".format(group_foo_size=3 * GiB, group_baz_size=4 * GiB,
+           group_qux_size=1 * GiB).split("\n"))
+
+    dp_diff = DynamicPartitionsDifference(target_info,
+                                          block_diffs=[],
+                                          source_info_dict=source_info)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    lines = self.get_op_list(self.output_path)
+
+    removed = lines.index("remove_group group_bar")
+    shrunk = lines.index("resize_group group_foo 3221225472")
+    grown = lines.index("resize_group group_baz 4294967296")
+    added = lines.index("add_group group_qux 1073741824")
+
+    self.assertLess(max(removed, shrunk),
+                    min(grown, added),
+                    "ops that remove / shrink partitions must precede ops that "
+                    "grow / add partitions")
+
+  def test_incremental(self):
+    source_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor product system_ext
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=system vendor product system_ext
+""".format(group_foo_size=4 * GiB).split("\n"))
+    target_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor product odm
+super_partition_groups=group_foo group_bar
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=system vendor odm
+super_group_bar_group_size={group_bar_size}
+super_group_bar_partition_list=product
+""".format(group_foo_size=3 * GiB, group_bar_size=1 * GiB).split("\n"))
+
+    block_diffs = [MockBlockDifference("system", FakeSparseImage(1536 * MiB),
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("vendor", FakeSparseImage(512 * MiB),
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("product", FakeSparseImage(1024 * MiB),
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("system_ext", None,
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
+                                       src=None)]
+
+    dp_diff = DynamicPartitionsDifference(target_info, block_diffs,
+                                          source_info_dict=source_info)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    metadata_idx = self.script.lines.index(
+        'assert(update_dynamic_partitions(package_extract_file('
+        '"dynamic_partitions_op_list")));')
+    self.assertLess(self.script.lines.index('patch(vendor);'), metadata_idx)
+    self.assertLess(metadata_idx, self.script.lines.index('verify(vendor);'))
+    for p in ("product", "system", "odm"):
+      patch_idx = self.script.lines.index("patch({});".format(p))
+      verify_idx = self.script.lines.index("verify({});".format(p))
+      self.assertLess(metadata_idx, patch_idx,
+                      "Should patch {} after updating metadata".format(p))
+      self.assertLess(patch_idx, verify_idx,
+                      "Should verify {} after patching".format(p))
+
+    self.assertNotIn("patch(system_ext);", self.script.lines)
+
+    lines = self.get_op_list(self.output_path)
+
+    remove = lines.index("remove system_ext")
+    move_product_out = lines.index("move product default")
+    shrink = lines.index("resize vendor 536870912")
+    shrink_group = lines.index("resize_group group_foo 3221225472")
+    add_group_bar = lines.index("add_group group_bar 1073741824")
+    add_odm = lines.index("add odm group_foo")
+    grow_existing = lines.index("resize system 1610612736")
+    grow_added = lines.index("resize odm 1073741824")
+    move_product_in = lines.index("move product group_bar")
+
+    max_idx_move_partition_out_foo = max(remove, move_product_out, shrink)
+    min_idx_move_partition_in_foo = min(add_odm, grow_existing, grow_added)
+
+    self.assertLess(max_idx_move_partition_out_foo, shrink_group,
+                    "Must shrink group after partitions inside group are shrunk"
+                    " / removed")
+
+    self.assertLess(add_group_bar, move_product_in,
+                    "Must add partitions to group after group is added")
+
+    self.assertLess(max_idx_move_partition_out_foo,
+                    min_idx_move_partition_in_foo,
+                    "Must shrink partitions / remove partitions from group"
+                    "before adding / moving partitions into group")
+
+  def test_remove_partition(self):
+    source_info = common.LoadDictionaryFromLines("""
+blockimgdiff_versions=3,4
+use_dynamic_partitions=true
+dynamic_partition_list=foo
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=foo
+""".format(group_foo_size=4 * GiB).split("\n"))
+    target_info = common.LoadDictionaryFromLines("""
+blockimgdiff_versions=3,4
+use_dynamic_partitions=true
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+""".format(group_foo_size=4 * GiB).split("\n"))
+
+    common.OPTIONS.info_dict = target_info
+    common.OPTIONS.target_info_dict = target_info
+    common.OPTIONS.source_info_dict = source_info
+    common.OPTIONS.cache_size = 4 * 4096
+
+    block_diffs = [BlockDifference("foo", EmptyImage(),
+                                   src=DataImage("source", pad=True))]
+
+    dp_diff = DynamicPartitionsDifference(target_info, block_diffs,
+                                          source_info_dict=source_info)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    self.assertNotIn("block_image_update", str(self.script),
+                     "Removed partition should not be patched.")
+
+    lines = self.get_op_list(self.output_path)
+    self.assertEqual(lines, ["remove foo"])
+
+
+class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
+  """Checks the format of install-recovery.sh.
+
+  Its format should match between common.py and validate_target_files.py.
+  """
+
+  def setUp(self):
+    self._tempdir = common.MakeTempDir()
+    # Create a fake dict that contains the fstab info for boot&recovery.
+    self._info = {"fstab": {}}
+    fake_fstab = [
+        "/dev/soc.0/by-name/boot /boot emmc defaults defaults",
+        "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"]
+    self._info["fstab"] = common.LoadRecoveryFSTab("\n".join, 2, fake_fstab)
+    # Construct the gzipped recovery.img and boot.img
+    self.recovery_data = bytearray([
+        0x1f, 0x8b, 0x08, 0x00, 0x81, 0x11, 0x02, 0x5a, 0x00, 0x03, 0x2b, 0x4a,
+        0x4d, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x04, 0x00, 0xc9, 0x93, 0x43, 0xf3,
+        0x08, 0x00, 0x00, 0x00
+    ])
+    # echo -n "boot" | gzip -f | hd
+    self.boot_data = bytearray([
+        0x1f, 0x8b, 0x08, 0x00, 0x8c, 0x12, 0x02, 0x5a, 0x00, 0x03, 0x4b, 0xca,
+        0xcf, 0x2f, 0x01, 0x00, 0xc4, 0xae, 0xed, 0x46, 0x04, 0x00, 0x00, 0x00
+    ])
+
+  def _out_tmp_sink(self, name, data, prefix="SYSTEM"):
+    loc = os.path.join(self._tempdir, prefix, name)
+    if not os.path.exists(os.path.dirname(loc)):
+      os.makedirs(os.path.dirname(loc))
+    with open(loc, "wb") as f:
+      f.write(data)
+
+  def test_full_recovery(self):
+    recovery_image = common.File("recovery.img", self.recovery_data)
+    boot_image = common.File("boot.img", self.boot_data)
+    self._info["full_recovery_image"] = "true"
+
+    MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+                      recovery_image, boot_image, self._info)
+    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+                                                        self._info)
+
+  @test_utils.SkipIfExternalToolsUnavailable()
+  def test_recovery_from_boot(self):
+    recovery_image = common.File("recovery.img", self.recovery_data)
+    self._out_tmp_sink("recovery.img", recovery_image.data, "IMAGES")
+    boot_image = common.File("boot.img", self.boot_data)
+    self._out_tmp_sink("boot.img", boot_image.data, "IMAGES")
+
+    MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+                      recovery_image, boot_image, self._info)
+    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+                                                        self._info)
+    # Validate 'recovery-from-boot' with bonus argument.
+    self._out_tmp_sink("etc/recovery-resource.dat", b"bonus", "SYSTEM")
+    MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+                      recovery_image, boot_image, self._info)
+    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+                                                        self._info)
diff --git a/tools/releasetools/test_sign_apex.py b/tools/releasetools/test_sign_apex.py
index 7723de7..8470f20 100644
--- a/tools/releasetools/test_sign_apex.py
+++ b/tools/releasetools/test_sign_apex.py
@@ -59,21 +59,6 @@
     self.assertTrue(os.path.exists(signed_test_apex))
 
   @test_utils.SkipIfExternalToolsUnavailable()
-  def test_SignSepolicyApex(self):
-    test_apex = os.path.join(self.testdata_dir, 'sepolicy.apex')
-    payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
-    container_key = os.path.join(self.testdata_dir, 'testkey')
-    apk_keys = {'SEPolicy-33.zip': os.path.join(self.testdata_dir, 'testkey')}
-    signed_test_apex = sign_apex.SignApexFile(
-        'avbtool',
-        test_apex,
-        payload_key,
-        container_key,
-        False,
-        None)
-    self.assertTrue(os.path.exists(signed_test_apex))
-
-  @test_utils.SkipIfExternalToolsUnavailable()
   def test_SignCompressedApexFile(self):
     apex = os.path.join(test_utils.get_current_dir(), 'com.android.apex.compressed.v1.capex')
     payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
diff --git a/tools/releasetools/testdata/sepolicy.apex b/tools/releasetools/testdata/sepolicy.apex
deleted file mode 100644
index 2c646cd..0000000
--- a/tools/releasetools/testdata/sepolicy.apex
+++ /dev/null
Binary files differ
diff --git a/tools/sbom/Android.bp b/tools/sbom/Android.bp
index 519251e..2b2b573 100644
--- a/tools/sbom/Android.bp
+++ b/tools/sbom/Android.bp
@@ -77,3 +77,18 @@
     },
     test_suites: ["general-tests"],
 }
+
+python_binary_host {
+    name: "generate-sbom-framework_res",
+    srcs: [
+        "generate-sbom-framework_res.py",
+    ],
+    version: {
+        py3: {
+            embedded_launcher: true,
+        },
+    },
+    libs: [
+        "sbom_lib",
+    ],
+}
\ No newline at end of file
diff --git a/tools/sbom/generate-sbom-framework_res.py b/tools/sbom/generate-sbom-framework_res.py
new file mode 100644
index 0000000..e637d53
--- /dev/null
+++ b/tools/sbom/generate-sbom-framework_res.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import hashlib
+import json
+import sbom_data
+import sbom_writers
+
+'''
+This script generates the SBOM of the framework_res.jar in layoutlib shipped
+with Android Studio.
+
+The generated SBOM contains placeholders which should be substituted by
+release_layoutlib.sh: the document name, document namespace, organization,
+created timestamp, and the SHA1 checksum of framework_res.jar.
+'''
+
+def get_args():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('-v', '--verbose', action='store_true', default=False,
+                      help='Print more information.')
+  parser.add_argument('--output_file', required=True,
+                      help='The generated SBOM file in SPDX format.')
+  parser.add_argument('--layoutlib_sbom', required=True,
+                      help='The file path of the SBOM of layoutlib.')
+
+  return parser.parse_args()
+
+
+def main():
+  global args
+  args = get_args()
+
+  doc = sbom_data.Document(name='<name>',
+                           namespace='<namespace>',
+                           creators=['Organization: <organization>'],
+                           created='<created>')
+
+  filename = 'data/framework_res.jar'
+  file_id = f'SPDXRef-{sbom_data.encode_for_spdxid(filename)}'
+  file = sbom_data.File(id=file_id, name=filename, checksum='SHA1: <checksum>')
+  doc.files.append(file)
+  doc.describes = file_id
+
+  with open(args.layoutlib_sbom, 'r', encoding='utf-8') as f:
+    layoutlib_sbom = json.load(f)
+
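+  # hashlib.file_digest() (Python 3.11+) streams the file through SHA1; the
+  # digest ties this document's external reference to the exact layoutlib SBOM.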
+  with open(args.layoutlib_sbom, 'rb') as f:
+    sha1 = hashlib.file_digest(f, 'sha1')
+
+  layoutlib_sbom_namespace = layoutlib_sbom[sbom_writers.PropNames.DOCUMENT_NAMESPACE]
+  external_doc_ref = 'DocumentRef-layoutlib'
+  doc.external_refs = [
+    sbom_data.DocumentExternalReference(external_doc_ref, layoutlib_sbom_namespace,
+                                        f'SHA1: {sha1.hexdigest()}')]
+
+  resource_file_spdxids = []
+  for file in layoutlib_sbom[sbom_writers.PropNames.FILES]:
+    if file[sbom_writers.PropNames.FILE_NAME].startswith('data/res/'):
+      resource_file_spdxids.append(file[sbom_writers.PropNames.SPDXID])
+
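+  # framework_res.jar is produced from layoutlib's data/res/ files, so record
+  # a GENERATED_FROM relationship into the external layoutlib document for
+  # each of them.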
+  doc.relationships = []
+  for spdxid in resource_file_spdxids:
+    doc.relationships.append(
+      sbom_data.Relationship(file_id, sbom_data.RelationshipType.GENERATED_FROM,
+                             f'{external_doc_ref}:{spdxid}'))
+
+  # Write the SBOM file.
+  with open(args.output_file, 'w', encoding='utf-8') as f:
+    sbom_writers.JSONWriter.write(doc, f)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/tools/sbom/generate-sbom.py b/tools/sbom/generate-sbom.py
index b19be87..5eae262 100755
--- a/tools/sbom/generate-sbom.py
+++ b/tools/sbom/generate-sbom.py
@@ -82,6 +82,46 @@
   'vndk_prebuilt_shared',
 ]
 
+THIRD_PARTY_IDENTIFIER_TYPES = [
+    # Types defined in metadata_file.proto
+    'Git',
+    'SVN',
+    'Hg',
+    'Darcs',
+    'VCS',
+    'Archive',
+    'PrebuiltByAlphabet',
+    'LocalSource',
+    'Other',
+    # OSV ecosystems defined at https://ossf.github.io/osv-schema/#affectedpackage-field.
+    'Go',
+    'npm',
+    'OSS-Fuzz',
+    'PyPI',
+    'RubyGems',
+    'crates.io',
+    'Hackage',
+    'GHC',
+    'Packagist',
+    'Maven',
+    'NuGet',
+    'Linux',
+    'Debian',
+    'Alpine',
+    'Hex',
+    'Android',
+    'GitHub Actions',
+    'Pub',
+    'ConanCenter',
+    'Rocky Linux',
+    'AlmaLinux',
+    'Bitnami',
+    'Photon OS',
+    'CRAN',
+    'Bioconductor',
+    'SwiftURL'
+]
+
 
 def get_args():
   parser = argparse.ArgumentParser()
@@ -90,6 +130,7 @@
   parser.add_argument('--metadata', required=True, help='The SBOM metadata file path.')
   parser.add_argument('--build_version', required=True, help='The build version.')
   parser.add_argument('--product_mfr', required=True, help='The product manufacturer.')
+  parser.add_argument('--module_name', help='The module name. If specified, the generated SBOM is for the module.')
   parser.add_argument('--json', action='store_true', default=False, help='Generated SBOM file in SPDX JSON format')
   parser.add_argument('--unbundled_apk', action='store_true', default=False, help='Generate SBOM for unbundled APKs')
   parser.add_argument('--unbundled_apex', action='store_true', default=False, help='Generate SBOM for unbundled APEXs')
@@ -103,26 +144,12 @@
       print(i)
 
 
-def encode_for_spdxid(s):
-  """Simple encode for string values used in SPDXID which uses the charset of A-Za-Z0-9.-"""
-  result = ''
-  for c in s:
-    if c.isalnum() or c in '.-':
-      result += c
-    elif c in '_@/':
-      result += '-'
-    else:
-      result += '0x' + c.encode('utf-8').hex()
-
-  return result.lstrip('-')
-
-
 def new_package_id(package_name, type):
-  return f'SPDXRef-{type}-{encode_for_spdxid(package_name)}'
+  return f'SPDXRef-{type}-{sbom_data.encode_for_spdxid(package_name)}'
 
 
 def new_file_id(file_path):
-  return f'SPDXRef-{encode_for_spdxid(file_path)}'
+  return f'SPDXRef-{sbom_data.encode_for_spdxid(file_path)}'
 
 
 def checksum(file_path):
@@ -360,6 +387,20 @@
   return True
 
 
+# Validate identifiers in a package's METADATA:
+# 1) Only known identifier types are allowed.
+# 2) At most one identifier may have primary_source set to true.
+def validate_package_metadata(metadata_file_path, package_metadata):
+  primary_source_found = False
+  for identifier in package_metadata.third_party.identifier:
+    if identifier.type not in THIRD_PARTY_IDENTIFIER_TYPES:
+      sys.exit(f'Unknown value of third_party.identifier.type in {metadata_file_path}/METADATA: {identifier.type}.')
+    if primary_source_found and identifier.primary_source:
+      sys.exit(
+        f'Field "primary_source" is set to true in multiple third_party.identifier in {metadata_file_path}/METADATA.')
+    primary_source_found = identifier.primary_source
+
+
 def report_metadata_file(metadata_file_path, installed_file_metadata, report):
   if metadata_file_path:
     report[INFO_METADATA_FOUND_FOR_PACKAGE].append(
@@ -372,6 +413,8 @@
     with open(metadata_file_path + '/METADATA', 'rt') as f:
       text_format.Parse(f.read(), package_metadata)
 
+    validate_package_metadata(metadata_file_path, package_metadata)
+
     if not metadata_file_path in metadata_file_protos:
       metadata_file_protos[metadata_file_path] = package_metadata
       if not package_metadata.name:
@@ -441,16 +484,25 @@
   global metadata_file_protos
   metadata_file_protos = {}
 
-  product_package = sbom_data.Package(id=sbom_data.SPDXID_PRODUCT,
-                                      name=sbom_data.PACKAGE_NAME_PRODUCT,
+  product_package_id = sbom_data.SPDXID_PRODUCT
+  product_package_name = sbom_data.PACKAGE_NAME_PRODUCT
+  if args.module_name:
+    # Build SBOM of a module so use the module name instead.
+    product_package_id = f'SPDXRef-{sbom_data.encode_for_spdxid(args.module_name)}'
+    product_package_name = args.module_name
+  product_package = sbom_data.Package(id=product_package_id,
+                                      name=product_package_name,
                                       download_location=sbom_data.VALUE_NONE,
                                       version=args.build_version,
                                       supplier='Organization: ' + args.product_mfr,
                                       files_analyzed=True)
-
-  doc = sbom_data.Document(name=args.build_version,
-                           namespace=f'https://www.google.com/sbom/spdx/android/{args.build_version}',
-                           creators=['Organization: ' + args.product_mfr])
+  doc_name = args.build_version
+  if args.module_name:
+    doc_name = f'{args.build_version}/{args.module_name}'
+  doc = sbom_data.Document(name=doc_name,
+                           namespace=f'https://www.google.com/sbom/spdx/android/{doc_name}',
+                           creators=['Organization: ' + args.product_mfr],
+                           describes=product_package_id)
   if not args.unbundled_apex:
     doc.packages.append(product_package)
 
diff --git a/tools/sbom/sbom_data.py b/tools/sbom/sbom_data.py
index 71f8660..b5ac8a5 100644
--- a/tools/sbom/sbom_data.py
+++ b/tools/sbom/sbom_data.py
@@ -138,3 +138,16 @@
       h = hashlib.sha1()
       h.update(''.join(checksums).encode(encoding='utf-8'))
       package.verification_code = h.hexdigest()
+
+
+def encode_for_spdxid(s):
+  """Simple encode for string values used in SPDXID which uses the charset of A-Za-Z0-9.-"""
+  result = ''
+  for c in s:
+    if c.isalnum() or c in '.-':
+      result += c
+    elif c in '_@/':
+      result += '-'
+    else:
+      result += '0x' + c.encode('utf-8').hex()
+
+  return result.lstrip('-')
\ No newline at end of file
diff --git a/tools/signapk/src/com/android/signapk/SignApk.java b/tools/signapk/src/com/android/signapk/SignApk.java
index 25c53d3..2f2b833 100644
--- a/tools/signapk/src/com/android/signapk/SignApk.java
+++ b/tools/signapk/src/com/android/signapk/SignApk.java
@@ -687,7 +687,7 @@
         if (entryName.endsWith(".so")) {
             // Align .so contents to memory page boundary to enable memory-mapped
             // execution.
-            return 4096;
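+            // Use 16 KB so .so entries stay page-aligned on devices with
+            // 16 KB memory pages; 4 KB-page devices remain aligned as well.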
+            return 16384;
         } else {
             return defaultAlignment;
         }