Merge "Add support for auto-generated characteristics RRO" into main
diff --git a/core/Makefile b/core/Makefile
index 79c8a17..09c815e 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1167,13 +1167,16 @@
 .PHONY: bootimage_16k
 
 BUILT_BOOT_OTA_PACKAGE_16K := $(PRODUCT_OUT)/boot_ota_16k.zip
-$(BUILT_BOOT_OTA_PACKAGE_16K): $(OTA_FROM_RAW_IMG) $(BUILT_BOOTIMAGE_16K_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
+$(BUILT_BOOT_OTA_PACKAGE_16K): $(OTA_FROM_RAW_IMG) $(BUILT_BOOTIMAGE_16K_TARGET) $(INSTALLED_BOOTIMAGE_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
 	$(OTA_FROM_RAW_IMG) --package_key $(DEFAULT_SYSTEM_DEV_CERTIFICATE) \
                       --max_timestamp `cat $(BUILD_DATETIME_FILE)` \
                       --path $(HOST_OUT) \
                       --partition_name boot \
                       --output $@ \
-                      $(BUILT_BOOTIMAGE_16K_TARGET)
+                      $(if $(BOARD_16K_OTA_USE_INCREMENTAL),\
+                        $(INSTALLED_BOOTIMAGE_TARGET):$(BUILT_BOOTIMAGE_16K_TARGET),\
+                        $(BUILT_BOOTIMAGE_16K_TARGET)\
+                      )
 
 boototapackage_16k: $(BUILT_BOOT_OTA_PACKAGE_16K)
 .PHONY: boototapackage_16k
@@ -1503,13 +1506,16 @@
 
 ifneq ($(BOARD_KERNEL_PATH_16K),)
 BUILT_BOOT_OTA_PACKAGE_4K := $(PRODUCT_OUT)/boot_ota_4k.zip
-$(BUILT_BOOT_OTA_PACKAGE_4K): $(OTA_FROM_RAW_IMG) $(INSTALLED_BOOTIMAGE_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
+$(BUILT_BOOT_OTA_PACKAGE_4K): $(OTA_FROM_RAW_IMG) $(INSTALLED_BOOTIMAGE_TARGET) $(BUILT_BOOTIMAGE_16K_TARGET) $(DEFAULT_SYSTEM_DEV_CERTIFICATE).pk8
 	$(OTA_FROM_RAW_IMG) --package_key $(DEFAULT_SYSTEM_DEV_CERTIFICATE) \
                       --max_timestamp `cat $(BUILD_DATETIME_FILE)` \
                       --path $(HOST_OUT) \
                       --partition_name boot \
                       --output $@ \
-                      $(INSTALLED_BOOTIMAGE_TARGET)
+                      $(if $(BOARD_16K_OTA_USE_INCREMENTAL),\
+                        $(BUILT_BOOTIMAGE_16K_TARGET):$(INSTALLED_BOOTIMAGE_TARGET),\
+                        $(INSTALLED_BOOTIMAGE_TARGET)\
+                      )
 
 boototapackage_4k: $(BUILT_BOOT_OTA_PACKAGE_4K)
 .PHONY: boototapackage_4k
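
For illustration, both recipes now hand ota_from_raw_img a "source:target" pair when BOARD_16K_OTA_USE_INCREMENTAL is set, so each boot OTA package can be generated as an incremental update from the other page-size image. A rough Python sketch of the argument selection (the function name and flag handling are illustrative, not the tool's actual interface):

    def boot_ota_image_args(source_img, target_img, use_incremental):
        # Mirrors the $(if $(BOARD_16K_OTA_USE_INCREMENTAL),src:dst,dst)
        # selection above: an incremental package is described by a
        # "source:target" pair, a full package by the target image alone.
        return [f"{source_img}:{target_img}"] if use_incremental else [target_img]

    # boot_ota_16k.zip: update from the installed 4K boot image to the 16K one.
    print(boot_ota_image_args("boot.img", "boot-16k.img", use_incremental=True))
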
diff --git a/core/art_config.mk b/core/art_config.mk
index 1ea05db..f47a8e2 100644
--- a/core/art_config.mk
+++ b/core/art_config.mk
@@ -44,3 +44,17 @@
 endif
 
 ADDITIONAL_PRODUCT_PROPERTIES += ro.dalvik.vm.enable_uffd_gc=$(ENABLE_UFFD_GC)
+
+# Create APEX_BOOT_JARS_EXCLUDED, the list of jars to be removed from
+# ApexBootJars when built from mainline prebuilts.
+# Soong config variables indicate whether the prebuilt is enabled:
+# - $(m)_module/source_build for art and TOGGLEABLE_PREBUILT_MODULES
+# - ANDROID/module_build_from_source for other mainline modules
+APEX_BOOT_JARS_EXCLUDED :=
+$(foreach pair, $(PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY),\
+  $(eval m := $(subst com.android.,,$(call word-colon,1,$(pair)))) \
+  $(if $(call soong_config_get,$(m)_module,source_build), \
+    $(if $(filter true,$(call soong_config_get,$(m)_module,source_build)),, \
+      $(eval APEX_BOOT_JARS_EXCLUDED += $(pair))), \
+    $(if $(filter true,$(call soong_config_get,ANDROID,module_build_from_source)),, \
+      $(eval APEX_BOOT_JARS_EXCLUDED += $(pair)))))
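
A Python sketch of this exclusion loop, assuming a hypothetical soong_config lookup of the form {namespace: {var: value}} standing in for $(call soong_config_get,...):

    def excluded_apex_boot_jars(source_only_jars, soong_config):
        excluded = []
        for pair in source_only_jars:  # e.g. "com.android.mediaprovider:framework-pdf"
            module = pair.split(":")[0].removeprefix("com.android.")
            ns = f"{module}_module"
            if "source_build" in soong_config.get(ns, {}):
                # ART and TOGGLEABLE_PREBUILT_MODULES: per-module switch.
                if soong_config[ns]["source_build"] != "true":
                    excluded.append(pair)
            elif soong_config.get("ANDROID", {}).get("module_build_from_source") != "true":
                # Other mainline modules: global build-from-source switch.
                excluded.append(pair)
        return excluded
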
diff --git a/core/board_config.mk b/core/board_config.mk
index b7ca3a4..537fb73 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -202,7 +202,7 @@
 
 # Conditional to building on linux, as dex2oat currently does not work on darwin.
 ifeq ($(HOST_OS),linux)
-  WITH_DEXPREOPT := true
+  WITH_DEXPREOPT ?= true
 endif
 
 # ###############################################################
@@ -989,6 +989,21 @@
 endif
 
 ###########################################
+# BOARD_API_LEVEL for vendor API surface
+ifdef RELEASE_BOARD_API_LEVEL
+  ifdef BOARD_API_LEVEL
+    $(error BOARD_API_LEVEL must not be set manually. The build system automatically sets this value.)
+  endif
+  BOARD_API_LEVEL := $(RELEASE_BOARD_API_LEVEL)
+  .KATI_READONLY := BOARD_API_LEVEL
+
+  ifdef RELEASE_BOARD_API_LEVEL_FROZEN
+    BOARD_API_LEVEL_FROZEN := true
+    .KATI_READONLY := BOARD_API_LEVEL_FROZEN
+  endif
+endif
+
+###########################################
 # Handle BUILD_BROKEN_USES_BUILD_*
 
 $(foreach m,$(DEFAULT_WARNING_BUILD_MODULE_TYPES),\
diff --git a/core/config.mk b/core/config.mk
index c747fd5..90cc33c 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -427,10 +427,10 @@
 endif
 .KATI_READONLY := TARGET_MAX_PAGE_SIZE_SUPPORTED
 
-# Only arm64 arch supports TARGET_MAX_PAGE_SIZE_SUPPORTED greater than 4096.
+# Only arm64 and x86_64 archs support TARGET_MAX_PAGE_SIZE_SUPPORTED greater than 4096.
 ifneq ($(TARGET_MAX_PAGE_SIZE_SUPPORTED),4096)
-  ifneq ($(TARGET_ARCH),arm64)
-    $(error TARGET_MAX_PAGE_SIZE_SUPPORTED=$(TARGET_MAX_PAGE_SIZE_SUPPORTED) is greater than 4096. Only supported in arm64 arch)
+  ifeq (,$(filter arm64 x86_64,$(TARGET_ARCH)))
+    $(error TARGET_MAX_PAGE_SIZE_SUPPORTED=$(TARGET_MAX_PAGE_SIZE_SUPPORTED) is greater than 4096. Only supported in arm64 and x86_64 archs)
   endif
 endif
 
@@ -813,7 +813,11 @@
 requirements :=
 
 # Set default value of KEEP_VNDK.
-KEEP_VNDK ?= true
+ifeq ($(RELEASE_DEPRECATE_VNDK),true)
+  KEEP_VNDK ?= false
+else
+  KEEP_VNDK ?= true
+endif
 
 # BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED can be true only if early-mount of
 # partitions is supported. But the early-mount must be supported for full
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 6739459..10fbe8f 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -58,25 +58,10 @@
 
 # Conditional to building on linux, as dex2oat currently does not work on darwin.
 ifeq ($(HOST_OS),linux)
-  ifeq (eng,$(TARGET_BUILD_VARIANT))
-    # For an eng build only pre-opt the boot image and system server. This gives reasonable performance
-    # and still allows a simple workflow: building in frameworks/base and syncing.
-    WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY ?= true
-  endif
   # Add mini-debug-info to the boot classpath unless explicitly asked not to.
   ifneq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
     PRODUCT_DEX_PREOPT_BOOT_FLAGS += --generate-mini-debug-info
   endif
-
-  # Non eng linux builds must have preopt enabled so that system server doesn't run as interpreter
-  # only. b/74209329
-  ifeq (,$(filter eng, $(TARGET_BUILD_VARIANT)))
-    ifneq (true,$(WITH_DEXPREOPT))
-      ifneq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
-        $(call pretty-error, DEXPREOPT must be enabled for user and userdebug builds)
-      endif
-    endif
-  endif
 endif
 
 # Get value of a property. It is first searched from PRODUCT_VENDOR_PROPERTIES
@@ -100,7 +85,7 @@
   $(call add_json_bool, DisablePreopt,                           $(call invert_bool,$(ENABLE_PREOPT)))
   $(call add_json_bool, DisablePreoptBootImages,                 $(call invert_bool,$(ENABLE_PREOPT_BOOT_IMAGES)))
   $(call add_json_list, DisablePreoptModules,                    $(DEXPREOPT_DISABLED_MODULES))
-  $(call add_json_bool, OnlyPreoptBootImageAndSystemServer,      $(filter true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)))
+  $(call add_json_bool, OnlyPreoptArtBootImage            ,      $(filter true,$(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY)))
   $(call add_json_bool, PreoptWithUpdatableBcp,                  $(filter true,$(DEX_PREOPT_WITH_UPDATABLE_BCP)))
   $(call add_json_bool, DontUncompressPrivAppsDex,               $(filter true,$(DONT_UNCOMPRESS_PRIV_APPS_DEXS)))
   $(call add_json_list, ModulesLoadedByPrivilegedModules,        $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES))
@@ -109,7 +94,7 @@
   $(call add_json_bool, DisableGenerateProfile,                  $(filter false,$(WITH_DEX_PREOPT_GENERATE_PROFILE)))
   $(call add_json_str,  ProfileDir,                              $(PRODUCT_DEX_PREOPT_PROFILE_DIR))
   $(call add_json_list, BootJars,                                $(PRODUCT_BOOT_JARS))
-  $(call add_json_list, ApexBootJars,                            $(PRODUCT_APEX_BOOT_JARS))
+  $(call add_json_list, ApexBootJars,                            $(filter-out $(APEX_BOOT_JARS_EXCLUDED), $(PRODUCT_APEX_BOOT_JARS)))
   $(call add_json_list, ArtApexJars,                             $(filter $(PRODUCT_BOOT_JARS),$(ART_APEX_JARS)))
   $(call add_json_list, TestOnlyArtBootImageJars,                $(PRODUCT_TEST_ONLY_ART_BOOT_IMAGE_JARS))
   $(call add_json_list, SystemServerJars,                        $(PRODUCT_SYSTEM_SERVER_JARS))
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 54a57d1..0155c67 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -60,17 +60,9 @@
   LOCAL_DEX_PREOPT :=
 endif
 
-# if WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY=true and module is not in boot class path skip
-# Also preopt system server jars since selinux prevents system server from loading anything from
-# /data. If we don't do this they will need to be extracted which is not favorable for RAM usage
-# or performance. If my_preopt_for_extracted_apk is true, we ignore the only preopt boot image
-# options.
-system_server_jars := $(foreach m,$(PRODUCT_SYSTEM_SERVER_JARS),$(call word-colon,2,$(m)))
 ifneq (true,$(my_preopt_for_extracted_apk))
-  ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
-    ifeq ($(filter $(system_server_jars) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
-      LOCAL_DEX_PREOPT :=
-    endif
+  ifeq (true,$(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY))
+    LOCAL_DEX_PREOPT :=
   endif
 endif
 
@@ -226,7 +218,7 @@
 # as a failure to get manifest from an APK).
 ifneq (true,$(WITH_DEXPREOPT))
   LOCAL_ENFORCE_USES_LIBRARIES := false
-else ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+else ifeq (true,$(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY))
   LOCAL_ENFORCE_USES_LIBRARIES := false
 endif
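
Taken together with the earlier hunk in this file, the rename also narrows the semantics: the old flag spared boot-classpath and system-server jars, while the new one clears LOCAL_DEX_PREOPT for every Make module. A sketch of the before/after gating, under that reading:

    def keeps_local_dex_preopt(module, *, new_flag, old_flag=False,
                               bcp_modules=(), system_server_jars=()):
        # Old behavior: WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY kept
        # preopt for boot-classpath and system-server jars only.
        if old_flag:
            return module in bcp_modules or module in system_server_jars
        # New behavior: WITH_DEXPREOPT_ART_BOOT_IMG_ONLY clears LOCAL_DEX_PREOPT
        # for every Make module (the ART boot image is handled in Soong).
        return not new_flag
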
 
diff --git a/core/main.mk b/core/main.mk
index 3f5f766..367b5bf 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -293,16 +293,22 @@
 
 # Vendors with GRF must define BOARD_SHIPPING_API_LEVEL for the vendor API level.
 # This must not be defined for the non-GRF devices.
+# The values of the GRF properties will be verified by post_process_props.py
 ifdef BOARD_SHIPPING_API_LEVEL
 ADDITIONAL_VENDOR_PROPERTIES += \
     ro.board.first_api_level=$(BOARD_SHIPPING_API_LEVEL)
+endif
 
-# To manually set the vendor API level of the vendor modules, BOARD_API_LEVEL can be used.
-# The values of the GRF properties will be verified by post_process_props.py
+# The build system sets BOARD_API_LEVEL to the API level of the vendor API surface.
+# This must not be altered outside of the build system.
 ifdef BOARD_API_LEVEL
 ADDITIONAL_VENDOR_PROPERTIES += \
     ro.board.api_level=$(BOARD_API_LEVEL)
 endif
+# BOARD_API_LEVEL_FROZEN is true when the vendor API surface is frozen.
+ifdef BOARD_API_LEVEL_FROZEN
+ADDITIONAL_VENDOR_PROPERTIES += \
+    ro.board.api_frozen=$(BOARD_API_LEVEL_FROZEN)
 endif
 
 # Set build prop. This prop is read by ota_from_target_files when generating OTA,
@@ -2162,7 +2168,7 @@
 metadata_files := $(subst $(newline),$(space),$(file <$(metadata_list)))
 $(PRODUCT_OUT)/sbom-metadata.csv:
 	rm -f $@
-	echo installed_file,module_path,soong_module_type,is_prebuilt_make_module,product_copy_files,kernel_module_copy_files,is_platform_generated,build_output_path,static_libraries,whole_static_libraries,is_static_lib >> $@
+	echo 'installed_file,module_path,soong_module_type,is_prebuilt_make_module,product_copy_files,kernel_module_copy_files,is_platform_generated,build_output_path,static_libraries,whole_static_libraries,is_static_lib' >> $@
 	$(eval _all_static_libs :=)
 	$(foreach f,$(installed_files),\
 	  $(eval _module_name := $(ALL_INSTALLED_FILES.$f)) \
@@ -2190,7 +2196,7 @@
 	  $(eval _whole_static_libs := $(ALL_INSTALLED_FILES.$f.WHOLE_STATIC_LIBRARIES)) \
 	  $(foreach l,$(_static_libs),$(eval _all_static_libs += $l:$(strip $(sort $(ALL_MODULES.$l.PATH))):$(strip $(sort $(ALL_MODULES.$l.SOONG_MODULE_TYPE))):$(ALL_STATIC_LIBRARIES.$l.BUILT_FILE))) \
 	  $(foreach l,$(_whole_static_libs),$(eval _all_static_libs += $l:$(strip $(sort $(ALL_MODULES.$l.PATH))):$(strip $(sort $(ALL_MODULES.$l.SOONG_MODULE_TYPE))):$(ALL_STATIC_LIBRARIES.$l.BUILT_FILE))) \
-	  echo /$(_path_on_device),$(_module_path),$(_soong_module_type),$(_is_prebuilt_make_module),$(_product_copy_files),$(_kernel_module_copy_files),$(_is_platform_generated),$(_build_output_path),$(_static_libs),$(_whole_static_libs), >> $@; \
+	  echo '/$(_path_on_device),$(_module_path),$(_soong_module_type),$(_is_prebuilt_make_module),$(_product_copy_files),$(_kernel_module_copy_files),$(_is_platform_generated),$(_build_output_path),$(_static_libs),$(_whole_static_libs),' >> $@; \
 	)
 	$(foreach l,$(sort $(_all_static_libs)), \
 	  $(eval _lib_stem := $(call word-colon,1,$l)) \
@@ -2200,7 +2206,7 @@
 	  $(eval _static_libs := $(ALL_STATIC_LIBRARIES.$l.STATIC_LIBRARIES)) \
 	  $(eval _whole_static_libs := $(ALL_STATIC_LIBRARIES.$l.WHOLE_STATIC_LIBRARIES)) \
 	  $(eval _is_static_lib := Y) \
-	  echo $(_lib_stem).a,$(_module_path),$(_soong_module_type),,,,,$(_built_file),$(_static_libs),$(_whole_static_libs),$(_is_static_lib) >> $@; \
+	  echo '$(_lib_stem).a,$(_module_path),$(_soong_module_type),,,,,$(_built_file),$(_static_libs),$(_whole_static_libs),$(_is_static_lib)' >> $@; \
 	)
 
 # (TODO: b/272358583 find another way of always rebuilding sbom.spdx)
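
The quoting changes above matter because the echoed CSV row may contain characters the shell would otherwise expand or split. A small Python illustration of the same idea (the row content is made up):

    import shlex

    row = "/system/app/Foo/Foo.apk,packages/apps/Foo,android_app,,,,,out/target/product/vsoc_x86/system/app/Foo/Foo.apk,,,"
    # Single-quoting keeps the row a single shell word and inhibits globbing
    # and word splitting, which is what wrapping the echoed text in '...' does.
    print(f"echo {shlex.quote(row)} >> sbom-metadata.csv")
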
diff --git a/core/notice_files.mk b/core/notice_files.mk
index a5852cc..7465cbf 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -1,36 +1,11 @@
 ###########################################################
 ## Track NOTICE files
 ###########################################################
-$(call record-module-type,NOTICE_FILE)
 
 ifneq ($(LOCAL_NOTICE_FILE),)
-notice_file:=$(strip $(LOCAL_NOTICE_FILE))
+  notice_file:=$(strip $(LOCAL_NOTICE_FILE))
 else
-notice_file:=$(strip $(wildcard $(LOCAL_PATH)/LICENSE $(LOCAL_PATH)/LICENCE $(LOCAL_PATH)/NOTICE))
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
-license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
-else
-license_package_name:=
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
-install_map:=$(strip $(LOCAL_LICENSE_INSTALL_MAP))
-else
-install_map:=
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_KINDS)))
-license_kinds:=$(strip $(LOCAL_LICENSE_KINDS))
-else
-license_kinds:=legacy_by_exception_only
-endif
-
-ifneq (,$(strip $(LOCAL_LICENSE_CONDITIONS)))
-license_conditions:=$(strip $(LOCAL_LICENSE_CONDITIONS))
-else
-license_conditions:=by_exception_only
+  notice_file:=$(strip $(wildcard $(LOCAL_PATH)/LICENSE $(LOCAL_PATH)/LICENCE $(LOCAL_PATH)/NOTICE))
 endif
 
 ifeq ($(LOCAL_MODULE_CLASS),GYP)
@@ -51,117 +26,118 @@
   notice_file :=
 endif
 
-ifeq ($(LOCAL_MODULE_CLASS),NOTICE_FILES)
-# If this is a NOTICE-only module, we don't include base_rule.mk,
-# so my_prefix is not set at this point.
-ifeq ($(LOCAL_IS_HOST_MODULE),true)
-  my_prefix := HOST_
-  LOCAL_HOST_PREFIX :=
+module_license_metadata := $(call local-meta-intermediates-dir)/$(my_register_name).meta_lic
+
+$(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(foreach bi,$(LOCAL_SOONG_BUILT_INSTALLED),$(call word-colon,1,$(bi))),\
+  $(eval ALL_TARGETS.$(target).META_LIC := $(module_license_metadata)))
+
+$(foreach f,$(my_test_data) $(my_test_config),\
+  $(if $(strip $(ALL_TARGETS.$(call word-colon,1,$(f)).META_LIC)), \
+    $(call declare-copy-target-license-metadata,$(call word-colon,2,$(f)),$(call word-colon,1,$(f))), \
+    $(eval ALL_TARGETS.$(call word-colon,2,$(f)).META_LIC := $(module_license_metadata))))
+
+ALL_MODULES.$(my_register_name).META_LIC := $(strip $(ALL_MODULES.$(my_register_name).META_LIC) $(module_license_metadata))
+
+ifdef LOCAL_SOONG_LICENSE_METADATA
+  # Soong modules have already produced a license metadata file, copy it to where Make expects it.
+  $(eval $(call copy-one-license-metadata-file, $(LOCAL_SOONG_LICENSE_METADATA), $(module_license_metadata),$(ALL_MODULES.$(my_register_name).BUILT),$(ALL_MODULES.$(my_register_name).INSTALLED)))
 else
-  my_prefix := TARGET_
-endif
-endif
+  # Make modules don't have enough information to produce a license metadata rule until after
+  # fix-notice-deps has been called, so store the necessary information until later.
 
-installed_notice_file :=
-
-is_container:=$(strip $(LOCAL_MODULE_IS_CONTAINER))
-ifeq (,$(is_container))
-ifneq (,$(strip $(filter %.zip %.tar %.tgz %.tar.gz %.apk %.img %.srcszip %.apex, $(LOCAL_BUILT_MODULE))))
-is_container:=true
-else
-is_container:=false
-endif
-else ifneq (,$(strip $(filter-out true false,$(is_container))))
-$(error Unrecognized value '$(is_container)' for LOCAL_MODULE_IS_CONTAINER)
-endif
-
-ifeq (true,$(is_container))
-# Include shared libraries' notices for "container" types, but not for binaries etc.
-notice_deps := \
-    $(strip \
-        $(foreach d, \
-            $(LOCAL_REQUIRED_MODULES) \
-            $(LOCAL_STATIC_LIBRARIES) \
-            $(LOCAL_WHOLE_STATIC_LIBRARIES) \
-            $(LOCAL_SHARED_LIBRARIES) \
-            $(LOCAL_DYLIB_LIBRARIES) \
-            $(LOCAL_RLIB_LIBRARIES) \
-            $(LOCAL_PROC_MACRO_LIBRARIES) \
-            $(LOCAL_HEADER_LIBRARIES) \
-            $(LOCAL_STATIC_JAVA_LIBRARIES) \
-            $(LOCAL_JAVA_LIBRARIES) \
-            $(LOCAL_JNI_SHARED_LIBRARIES) \
-            ,$(subst :,_,$(d)):static \
-        ) \
-    )
-else
-notice_deps := \
-    $(strip \
-        $(foreach d, \
-            $(LOCAL_REQUIRED_MODULES) \
-            $(LOCAL_STATIC_LIBRARIES) \
-            $(LOCAL_WHOLE_STATIC_LIBRARIES) \
-            $(LOCAL_RLIB_LIBRARIES) \
-            $(LOCAL_PROC_MACRO_LIBRARIES) \
-            $(LOCAL_HEADER_LIBRARIES) \
-            $(LOCAL_STATIC_JAVA_LIBRARIES) \
-            ,$(subst :,_,$(d)):static \
-        )$(foreach d, \
-            $(LOCAL_SHARED_LIBRARIES) \
-            $(LOCAL_DYLIB_LIBRARIES) \
-            $(LOCAL_JAVA_LIBRARIES) \
-            $(LOCAL_JNI_SHARED_LIBRARIES) \
-            ,$(subst :,_,$(d)):dynamic \
-        ) \
-    )
-endif
-ifeq ($(LOCAL_IS_HOST_MODULE),true)
-notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_HOST_REQUIRED_MODULES),$(subst :,_,$(d)):static))
-else
-notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_TARGET_REQUIRED_MODULES),$(subst :,_,$(d)):static))
-endif
-
-local_path := $(LOCAL_PATH)
-
-
-module_license_metadata :=
-
-ifdef my_register_name
-  module_license_metadata := $(call local-meta-intermediates-dir)/$(my_register_name).meta_lic
-
-  $(foreach target,$(ALL_MODULES.$(my_register_name).BUILT) $(ALL_MODULES.$(my_register_name).INSTALLED) $(foreach bi,$(LOCAL_SOONG_BUILT_INSTALLED),$(call word-colon,1,$(bi))),\
-    $(eval ALL_TARGETS.$(target).META_LIC := $(module_license_metadata)))
-
-  $(foreach f,$(my_test_data) $(my_test_config),\
-    $(if $(strip $(ALL_TARGETS.$(call word-colon,1,$(f)).META_LIC)), \
-      $(call declare-copy-target-license-metadata,$(call word-colon,2,$(f)),$(call word-colon,1,$(f))), \
-      $(eval ALL_TARGETS.$(call word-colon,2,$(f)).META_LIC := $(module_license_metadata))))
-
-  ALL_MODULES.$(my_register_name).META_LIC := $(strip $(ALL_MODULES.$(my_register_name).META_LIC) $(module_license_metadata))
-
-  ifdef LOCAL_SOONG_LICENSE_METADATA
-    # Soong modules have already produced a license metadata file, copy it to where Make expects it.
-    $(eval $(call copy-one-license-metadata-file, $(LOCAL_SOONG_LICENSE_METADATA), $(module_license_metadata),$(ALL_MODULES.$(my_register_name).BUILT),$(ALL_MODUES.$(my_register_name).INSTALLED)))
+  ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
+    license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
   else
-    # Make modules don't have enough information to produce a license metadata rule until after fix-notice-deps
-    # has been called, store the necessary information until later.
-    ALL_MODULES.$(my_register_name).DELAYED_META_LIC := $(strip $(ALL_MODULES.$(my_register_name).DELAYED_META_LIC) $(module_license_metadata))
-    ALL_MODULES.$(my_register_name).LICENSE_PACKAGE_NAME := $(strip $(license_package_name))
-    ALL_MODULES.$(my_register_name).MODULE_TYPE := $(strip $(ALL_MODULES.$(my_register_name).MODULE_TYPE) $(LOCAL_MODULE_TYPE))
-    ALL_MODULES.$(my_register_name).MODULE_CLASS := $(strip $(ALL_MODULES.$(my_register_name).MODULE_CLASS) $(LOCAL_MODULE_CLASS))
-    ALL_MODULES.$(my_register_name).LICENSE_KINDS := $(ALL_MODULES.$(my_register_name).LICENSE_KINDS) $(license_kinds)
-    ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS := $(ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS) $(license_conditions)
-    ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP := $(ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP) $(install_map)
-    ALL_MODULES.$(my_register_name).NOTICE_DEPS := $(ALL_MODULES.$(my_register_name).NOTICE_DEPS) $(notice_deps)
-    ALL_MODULES.$(my_register_name).IS_CONTAINER := $(strip $(filter-out false,$(ALL_MODULES.$(my_register_name).IS_CONTAINER) $(is_container)))
-    ALL_MODULES.$(my_register_name).PATH := $(strip $(ALL_MODULES.$(my_register_name).PATH) $(local_path))
+    license_package_name:=
   endif
+
+  ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
+    install_map:=$(strip $(LOCAL_LICENSE_INSTALL_MAP))
+  else
+    install_map:=
+  endif
+
+  ifneq (,$(strip $(LOCAL_LICENSE_KINDS)))
+    license_kinds:=$(strip $(LOCAL_LICENSE_KINDS))
+  else
+    license_kinds:=legacy_by_exception_only
+  endif
+
+  ifneq (,$(strip $(LOCAL_LICENSE_CONDITIONS)))
+    license_conditions:=$(strip $(LOCAL_LICENSE_CONDITIONS))
+  else
+    license_conditions:=by_exception_only
+  endif
+
+  is_container:=$(strip $(LOCAL_MODULE_IS_CONTAINER))
+  ifeq (,$(is_container))
+    ifneq (,$(strip $(filter %.zip %.tar %.tgz %.tar.gz %.apk %.img %.srcszip %.apex, $(LOCAL_BUILT_MODULE))))
+      is_container:=true
+    else
+      is_container:=false
+    endif
+  else ifneq (,$(strip $(filter-out true false,$(is_container))))
+    $(error Unrecognized value '$(is_container)' for LOCAL_MODULE_IS_CONTAINER)
+  endif
+
+  ifeq (true,$(is_container))
+    # Include shared libraries' notices for "container" types, but not for binaries etc.
+    notice_deps := \
+        $(strip \
+            $(foreach d, \
+                $(LOCAL_REQUIRED_MODULES) \
+                $(LOCAL_STATIC_LIBRARIES) \
+                $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+                $(LOCAL_SHARED_LIBRARIES) \
+                $(LOCAL_DYLIB_LIBRARIES) \
+                $(LOCAL_RLIB_LIBRARIES) \
+                $(LOCAL_PROC_MACRO_LIBRARIES) \
+                $(LOCAL_HEADER_LIBRARIES) \
+                $(LOCAL_STATIC_JAVA_LIBRARIES) \
+                $(LOCAL_JAVA_LIBRARIES) \
+                $(LOCAL_JNI_SHARED_LIBRARIES) \
+                ,$(subst :,_,$(d)):static \
+            ) \
+        )
+  else
+    notice_deps := \
+        $(strip \
+            $(foreach d, \
+                $(LOCAL_REQUIRED_MODULES) \
+                $(LOCAL_STATIC_LIBRARIES) \
+                $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+                $(LOCAL_RLIB_LIBRARIES) \
+                $(LOCAL_PROC_MACRO_LIBRARIES) \
+                $(LOCAL_HEADER_LIBRARIES) \
+                $(LOCAL_STATIC_JAVA_LIBRARIES) \
+                ,$(subst :,_,$(d)):static \
+            )$(foreach d, \
+                $(LOCAL_SHARED_LIBRARIES) \
+                $(LOCAL_DYLIB_LIBRARIES) \
+                $(LOCAL_JAVA_LIBRARIES) \
+                $(LOCAL_JNI_SHARED_LIBRARIES) \
+                ,$(subst :,_,$(d)):dynamic \
+            ) \
+        )
+  endif
+  ifeq ($(LOCAL_IS_HOST_MODULE),true)
+    notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_HOST_REQUIRED_MODULES),$(subst :,_,$(d)):static))
+  else
+    notice_deps := $(strip $(notice_deps) $(foreach d,$(LOCAL_TARGET_REQUIRED_MODULES),$(subst :,_,$(d)):static))
+  endif
+
+  ALL_MODULES.$(my_register_name).DELAYED_META_LIC := $(strip $(ALL_MODULES.$(my_register_name).DELAYED_META_LIC) $(module_license_metadata))
+  ALL_MODULES.$(my_register_name).LICENSE_PACKAGE_NAME := $(strip $(license_package_name))
+  ALL_MODULES.$(my_register_name).MODULE_TYPE := $(strip $(ALL_MODULES.$(my_register_name).MODULE_TYPE) $(LOCAL_MODULE_TYPE))
+  ALL_MODULES.$(my_register_name).MODULE_CLASS := $(strip $(ALL_MODULES.$(my_register_name).MODULE_CLASS) $(LOCAL_MODULE_CLASS))
+  ALL_MODULES.$(my_register_name).LICENSE_KINDS := $(ALL_MODULES.$(my_register_name).LICENSE_KINDS) $(license_kinds)
+  ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS := $(ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS) $(license_conditions)
+  ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP := $(ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP) $(install_map)
+  ALL_MODULES.$(my_register_name).NOTICE_DEPS := $(ALL_MODULES.$(my_register_name).NOTICE_DEPS) $(notice_deps)
+  ALL_MODULES.$(my_register_name).IS_CONTAINER := $(strip $(filter-out false,$(ALL_MODULES.$(my_register_name).IS_CONTAINER) $(is_container)))
+  ALL_MODULES.$(my_register_name).PATH := $(strip $(ALL_MODULES.$(my_register_name).PATH) $(local_path))
 endif
 
 ifdef notice_file
-
-ifdef my_register_name
 ALL_MODULES.$(my_register_name).NOTICES := $(ALL_MODULES.$(my_register_name).NOTICES) $(notice_file)
-endif
-
 endif  # notice_file
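
The container distinction this file preserves is the interesting part: containers embed their shared libraries, so every dependency becomes a ":static" notice dependency, while binaries track dynamically linked libraries as ":dynamic". A Python sketch of that tagging logic:

    def notice_deps(static_like_deps, shared_like_deps, is_container):
        # Containers (zip/apk/img/apex) embed shared libraries, so all deps
        # are ":static"; for non-containers, shared libraries are ":dynamic".
        tag = lambda d, kind: f"{d.replace(':', '_')}:{kind}"
        if is_container:
            return [tag(d, "static") for d in static_like_deps + shared_like_deps]
        return ([tag(d, "static") for d in static_like_deps] +
                [tag(d, "dynamic") for d in shared_like_deps])
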
diff --git a/core/product.mk b/core/product.mk
index 7d3331d..0d5a07d 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -447,6 +447,8 @@
 
 _product_single_value_vars += PRODUCT_NEXT_RELEASE_HIDE_FLAGGED_API
 
+_product_list_vars += PRODUCT_RELEASE_CONFIG_MAPS
+
 _product_list_vars += PRODUCT_VALIDATION_CHECKS
 
 .KATI_READONLY := _product_single_value_vars _product_list_vars
@@ -546,7 +548,7 @@
 # be cleaned up to not be product variables.
 _readonly_late_variables := \
   DEVICE_PACKAGE_OVERLAYS \
-  WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY \
+  WITH_DEXPREOPT_ART_BOOT_IMG_ONLY \
 
 # Modified internally in the build system
 _readonly_late_variables += \
diff --git a/core/product_validation_checks.mk b/core/product_validation_checks.mk
index 5939a0c..e0d976f 100644
--- a/core/product_validation_checks.mk
+++ b/core/product_validation_checks.mk
@@ -29,17 +29,20 @@
   BOARD_VENDOR_SEPOLICY_DIRS BOARD_SEPOLICY_DIRS \
   SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS \
   SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS \
+  PRODUCT_PUBLIC_SEPOLICY_DIRS \
+  PRODUCT_PRIVATE_SEPOLICY_DIRS
 
 known_board_list_variables := \
   BOARD_VENDOR_SEPOLICY_DIRS BOARD_SEPOLICY_DIRS \
   SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS \
   SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS \
+  PRODUCT_PUBLIC_SEPOLICY_DIRS \
+  PRODUCT_PRIVATE_SEPOLICY_DIRS
 
 escape_starlark_string=$(subst ",\",$(subst \,\\,$(1)))
 product_variable_starlark_value=$(if $(filter $(1),$(_product_list_vars) $(known_board_list_variables)),[$(foreach w,$($(1)),"$(call escape_starlark_string,$(w))", )],"$(call escape_starlark_string,$(1))")
 filename_to_starlark=$(subst -,_,$(subst /,_,$(subst .,_,$(1))))
-_c:=load("//build/make/core/release_config.bzl", "release_config")
-_c+=$(foreach f,$(PRODUCT_VALIDATION_CHECKS),$(newline)load("$(f)", validate_product_variables_$(call filename_to_starlark,$(f)) = "validate_product_variables"))
+_c:=$(foreach f,$(PRODUCT_VALIDATION_CHECKS),$(newline)load("$(f)", validate_product_variables_$(call filename_to_starlark,$(f)) = "validate_product_variables"))
 # TODO: we should freeze the context because it contains mutable lists, so that validation checks can't affect each other
 _c+=$(newline)_ctx = struct(
 _c+=$(newline)product_variables = struct(
@@ -49,8 +52,6 @@
 _c+=$(foreach v,$(known_board_variables),$(newline)  $(v) = $(call product_variable_starlark_value,$(v)),)
 _c+=$(newline))
 _c+=$(newline))
-# It's important that we call the function using keyword arguments, so that if we want to add
-# more arguments in the future it's easier.
 _c+=$(foreach f,$(PRODUCT_VALIDATION_CHECKS),$(newline)validate_product_variables_$(call filename_to_starlark,$(f))(_ctx))
 _c+=$(newline)variables_to_export_to_make = {}
 $(KATI_file_no_rerun >$(OUT_DIR)/product_validation_checks_entrypoint.scl,$(_c))
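
The generated entrypoint aliases each loaded validation function by a sanitized form of its path. The filename_to_starlark macro above corresponds to this Python sketch:

    def filename_to_starlark(path):
        # Same as $(subst -,_,$(subst /,_,$(subst .,_,$(1)))): turn a config
        # path into a legal Starlark identifier suffix for the load() alias.
        for ch in ".-/":
            path = path.replace(ch, "_")
        return path

    print(filename_to_starlark("device/acme/checks.scl"))  # device_acme_checks_scl
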
diff --git a/core/proguard.flags b/core/proguard.flags
index 6dbee84..9cbba0f 100644
--- a/core/proguard.flags
+++ b/core/proguard.flags
@@ -61,3 +61,4 @@
 }
 
 -include proguard_basic_keeps.flags
+-include proguard/kotlin.flags
diff --git a/core/proguard/kotlin.flags b/core/proguard/kotlin.flags
new file mode 100644
index 0000000..70dbaa7
--- /dev/null
+++ b/core/proguard/kotlin.flags
@@ -0,0 +1,19 @@
+# Ignore missing Kotlin meta-annotations so that Java-only projects can depend
+# on projects that happen to be written in Kotlin but do not have a run-time
+dependency on the Kotlin standard library. Note that these annotations have
+RUNTIME retention, but we won't need them available in Java-only projects.
+-dontwarn kotlin.Metadata
+-dontwarn kotlin.annotation.AnnotationRetention
+-dontwarn kotlin.annotation.AnnotationTarget
+-dontwarn kotlin.annotation.Retention
+-dontwarn kotlin.annotation.Target
+
+# Kotlin DebugMetadata has no value in release builds; these two rules will
+# allow AppReduce to strip out DebugMetadata.
+-checkdiscard interface kotlin.coroutines.jvm.internal.DebugMetadata
+-assumenosideeffects class kotlin.coroutines.jvm.internal.DebugMetadataKt {
+  *** getDebugMetadataAnnotation(...);
+}
+-assumevalues class kotlin.coroutines.jvm.internal.DebugMetadataKt {
+  *** getDebugMetadataAnnotation(...) return null;
+}
diff --git a/core/rbe.mk b/core/rbe.mk
index 001a549..0f90ddd 100644
--- a/core/rbe.mk
+++ b/core/rbe.mk
@@ -64,7 +64,7 @@
     d8_exec_strategy := remote_local_fallback
   endif
 
-  platform := container-image=docker://gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:953fed4a6b2501256a0d17f055dc17884ff71b024e50ade773e0b348a6c303e6
+  platform := container-image=docker://gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:1eb7f64b9e17102b970bd7a1af7daaebdb01c3fb777715899ef462d6c6d01a45
   cxx_platform := $(platform),Pool=$(cxx_pool)
   java_r8_d8_platform := $(platform),Pool=$(java_pool)
 
diff --git a/core/release_config.mk b/core/release_config.mk
index b72ee89..68dd876 100644
--- a/core/release_config.mk
+++ b/core/release_config.mk
@@ -52,6 +52,15 @@
         ) \
     )
 
+# PRODUCT_RELEASE_CONFIG_MAPS is set by Soong using an initial run of product
+# config to capture only the list of config maps needed by the build.
+# Keep them in the order provided, but remove duplicates.
+$(foreach map,$(PRODUCT_RELEASE_CONFIG_MAPS), \
+    $(if $(filter $(map),$(config_map_files)),,$(eval config_map_files += $(map))) \
+)
+
+# Declare or extend a release-config.
+#
 # $1 config name
 # $2 release config files
 define declare-release-config
@@ -63,30 +72,42 @@
     $(eval _all_release_configs.$(strip $(1)).FILES := $(_all_release_configs.$(strip $(1)).FILES) $(strip $(2)))
 endef
 
-# Include the config map files
+# Include the config map files and populate _flag_declaration_files.
+_flag_declaration_files :=
 $(foreach f, $(config_map_files), \
+    $(eval FLAG_DECLARATION_FILES:= ) \
     $(eval _included := $(f)) \
     $(eval include $(f)) \
+    $(eval _flag_declaration_files += $(FLAG_DECLARATION_FILES)) \
 )
+FLAG_DECLARATION_FILES :=
 
-# If TARGET_RELEASE is set, fail if there is no matching release config
-# If it isn't set, no release config files will be included and all flags
-# will get their default values.
-ifneq ($(TARGET_RELEASE),)
+ifeq ($(TARGET_RELEASE),)
+    # We allow some internal paths to explicitly set TARGET_RELEASE to the
+    # empty string.  For the most part, 'make' treats unset and empty string as
+    # the same.  But the following line differentiates, and will only assign
+    # if the variable was completely unset.
+    TARGET_RELEASE ?= was_unset
+    ifeq ($(TARGET_RELEASE),was_unset)
+        $(error No release config set for target; please set TARGET_RELEASE, or if building on the command line use 'lunch <target>-<release>-<build_type>', where release is one of: $(_all_release_configs))
+    endif
+    # Instead of leaving this string empty, we want to default to a valid
+    # setting.  A full build coming through this path is a bug, but in case
+    # of such a bug, we want to at least get consistent, valid results.
+    TARGET_RELEASE = trunk_staging
+endif
+
 ifeq ($(filter $(_all_release_configs), $(TARGET_RELEASE)),)
     $(error No release config found for TARGET_RELEASE: $(TARGET_RELEASE). Available releases are: $(_all_release_configs))
-else
-    # Choose flag files
-    # Don't sort this, use it in the order they gave us.
-    flag_value_files := $(_all_release_configs.$(TARGET_RELEASE).FILES)
 endif
-else
-# Useful for finding scripts etc that aren't passing or setting TARGET_RELEASE
-ifneq ($(FAIL_IF_NO_RELEASE_CONFIG),)
-    $(error FAIL_IF_NO_RELEASE_CONFIG was set and TARGET_RELEASE was not)
-endif
+
+# Choose flag files
+# Don't sort this, use it in the order they gave us.
+# Duplicate entries are allowed, but only the first occurrence is retained.
 flag_value_files :=
-endif
+$(foreach f,$(_all_release_configs.$(TARGET_RELEASE).FILES), \
+  $(if $(filter $(f),$(flag_value_files)),,$(eval flag_value_files += $(f)))\
+)
 
 # Unset variables so they can't use them
 define declare-release-config
@@ -121,36 +142,23 @@
 # that we chose from the config map above.  Then we run that, and load the
 # results of that into the make environment.
 
-# If this is a google source tree, restrict it to only the one file
-# which has OWNERS control.  If it isn't let others define their own.
-# TODO: Remove wildcard for build/release one when all branch manifests
-# have updated.
-flag_declaration_files := $(wildcard build/release/build_flags.bzl) \
-    $(if $(wildcard vendor/google/release/build_flags.bzl), \
-        vendor/google/release/build_flags.bzl, \
-        $(sort \
-            $(wildcard device/*/release/build_flags.bzl) \
-            $(wildcard device/*/*/release/build_flags.bzl) \
-            $(wildcard vendor/*/release/build_flags.bzl) \
-            $(wildcard vendor/*/*/release/build_flags.bzl) \
-        ) \
-    )
-
+# _flag_declaration_files is the combined list of FLAG_DECLARATION_FILES set by
+# release_config_map.mk files above.
 
 # Because starlark can't find files with $(wildcard), write an entrypoint starlark script that
 # contains the result of the above wildcards for the starlark code to use.
 filename_to_starlark=$(subst /,_,$(subst .,_,$(1)))
-_c:=load("//build/make/core/release_config.bzl", "release_config")
+_c:=load("//build/make/core/release_config.scl", "release_config")
 _c+=$(newline)def add(d, k, v):
 _c+=$(newline)$(space)d = dict(d)
 _c+=$(newline)$(space)d[k] = v
 _c+=$(newline)$(space)return d
-_c+=$(foreach f,$(flag_declaration_files),$(newline)load("$(f)", flags_$(call filename_to_starlark,$(f)) = "flags"))
-_c+=$(newline)all_flags = [] $(foreach f,$(flag_declaration_files),+ [add(x, "declared_in", "$(f)") for x in flags_$(call filename_to_starlark,$(f))])
+_c+=$(foreach f,$(_flag_declaration_files),$(newline)load("$(f)", flags_$(call filename_to_starlark,$(f)) = "flags"))
+_c+=$(newline)all_flags = [] $(foreach f,$(_flag_declaration_files),+ [add(x, "declared_in", "$(f)") for x in flags_$(call filename_to_starlark,$(f))])
 _c+=$(foreach f,$(flag_value_files),$(newline)load("//$(f)", values_$(call filename_to_starlark,$(f)) = "values"))
 _c+=$(newline)all_values = [] $(foreach f,$(flag_value_files),+ [add(x, "set_in", "$(f)") for x in values_$(call filename_to_starlark,$(f))])
 _c+=$(newline)variables_to_export_to_make = release_config(all_flags, all_values)
-$(file >$(OUT_DIR)/release_config_entrypoint.bzl,$(_c))
+$(file >$(OUT_DIR)/release_config_entrypoint.scl,$(_c))
 _c:=
 filename_to_starlark:=
 
@@ -160,5 +168,5 @@
 #
 # We also need to pass --allow_external_entrypoint to rbcrun in case the OUT_DIR is set to something
 # outside of the source tree.
-$(call run-starlark,$(OUT_DIR)/release_config_entrypoint.bzl,$(OUT_DIR)/release_config_entrypoint.bzl,--allow_external_entrypoint)
+$(call run-starlark,$(OUT_DIR)/release_config_entrypoint.scl,$(OUT_DIR)/release_config_entrypoint.scl,--allow_external_entrypoint)
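
Both new loops in this file (merging PRODUCT_RELEASE_CONFIG_MAPS into config_map_files, and building flag_value_files) use the same order-preserving de-dup idiom: append only when $(filter) does not already find the word. The Python equivalent:

    def dedup_keep_order(items):
        # First occurrence wins; input order is otherwise preserved.
        seen, result = set(), []
        for item in items:
            if item not in seen:
                seen.add(item)
                result.append(item)
        return result

    print(dedup_keep_order(["a.mk", "b.mk", "a.mk"]))  # ['a.mk', 'b.mk']
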
 
diff --git a/core/release_config.bzl b/core/release_config.scl
similarity index 87%
rename from core/release_config.bzl
rename to core/release_config.scl
index 0c08858..662d155 100644
--- a/core/release_config.bzl
+++ b/core/release_config.scl
@@ -15,7 +15,7 @@
 Export build flags (with values) to make.
 """
 
-load("//build/bazel/utils:schema_validation.bzl", "validate")
+load("//build/bazel/utils:schema_validation.scl", "validate")
 
 # Partitions that get build system flag summaries
 _flag_partitions = [
@@ -55,6 +55,11 @@
             },
             "declared_in": {"type": "string"},
         },
+        "optional_keys": {
+            "appends": {
+                "type": "bool",
+            },
+        },
     },
 }
 
@@ -75,13 +80,14 @@
     },
 }
 
-def flag(name, partitions, default):
+def flag(name, partitions, default, *, appends = False):
     """Declare a flag.
 
     Args:
       name: name of the flag
       partitions: the partitions where this should be recorded.
       default: the default value of the flag.
+      appends: Whether new values should append to (not replace) the old.
 
     Returns:
       A dictionary containing the flag declaration.
@@ -105,6 +111,7 @@
         "name": name,
         "partitions": partitions,
         "default": default,
+        "appends": appends,
     }
 
 def value(name, value):
@@ -153,10 +160,12 @@
 
     # Validate flags
     flag_names = []
+    flags_dict = {}
     for flag in all_flags:
         if flag["name"] in flag_names:
             fail(flag["declared_in"] + ": Duplicate declaration of flag " + flag["name"])
         flag_names.append(flag["name"])
+        flags_dict[flag["name"]] = flag
 
     # Record which flags go on which partition
     partitions = {}
@@ -170,13 +179,21 @@
             else:
                 partitions.setdefault(partition, []).append(flag["name"])
 
-    # Validate values
-    # TODO(joeo): Disallow duplicate values after we've split AOSP and vendor flags.
+    # Generate final values.
+    # Only declared flags may have a value.
     values = {}
     for value in all_values:
-        if value["name"] not in flag_names:
-            fail(value["set_in"] + ": Value set for undeclared build flag: " + value["name"])
-        values[value["name"]] = value
+        name = value["name"]
+        if name not in flag_names:
+            fail(value["set_in"] + ": Value set for undeclared build flag: " + name)
+        if flags_dict[name]["appends"]:
+            if name in values:
+                values[name]["value"] += " " + value["value"]
+                values[name]["set_in"] += " " + value["set_in"]
+            else:
+                values[name] = value
+        else:
+            values[name] = value
 
     # Collect values
     result = {
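
A direct Python rendering of the new value-merging loop above: later values replace earlier ones unless the flag was declared with appends=True, in which case values (and their set_in provenance) are concatenated.

    def merge_flag_values(flags_by_name, all_values):
        values = {}
        for v in all_values:
            name = v["name"]
            if name not in flags_by_name:
                raise ValueError(v["set_in"] + ": Value set for undeclared build flag: " + name)
            if flags_by_name[name]["appends"] and name in values:
                # Appending flags accumulate both the value and its provenance.
                values[name]["value"] += " " + v["value"]
                values[name]["set_in"] += " " + v["set_in"]
            else:
                values[name] = v
        return values
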
diff --git a/core/soong_cc_rust_prebuilt.mk b/core/soong_cc_rust_prebuilt.mk
index 143931b..94e1115 100644
--- a/core/soong_cc_rust_prebuilt.mk
+++ b/core/soong_cc_rust_prebuilt.mk
@@ -129,8 +129,13 @@
   ifdef LOCAL_SHARED_LIBRARIES
     my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
     ifdef LOCAL_USE_VNDK
-      my_shared_libraries := $(foreach l,$(my_shared_libraries),\
-        $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+      ifdef LOCAL_USE_VNDK_PRODUCT
+        my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+          $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
+      else
+        my_shared_libraries := $(foreach l,$(my_shared_libraries),\
+          $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+      endif
     endif
     $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
       $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_shared_libraries))
@@ -139,8 +144,13 @@
     my_dylibs := $(LOCAL_DYLIB_LIBRARIES)
     # Treat these as shared library dependencies for installation purposes.
     ifdef LOCAL_USE_VNDK
-      my_dylibs := $(foreach l,$(my_dylibs),\
-        $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+      ifdef LOCAL_USE_VNDK_PRODUCT
+        my_dylibs := $(foreach l,$(my_dylibs),\
+          $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
+      else
+        my_dylibs := $(foreach l,$(my_dylibs),\
+          $(if $(SPLIT_VENDOR.SHARED_LIBRARIES.$(l)),$(l).vendor,$(l)))
+      endif
     endif
     $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
       $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(my_dylibs))
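
Both hunks in this file apply the same suffix-selection rule, sketched here in Python: product-variant modules link the ".product" copy of a split library, vendor-variant modules the ".vendor" copy, and unsplit libraries keep their plain names.

    def rewrite_shared_libs(libs, use_vndk, use_vndk_product, split_product, split_vendor):
        if not use_vndk:
            return libs
        split, suffix = ((split_product, ".product") if use_vndk_product
                         else (split_vendor, ".vendor"))
        return [lib + suffix if lib in split else lib for lib in libs]

    print(rewrite_shared_libs(["libbase", "libfoo"], True, False, set(), {"libbase"}))
    # ['libbase.vendor', 'libfoo']
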
diff --git a/core/soong_config.mk b/core/soong_config.mk
index e39f2fa..08e9a65 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -164,7 +164,7 @@
 $(call add_json_list, ModulesLoadedByPrivilegedModules,  $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES))
 
 $(call add_json_list, BootJars,                          $(PRODUCT_BOOT_JARS))
-$(call add_json_list, ApexBootJars,                      $(PRODUCT_APEX_BOOT_JARS))
+$(call add_json_list, ApexBootJars,                      $(filter-out $(APEX_BOOT_JARS_EXCLUDED), $(PRODUCT_APEX_BOOT_JARS)))
 
 $(call add_json_bool, VndkUseCoreVariant,                $(TARGET_VNDK_USE_CORE_VARIANT))
 $(call add_json_bool, VndkSnapshotBuildArtifacts,        $(VNDK_SNAPSHOT_BUILD_ARTIFACTS))
diff --git a/envsetup.sh b/envsetup.sh
index 3b76980..c20837b 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -776,7 +776,7 @@
     else
         print_lunch_menu
         echo "Which would you like? [aosp_arm-trunk_staging-eng]"
-        echo -n "Pick from common choices above (e.g. 13) or specify your own (e.g. aosp_barbet-eng): "
+        echo -n "Pick from common choices above (e.g. 13) or specify your own (e.g. aosp_barbet-trunk_staging-eng): "
         read answer
         used_lunch_menu=1
     fi
diff --git a/rbesetup.sh b/rbesetup.sh
index 9e246ff..0da7a57 100644
--- a/rbesetup.sh
+++ b/rbesetup.sh
@@ -34,7 +34,7 @@
 # for the build to be executed with RBE.
 function use_rbe() {
   local RBE_BINARIES_DIR="prebuilts/remoteexecution-client/latest"
-  local DOCKER_IMAGE="gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:953fed4a6b2501256a0d17f055dc17884ff71b024e50ade773e0b348a6c303e6"
+  local DOCKER_IMAGE="gcr.io/androidbuild-re-dockerimage/android-build-remoteexec-image@sha256:1eb7f64b9e17102b970bd7a1af7daaebdb01c3fb777715899ef462d6c6d01a45"
 
   # Do not set an invocation-ID and let reproxy auto-generate one.
   USE_RBE="true" \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index e3ebaa3..69a2abf 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -53,7 +53,6 @@
     com.android.btservices \
     com.android.configinfrastructure \
     com.android.conscrypt \
-    com.android.crashrecovery \
     com.android.devicelock \
     com.android.extservices \
     com.android.healthfitness \
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index f7c92aa..bc11fea 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -56,6 +56,8 @@
     ims-common
 
 # APEX boot jars. Keep the list sorted by module names and then library names.
+# Note: If an existing apex introduces a new jar, also add it to
+# PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY below.
 # Note: core-icu4j is moved back to PRODUCT_BOOT_JARS in product_config.mk at a later stage.
 # Note: For modules available in Q, DO NOT add new entries here.
 PRODUCT_APEX_BOOT_JARS := \
@@ -65,7 +67,6 @@
     com.android.btservices:framework-bluetooth \
     com.android.configinfrastructure:framework-configinfrastructure \
     com.android.conscrypt:conscrypt \
-    com.android.crashrecovery:framework-crashrecovery \
     com.android.devicelock:framework-devicelock \
     com.android.healthfitness:framework-healthfitness \
     com.android.i18n:core-icu4j \
@@ -85,6 +86,12 @@
     com.android.virt:framework-virtualization \
     com.android.wifi:framework-wifi \
 
+# TODO(b/308174306): Adjust this after multiple prebuilt versions are supported.
+# APEX boot jars that are not in prebuilt apexes.
+# Keep the list sorted by module names and then library names.
+PRODUCT_APEX_BOOT_JARS_FOR_SOURCE_BUILD_ONLY := \
+    com.android.mediaprovider:framework-pdf \
+
 # List of system_server classpath jars delivered via apex.
 # Keep the list sorted by module names and then library names.
 # Note: For modules available in Q, DO NOT add new entries here.
@@ -94,7 +101,6 @@
     com.android.appsearch:service-appsearch \
     com.android.art:service-art \
     com.android.configinfrastructure:service-configinfrastructure \
-    com.android.crashrecovery:service-crashrecovery \
     com.android.healthfitness:service-healthfitness \
     com.android.media:service-media-s \
     com.android.ondevicepersonalization:service-ondevicepersonalization \
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 23eb534..fc5db6a 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -62,13 +62,16 @@
 PRODUCT_COPY_FILES += \
     device/generic/common/overlays/overlay-config.xml:$(TARGET_COPY_OUT_SYSTEM_EXT)/overlay/config/config.xml
 
+# b/308878144 no more VNDK on 24Q1 and beyond
+KEEP_VNDK ?= false
+
 # Support additional VNDK snapshots
 PRODUCT_EXTRA_VNDK_VERSIONS := \
-    29 \
     30 \
     31 \
     32 \
     33 \
+    34 \
 
 # Do not build non-GSI partition images.
 PRODUCT_BUILD_CACHE_IMAGE := false
diff --git a/target/product/handheld_system.mk b/target/product/handheld_system.mk
index 00b62bc..6c93dd7 100644
--- a/target/product/handheld_system.mk
+++ b/target/product/handheld_system.mk
@@ -40,7 +40,6 @@
     BuiltInPrintService \
     CalendarProvider \
     cameraserver \
-    com.android.nfcservices \
     CameraExtensionsProxy \
     CaptivePortalLogin \
     CertInstaller \
@@ -57,6 +56,7 @@
     MmsService \
     MtpService \
     MusicFX \
+    NfcNci \
     PacProcessor \
     preinstalled-packages-platform-handheld-system.xml \
     PrintRecommendationService \
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 68ed249..1e01b33 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -102,39 +102,63 @@
 PRODUCT_SYSTEM_PROPERTIES += \
     ro.dalvik.vm.native.bridge?=0
 
-# Different dexopt types for different package update/install times.
-# On eng builds, make "boot" reasons only extract for faster turnaround.
-ifeq (eng,$(TARGET_BUILD_VARIANT))
-    PRODUCT_SYSTEM_PROPERTIES += \
-        pm.dexopt.first-boot?=extract \
-        pm.dexopt.boot-after-ota?=extract
-else
-    PRODUCT_SYSTEM_PROPERTIES += \
-        pm.dexopt.first-boot?=verify \
-        pm.dexopt.boot-after-ota?=verify
-endif
-
 # The install filter is speed-profile in order to enable the use of
 # profiles from the dex metadata files. Note that if a profile is not provided
 # or if it is empty speed-profile is equivalent to (quicken + empty app image).
 # Note that `cmdline` is not strictly needed but it simplifies the management
 # of compilation reason in the platform (as we have a unified, single path,
 # without exceptions).
+# TODO(b/243646876): Remove `pm.dexopt.post-boot`.
 PRODUCT_SYSTEM_PROPERTIES += \
-    pm.dexopt.post-boot?=extract \
+    pm.dexopt.post-boot?=verify \
+    pm.dexopt.first-boot?=verify \
+    pm.dexopt.boot-after-ota?=verify \
     pm.dexopt.boot-after-mainline-update?=verify \
     pm.dexopt.install?=speed-profile \
     pm.dexopt.install-fast?=skip \
     pm.dexopt.install-bulk?=speed-profile \
     pm.dexopt.install-bulk-secondary?=verify \
     pm.dexopt.install-bulk-downgraded?=verify \
-    pm.dexopt.install-bulk-secondary-downgraded?=extract \
+    pm.dexopt.install-bulk-secondary-downgraded?=verify \
     pm.dexopt.bg-dexopt?=speed-profile \
     pm.dexopt.ab-ota?=speed-profile \
     pm.dexopt.inactive?=verify \
     pm.dexopt.cmdline?=verify \
     pm.dexopt.shared?=speed
 
+ifneq (,$(filter eng,$(TARGET_BUILD_VARIANT)))
+    OVERRIDE_DISABLE_DEXOPT_ALL ?= true
+endif
+
+# OVERRIDE_DISABLE_DEXOPT_ALL disables all dexpreopt (build-time) and dexopt (on-device) activities.
+# This option is for faster iteration during development and should never be enabled for production.
+ifneq (,$(filter true,$(OVERRIDE_DISABLE_DEXOPT_ALL)))
+  PRODUCT_SYSTEM_PROPERTIES += \
+    pm.dexopt.post-boot=skip \
+    pm.dexopt.first-boot=skip \
+    pm.dexopt.boot-after-ota=skip \
+    pm.dexopt.boot-after-mainline-update=skip \
+    pm.dexopt.install=skip \
+    pm.dexopt.install-fast=skip \
+    pm.dexopt.install-bulk=skip \
+    pm.dexopt.install-bulk-secondary=skip \
+    pm.dexopt.install-bulk-downgraded=skip \
+    pm.dexopt.install-bulk-secondary-downgraded=skip \
+    pm.dexopt.bg-dexopt=skip \
+    pm.dexopt.ab-ota=skip \
+    pm.dexopt.inactive=skip \
+    pm.dexopt.cmdline=skip \
+    pm.dexopt.shared=skip
+
+  PRODUCT_SYSTEM_PROPERTIES += dalvik.vm.disable-odrefresh=true
+
+  # Disable all dexpreopt activities except for the ART boot image.
+  # We have to dexpreopt the ART boot image because it is used by ART tests. This should not
+  # be too much of a problem for platform developers because a change to framework code should not
+  # trigger dexpreopt for the ART boot image.
+  WITH_DEXPREOPT_ART_BOOT_IMG_ONLY := true
+endif
+
 # Enable resolution of startup const strings.
 PRODUCT_SYSTEM_PROPERTIES += \
     dalvik.vm.dex2oat-resolve-startup-strings=true
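
Note how the skip block uses "=" while the defaults above it use "?=". A simplified Python model of that distinction, assuming the usual optional-assignment semantics where "?=" only applies when nothing has set the key:

    def resolve_props(lines):
        # "key?=value" is an optional default; "key=value" always wins.
        # So "pm.dexopt.install=skip" overrides the earlier
        # "pm.dexopt.install?=speed-profile" default.
        props = {}
        for line in lines:
            key, _, value = line.partition("=")
            if key.endswith("?"):
                props.setdefault(key[:-1], value)
            else:
                props[key] = value
        return props

    print(resolve_props(["pm.dexopt.install?=speed-profile",
                         "pm.dexopt.install=skip"])["pm.dexopt.install"])  # skip
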
diff --git a/tools/aconfig/Android.bp b/tools/aconfig/Android.bp
index 425d8a9..e2fadb0 100644
--- a/tools/aconfig/Android.bp
+++ b/tools/aconfig/Android.bp
@@ -58,6 +58,7 @@
         "libaconfig_protos",
         "libanyhow",
         "libclap",
+        "libitertools",
         "libprotobuf",
         "libserde",
         "libserde_json",
@@ -131,7 +132,7 @@
     name: "aconfig_host_test_java_library",
     aconfig_declarations: "aconfig.test.flags",
     host_supported: true,
-    test: true,
+    mode: "test",
 }
 
 java_test_host {
@@ -187,7 +188,7 @@
     name: "libaconfig_test_rust_library_with_test_mode",
     crate_name: "aconfig_test_rust_library",
     aconfig_declarations: "aconfig.test.flags",
-    test: true,
+    mode: "test",
 }
 
 rust_test {
diff --git a/tools/aconfig/Cargo.toml b/tools/aconfig/Cargo.toml
index 941b30d..2edf4b8 100644
--- a/tools/aconfig/Cargo.toml
+++ b/tools/aconfig/Cargo.toml
@@ -11,6 +11,7 @@
 [dependencies]
 anyhow = "1.0.69"
 clap = { version = "4.1.8", features = ["derive"] }
+itertools = "0.10.5"
 paste = "1.0.11"
 protobuf = "3.2.0"
 serde = { version = "1.0.152", features = ["derive"] }
diff --git a/tools/aconfig/src/codegen_cpp.rs b/tools/aconfig/src/codegen_cpp.rs
index aeb57a3..33e54a1 100644
--- a/tools/aconfig/src/codegen_cpp.rs
+++ b/tools/aconfig/src/codegen_cpp.rs
@@ -31,9 +31,11 @@
 where
     I: Iterator<Item = &'a ProtoParsedFlag>,
 {
-    let class_elements: Vec<ClassElement> =
-        parsed_flags_iter.map(|pf| create_class_element(package, pf)).collect();
-    let readwrite = class_elements.iter().any(|item| item.readwrite);
+    let mut readwrite_count = 0;
+    let class_elements: Vec<ClassElement> = parsed_flags_iter
+        .map(|pf| create_class_element(package, pf, &mut readwrite_count))
+        .collect();
+    let readwrite = readwrite_count > 0;
     let has_fixed_read_only = class_elements.iter().any(|item| item.is_fixed_read_only);
     let header = package.replace('.', "_");
     let package_macro = header.to_uppercase();
@@ -46,6 +48,7 @@
         package,
         has_fixed_read_only,
         readwrite,
+        readwrite_count,
         for_test: codegen_mode == CodegenMode::Test,
         class_elements,
     };
@@ -88,12 +91,14 @@
     pub package: &'a str,
     pub has_fixed_read_only: bool,
     pub readwrite: bool,
+    pub readwrite_count: i32,
     pub for_test: bool,
     pub class_elements: Vec<ClassElement>,
 }
 
 #[derive(Serialize)]
 pub struct ClassElement {
+    pub readwrite_idx: i32,
     pub readwrite: bool,
     pub is_fixed_read_only: bool,
     pub default_value: String,
@@ -103,8 +108,15 @@
     pub device_config_flag: String,
 }
 
-fn create_class_element(package: &str, pf: &ProtoParsedFlag) -> ClassElement {
+fn create_class_element(package: &str, pf: &ProtoParsedFlag, rw_count: &mut i32) -> ClassElement {
     ClassElement {
+        readwrite_idx: if pf.permission() == ProtoFlagPermission::READ_WRITE {
+            let index = *rw_count;
+            *rw_count += 1;
+            index
+        } else {
+            -1
+        },
         readwrite: pf.permission() == ProtoFlagPermission::READ_WRITE,
         is_fixed_read_only: pf.is_fixed_read_only(),
         default_value: if pf.state() == ProtoFlagState::ENABLED {
@@ -139,8 +151,12 @@
 #ifdef __cplusplus
 
 #include <memory>
+#include <vector>
 
 namespace com::android::aconfig::test {
+
+extern std::vector<int8_t> cache_;
+
 class flag_provider_interface {
 public:
     virtual ~flag_provider_interface() = default;
@@ -149,6 +165,8 @@
 
     virtual bool disabled_rw() = 0;
 
+    virtual bool disabled_rw_2() = 0;
+
     virtual bool enabled_fixed_ro() = 0;
 
     virtual bool enabled_ro() = 0;
@@ -166,6 +184,10 @@
     return provider_->disabled_rw();
 }
 
+inline bool disabled_rw_2() {
+    return provider_->disabled_rw_2();
+}
+
 inline bool enabled_fixed_ro() {
     return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
 }
@@ -187,6 +209,8 @@
 
 bool com_android_aconfig_test_disabled_rw();
 
+bool com_android_aconfig_test_disabled_rw_2();
+
 bool com_android_aconfig_test_enabled_fixed_ro();
 
 bool com_android_aconfig_test_enabled_ro();
@@ -220,6 +244,10 @@
 
     virtual void disabled_rw(bool val) = 0;
 
+    virtual bool disabled_rw_2() = 0;
+
+    virtual void disabled_rw_2(bool val) = 0;
+
     virtual bool enabled_fixed_ro() = 0;
 
     virtual void enabled_fixed_ro(bool val) = 0;
@@ -253,6 +281,14 @@
     provider_->disabled_rw(val);
 }
 
+inline bool disabled_rw_2() {
+    return provider_->disabled_rw_2();
+}
+
+inline void disabled_rw_2(bool val) {
+    provider_->disabled_rw_2(val);
+}
+
 inline bool enabled_fixed_ro() {
     return provider_->enabled_fixed_ro();
 }
@@ -294,6 +330,10 @@
 
 void set_com_android_aconfig_test_disabled_rw(bool val);
 
+bool com_android_aconfig_test_disabled_rw_2();
+
+void set_com_android_aconfig_test_disabled_rw_2(bool val);
+
 bool com_android_aconfig_test_enabled_fixed_ro();
 
 void set_com_android_aconfig_test_enabled_fixed_ro(bool val);
@@ -330,10 +370,23 @@
             }
 
             virtual bool disabled_rw() override {
-                return server_configurable_flags::GetServerConfigurableFlag(
-                    "aconfig_flags.aconfig_test",
-                    "com.android.aconfig.test.disabled_rw",
-                    "false") == "true";
+                if (cache_[0] == -1) {
+                    cache_[0] = server_configurable_flags::GetServerConfigurableFlag(
+                        "aconfig_flags.aconfig_test",
+                        "com.android.aconfig.test.disabled_rw",
+                        "false") == "true";
+                }
+                return cache_[0];
+            }
+
+            virtual bool disabled_rw_2() override {
+                if (cache_[1] == -1) {
+                    cache_[1] = server_configurable_flags::GetServerConfigurableFlag(
+                        "aconfig_flags.other_namespace",
+                        "com.android.aconfig.test.disabled_rw_2",
+                        "false") == "true";
+                }
+                return cache_[1];
             }
 
             virtual bool enabled_fixed_ro() override {
@@ -345,14 +398,19 @@
             }
 
             virtual bool enabled_rw() override {
-                return server_configurable_flags::GetServerConfigurableFlag(
-                    "aconfig_flags.aconfig_test",
-                    "com.android.aconfig.test.enabled_rw",
-                    "true") == "true";
+                if (cache_[2] == -1) {
+                    cache_[2] = server_configurable_flags::GetServerConfigurableFlag(
+                        "aconfig_flags.aconfig_test",
+                        "com.android.aconfig.test.enabled_rw",
+                        "true") == "true";
+                }
+                return cache_[2];
             }
 
     };
 
+    std::vector<int8_t> cache_ = std::vector<int8_t>(3, -1);
+
     std::unique_ptr<flag_provider_interface> provider_ =
         std::make_unique<flag_provider>();
 }
@@ -365,6 +423,10 @@
     return com::android::aconfig::test::disabled_rw();
 }
 
+bool com_android_aconfig_test_disabled_rw_2() {
+    return com::android::aconfig::test::disabled_rw_2();
+}
+
 bool com_android_aconfig_test_enabled_fixed_ro() {
     return COM_ANDROID_ACONFIG_TEST_ENABLED_FIXED_RO;
 }
@@ -425,6 +487,22 @@
                 overrides_["disabled_rw"] = val;
             }
 
+            virtual bool disabled_rw_2() override {
+                auto it = overrides_.find("disabled_rw_2");
+                  if (it != overrides_.end()) {
+                      return it->second;
+                } else {
+                  return server_configurable_flags::GetServerConfigurableFlag(
+                      "aconfig_flags.other_namespace",
+                      "com.android.aconfig.test.disabled_rw_2",
+                      "false") == "true";
+                }
+            }
+
+            virtual void disabled_rw_2(bool val) override {
+                overrides_["disabled_rw_2"] = val;
+            }
+
             virtual bool enabled_fixed_ro() override {
                 auto it = overrides_.find("enabled_fixed_ro");
                   if (it != overrides_.end()) {
@@ -494,6 +572,15 @@
     com::android::aconfig::test::disabled_rw(val);
 }
 
+bool com_android_aconfig_test_disabled_rw_2() {
+    return com::android::aconfig::test::disabled_rw_2();
+}
+
+
+void set_com_android_aconfig_test_disabled_rw_2(bool val) {
+    com::android::aconfig::test::disabled_rw_2(val);
+}
+
 
 bool com_android_aconfig_test_enabled_fixed_ro() {
     return com::android::aconfig::test::enabled_fixed_ro();
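
The generated C++ above stops re-querying GetServerConfigurableFlag on every call and instead memoizes each read-write flag in a vector<int8_t>, with -1 marking a slot that has not been read yet. A minimal Python sketch (not AOSP code) of the same tri-state memoization; get_server_flag is a hypothetical stand-in for the real service call:

```python
def get_server_flag(namespace: str, flag: str, default: str) -> str:
    # Hypothetical stand-in: the real code queries server_configurable_flags.
    return default

_cache = [-1, -1, -1]  # one slot per read-write flag, like readwrite_idx

def _cached_flag(idx: int, namespace: str, flag: str, default: str) -> bool:
    # -1 = unread; 0/1 = parsed boolean, fetched exactly once.
    if _cache[idx] == -1:
        _cache[idx] = 1 if get_server_flag(namespace, flag, default) == "true" else 0
    return _cache[idx] == 1

def disabled_rw_2() -> bool:
    return _cached_flag(1, "aconfig_flags.other_namespace",
                        "com.android.aconfig.test.disabled_rw_2", "false")
```
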
diff --git a/tools/aconfig/src/codegen_java.rs b/tools/aconfig/src/codegen_java.rs
index 05ee0d7..5470cb3 100644
--- a/tools/aconfig/src/codegen_java.rs
+++ b/tools/aconfig/src/codegen_java.rs
@@ -16,7 +16,7 @@
 
 use anyhow::Result;
 use serde::Serialize;
-use std::collections::BTreeSet;
+use std::collections::{BTreeMap, BTreeSet};
 use std::path::PathBuf;
 use tinytemplate::TinyTemplate;
 
@@ -34,12 +34,14 @@
 {
     let flag_elements: Vec<FlagElement> =
         parsed_flags_iter.map(|pf| create_flag_element(package, pf)).collect();
+    let namespace_flags = gen_flags_by_namespace(&flag_elements);
     let properties_set: BTreeSet<String> =
         flag_elements.iter().map(|fe| format_property_name(&fe.device_config_namespace)).collect();
     let is_read_write = flag_elements.iter().any(|elem| elem.is_read_write);
     let is_test_mode = codegen_mode == CodegenMode::Test;
     let context = Context {
         flag_elements,
+        namespace_flags,
         is_test_mode,
         is_read_write,
         properties_set,
@@ -72,16 +74,44 @@
         .collect::<Result<Vec<OutputFile>>>()
 }
 
+fn gen_flags_by_namespace(flags: &[FlagElement]) -> Vec<NamespaceFlags> {
+    let mut namespace_to_flag: BTreeMap<String, Vec<FlagElement>> = BTreeMap::new();
+
+    for flag in flags {
+        match namespace_to_flag.get_mut(&flag.device_config_namespace) {
+            Some(flag_list) => flag_list.push(flag.clone()),
+            None => {
+                namespace_to_flag.insert(flag.device_config_namespace.clone(), vec![flag.clone()]);
+            }
+        }
+    }
+
+    namespace_to_flag
+        .iter()
+        .map(|(namespace, flags)| NamespaceFlags {
+            namespace: namespace.to_string(),
+            flags: flags.clone(),
+        })
+        .collect()
+}
+
 #[derive(Serialize)]
 struct Context {
     pub flag_elements: Vec<FlagElement>,
+    pub namespace_flags: Vec<NamespaceFlags>,
     pub is_test_mode: bool,
     pub is_read_write: bool,
     pub properties_set: BTreeSet<String>,
     pub package_name: String,
 }
 
-#[derive(Serialize)]
+#[derive(Serialize, Debug)]
+struct NamespaceFlags {
+    pub namespace: String,
+    pub flags: Vec<FlagElement>,
+}
+
+#[derive(Serialize, Clone, Debug)]
 struct FlagElement {
     pub default_value: bool,
     pub device_config_namespace: String,
@@ -148,6 +178,8 @@
         boolean disabledRo();
         @UnsupportedAppUsage
         boolean disabledRw();
+        @UnsupportedAppUsage
+        boolean disabledRw2();
         @com.android.aconfig.annotations.AssumeTrueForR8
         @UnsupportedAppUsage
         boolean enabledFixedRo();
@@ -170,6 +202,8 @@
         /** @hide */
         public static final String FLAG_DISABLED_RW = "com.android.aconfig.test.disabled_rw";
         /** @hide */
+        public static final String FLAG_DISABLED_RW_2 = "com.android.aconfig.test.disabled_rw_2";
+        /** @hide */
         public static final String FLAG_ENABLED_FIXED_RO = "com.android.aconfig.test.enabled_fixed_ro";
         /** @hide */
         public static final String FLAG_ENABLED_RO = "com.android.aconfig.test.enabled_ro";
@@ -185,6 +219,10 @@
         public static boolean disabledRw() {
             return FEATURE_FLAGS.disabledRw();
         }
+        @UnsupportedAppUsage
+        public static boolean disabledRw2() {
+            return FEATURE_FLAGS.disabledRw2();
+        }
         @com.android.aconfig.annotations.AssumeTrueForR8
         @UnsupportedAppUsage
         public static boolean enabledFixedRo() {
@@ -224,6 +262,11 @@
         }
         @Override
         @UnsupportedAppUsage
+        public boolean disabledRw2() {
+            return getValue(Flags.FLAG_DISABLED_RW_2);
+        }
+        @Override
+        @UnsupportedAppUsage
         public boolean enabledFixedRo() {
             return getValue(Flags.FLAG_ENABLED_FIXED_RO);
         }
@@ -259,6 +302,7 @@
             Map.ofEntries(
                 Map.entry(Flags.FLAG_DISABLED_RO, false),
                 Map.entry(Flags.FLAG_DISABLED_RW, false),
+                Map.entry(Flags.FLAG_DISABLED_RW_2, false),
                 Map.entry(Flags.FLAG_ENABLED_FIXED_RO, false),
                 Map.entry(Flags.FLAG_ENABLED_RO, false),
                 Map.entry(Flags.FLAG_ENABLED_RW, false)
@@ -289,7 +333,52 @@
         import android.provider.DeviceConfig.Properties;
         /** @hide */
         public final class FeatureFlagsImpl implements FeatureFlags {
-            private Properties mPropertiesAconfigTest;
+            private static boolean aconfig_test_is_cached = false;
+            private static boolean other_namespace_is_cached = false;
+            private static boolean disabledRw = false;
+            private static boolean disabledRw2 = false;
+            private static boolean enabledRw = true;
+
+
+            private void load_overrides_aconfig_test() {
+                try {
+                    Properties properties = DeviceConfig.getProperties("aconfig_test");
+                    disabledRw =
+                        properties.getBoolean("com.android.aconfig.test.disabled_rw", false);
+                    enabledRw =
+                        properties.getBoolean("com.android.aconfig.test.enabled_rw", true);
+                } catch (NullPointerException e) {
+                    throw new RuntimeException(
+                        "Cannot read value from namespace aconfig_test "
+                        + "from DeviceConfig. It could be that the code using flag "
+                        + "executed before SettingsProvider initialization. Please use "
+                        + "fixed read-only flag by adding is_fixed_read_only: true in "
+                        + "flag declaration.",
+                        e
+                    );
+                }
+                aconfig_test_is_cached = true;
+            }
+
+            private void load_overrides_other_namespace() {
+                try {
+                    Properties properties = DeviceConfig.getProperties("other_namespace");
+                    disabledRw2 =
+                        properties.getBoolean("com.android.aconfig.test.disabled_rw_2", false);
+                } catch (NullPointerException e) {
+                    throw new RuntimeException(
+                        "Cannot read value from namespace other_namespace "
+                        + "from DeviceConfig. It could be that the code using flag "
+                        + "executed before SettingsProvider initialization. Please use "
+                        + "fixed read-only flag by adding is_fixed_read_only: true in "
+                        + "flag declaration.",
+                        e
+                    );
+                }
+                other_namespace_is_cached = true;
+            }
+
+
             @Override
             @UnsupportedAppUsage
             public boolean disabledRo() {
@@ -298,18 +387,18 @@
             @Override
             @UnsupportedAppUsage
             public boolean disabledRw() {
-                if (mPropertiesAconfigTest == null) {
-                    mPropertiesAconfigTest =
-                        getProperties(
-                            "aconfig_test",
-                            "com.android.aconfig.test.disabled_rw"
-                        );
+                if (!aconfig_test_is_cached) {
+                    load_overrides_aconfig_test();
                 }
-                return mPropertiesAconfigTest
-                    .getBoolean(
-                        "com.android.aconfig.test.disabled_rw",
-                        false
-                    );
+                return disabledRw;
+            }
+            @Override
+            @UnsupportedAppUsage
+            public boolean disabledRw2() {
+                if (!other_namespace_is_cached) {
+                    load_overrides_other_namespace();
+                }
+                return disabledRw2;
             }
             @Override
             @UnsupportedAppUsage
@@ -324,36 +413,10 @@
             @Override
             @UnsupportedAppUsage
             public boolean enabledRw() {
-                if (mPropertiesAconfigTest == null) {
-                    mPropertiesAconfigTest =
-                        getProperties(
-                            "aconfig_test",
-                            "com.android.aconfig.test.enabled_rw"
-                        );
+                if (!aconfig_test_is_cached) {
+                    load_overrides_aconfig_test();
                 }
-                return mPropertiesAconfigTest
-                    .getBoolean(
-                        "com.android.aconfig.test.enabled_rw",
-                        true
-                    );
-            }
-            private Properties getProperties(
-                String namespace,
-                String flagName) {
-                Properties properties = null;
-                try {
-                    properties = DeviceConfig.getProperties(namespace);
-                } catch (NullPointerException e) {
-                    throw new RuntimeException(
-                        "Cannot read value of flag " + flagName + " from DeviceConfig. "
-                        + "It could be that the code using flag executed "
-                        + "before SettingsProvider initialization. "
-                        + "Please use fixed read-only flag by adding "
-                        + "is_fixed_read_only: true in flag declaration.",
-                        e
-                    );
-                }
-                return properties;
+                return enabledRw;
             }
         }
         "#;
@@ -426,6 +489,12 @@
             }
             @Override
             @UnsupportedAppUsage
+            public boolean disabledRw2() {
+                throw new UnsupportedOperationException(
+                    "Method is not implemented.");
+            }
+            @Override
+            @UnsupportedAppUsage
             public boolean enabledFixedRo() {
                 throw new UnsupportedOperationException(
                     "Method is not implemented.");
diff --git a/tools/aconfig/src/codegen_rust.rs b/tools/aconfig/src/codegen_rust.rs
index 4e4c7dd..fb4f981 100644
--- a/tools/aconfig/src/codegen_rust.rs
+++ b/tools/aconfig/src/codegen_rust.rs
@@ -32,10 +32,12 @@
 {
     let template_flags: Vec<TemplateParsedFlag> =
         parsed_flags_iter.map(|pf| TemplateParsedFlag::new(package, pf)).collect();
+    let has_readwrite = template_flags.iter().any(|item| item.readwrite);
     let context = TemplateContext {
         package: package.to_string(),
         template_flags,
         modules: package.split('.').map(|s| s.to_string()).collect::<Vec<_>>(),
+        has_readwrite,
     };
     let mut template = TinyTemplate::new();
     template.add_template(
@@ -55,6 +57,7 @@
     pub package: String,
     pub template_flags: Vec<TemplateParsedFlag>,
     pub modules: Vec<String>,
+    pub has_readwrite: bool,
 }
 
 #[derive(Serialize)]
@@ -94,6 +97,26 @@
 /// flag provider
 pub struct FlagProvider;
 
+lazy_static::lazy_static! {
+    /// flag value cache for disabled_rw
+    static ref CACHED_disabled_rw: bool = flags_rust::GetServerConfigurableFlag(
+        "aconfig_flags.aconfig_test",
+        "com.android.aconfig.test.disabled_rw",
+        "false") == "true";
+
+    /// flag value cache for disabled_rw_2
+    static ref CACHED_disabled_rw_2: bool = flags_rust::GetServerConfigurableFlag(
+        "aconfig_flags.other_namespace",
+        "com.android.aconfig.test.disabled_rw_2",
+        "false") == "true";
+
+    /// flag value cache for enabled_rw
+    static ref CACHED_enabled_rw: bool = flags_rust::GetServerConfigurableFlag(
+        "aconfig_flags.aconfig_test",
+        "com.android.aconfig.test.enabled_rw",
+        "true") == "true";
+}
+
 impl FlagProvider {
     /// query flag disabled_ro
     pub fn disabled_ro(&self) -> bool {
@@ -102,10 +125,12 @@
 
     /// query flag disabled_rw
     pub fn disabled_rw(&self) -> bool {
-        flags_rust::GetServerConfigurableFlag(
-            "aconfig_flags.aconfig_test",
-            "com.android.aconfig.test.disabled_rw",
-            "false") == "true"
+        *CACHED_disabled_rw
+    }
+
+    /// query flag disabled_rw_2
+    pub fn disabled_rw_2(&self) -> bool {
+        *CACHED_disabled_rw_2
     }
 
     /// query flag enabled_fixed_ro
@@ -120,10 +145,7 @@
 
     /// query flag enabled_rw
     pub fn enabled_rw(&self) -> bool {
-        flags_rust::GetServerConfigurableFlag(
-            "aconfig_flags.aconfig_test",
-            "com.android.aconfig.test.enabled_rw",
-            "true") == "true"
+        *CACHED_enabled_rw
     }
 }
 
@@ -142,6 +164,12 @@
     PROVIDER.disabled_rw()
 }
 
+/// query flag disabled_rw_2
+#[inline(always)]
+pub fn disabled_rw_2() -> bool {
+    PROVIDER.disabled_rw_2()
+}
+
 /// query flag enabled_fixed_ro
 #[inline(always)]
 pub fn enabled_fixed_ro() -> bool {
@@ -200,6 +228,21 @@
         self.overrides.insert("disabled_rw", val);
     }
 
+    /// query flag disabled_rw_2
+    pub fn disabled_rw_2(&self) -> bool {
+        self.overrides.get("disabled_rw_2").copied().unwrap_or(
+            flags_rust::GetServerConfigurableFlag(
+                "aconfig_flags.other_namespace",
+                "com.android.aconfig.test.disabled_rw_2",
+                "false") == "true"
+        )
+    }
+
+    /// set flag disabled_rw_2
+    pub fn set_disabled_rw_2(&mut self, val: bool) {
+        self.overrides.insert("disabled_rw_2", val);
+    }
+
     /// query flag enabled_fixed_ro
     pub fn enabled_fixed_ro(&self) -> bool {
         self.overrides.get("enabled_fixed_ro").copied().unwrap_or(
@@ -274,6 +317,18 @@
     PROVIDER.lock().unwrap().set_disabled_rw(val);
 }
 
+/// query flag disabled_rw_2
+#[inline(always)]
+pub fn disabled_rw_2() -> bool {
+    PROVIDER.lock().unwrap().disabled_rw_2()
+}
+
+/// set flag disabled_rw_2
+#[inline(always)]
+pub fn set_disabled_rw_2(val: bool) {
+    PROVIDER.lock().unwrap().set_disabled_rw_2(val);
+}
+
 /// query flag enabled_fixed_ro
 #[inline(always)]
 pub fn enabled_fixed_ro() -> bool {
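
The lazy_static blocks above evaluate each read-write flag once, on first dereference, and hand back the cached bool thereafter. functools.lru_cache on a zero-argument function gives the same evaluate-once behavior in this Python sketch; get_server_flag is again a hypothetical stand-in:

```python
from functools import lru_cache

def get_server_flag(namespace: str, flag: str, default: str) -> str:
    # Hypothetical stand-in for flags_rust::GetServerConfigurableFlag.
    return default

@lru_cache(maxsize=None)
def disabled_rw_2() -> bool:
    # Runs once; later calls return the cached bool,
    # like dereferencing CACHED_disabled_rw_2 above.
    return get_server_flag("aconfig_flags.other_namespace",
                           "com.android.aconfig.test.disabled_rw_2",
                           "false") == "true"
```
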
diff --git a/tools/aconfig/src/commands.rs b/tools/aconfig/src/commands.rs
index 7b05147..63aef29 100644
--- a/tools/aconfig/src/commands.rs
+++ b/tools/aconfig/src/commands.rs
@@ -334,7 +334,7 @@
         assert_eq!(ProtoFlagState::ENABLED, enabled_ro.trace[2].state());
         assert_eq!(ProtoFlagPermission::READ_ONLY, enabled_ro.trace[2].permission());
 
-        assert_eq!(5, parsed_flags.parsed_flag.len());
+        assert_eq!(6, parsed_flags.parsed_flag.len());
         for pf in parsed_flags.parsed_flag.iter() {
             if pf.name() == "enabled_fixed_ro" {
                 continue;
@@ -433,7 +433,7 @@
         let input = parse_test_flags_as_input();
         let bytes = create_device_config_defaults(input).unwrap();
         let text = std::str::from_utf8(&bytes).unwrap();
-        assert_eq!("aconfig_test:com.android.aconfig.test.disabled_rw=disabled\naconfig_test:com.android.aconfig.test.enabled_rw=enabled\n", text);
+        assert_eq!("aconfig_test:com.android.aconfig.test.disabled_rw=disabled\nother_namespace:com.android.aconfig.test.disabled_rw_2=disabled\naconfig_test:com.android.aconfig.test.enabled_rw=enabled\n", text);
     }
 
     #[test]
@@ -441,7 +441,7 @@
         let input = parse_test_flags_as_input();
         let bytes = create_device_config_sysprops(input).unwrap();
         let text = std::str::from_utf8(&bytes).unwrap();
-        assert_eq!("persist.device_config.com.android.aconfig.test.disabled_rw=false\npersist.device_config.com.android.aconfig.test.enabled_rw=true\n", text);
+        assert_eq!("persist.device_config.com.android.aconfig.test.disabled_rw=false\npersist.device_config.com.android.aconfig.test.disabled_rw_2=false\npersist.device_config.com.android.aconfig.test.enabled_rw=true\n", text);
     }
 
     #[test]
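
The updated test expectations pin down the output contract: one namespace:package.flag=state line per read-write flag, in declaration order. A sketch of a generator producing that shape (the flag dicts are illustrative, not aconfig's real types):

```python
def create_device_config_defaults(flags):
    # One "namespace:qualified_name=state" line per READ_WRITE flag.
    lines = []
    for f in flags:
        if f["permission"] == "READ_WRITE":
            state = "enabled" if f["enabled"] else "disabled"
            lines.append(f"{f['namespace']}:{f['package']}.{f['name']}={state}")
    return "".join(line + "\n" for line in lines)

flags = [
    {"namespace": "aconfig_test", "package": "com.android.aconfig.test",
     "name": "disabled_rw", "permission": "READ_WRITE", "enabled": False},
    {"namespace": "other_namespace", "package": "com.android.aconfig.test",
     "name": "disabled_rw_2", "permission": "READ_WRITE", "enabled": False},
]
assert create_device_config_defaults(flags) == (
    "aconfig_test:com.android.aconfig.test.disabled_rw=disabled\n"
    "other_namespace:com.android.aconfig.test.disabled_rw_2=disabled\n")
```
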
diff --git a/tools/aconfig/src/test.rs b/tools/aconfig/src/test.rs
index 9034704..922d4c6 100644
--- a/tools/aconfig/src/test.rs
+++ b/tools/aconfig/src/test.rs
@@ -60,6 +60,26 @@
 }
 parsed_flag {
   package: "com.android.aconfig.test"
+  name: "disabled_rw_2"
+  namespace: "other_namespace"
+  description: "This flag is DISABLED + READ_WRITE"
+  bug: "999"
+  state: DISABLED
+  permission: READ_WRITE
+  trace {
+    source: "tests/test.aconfig"
+    state: DISABLED
+    permission: READ_WRITE
+  }
+  trace {
+    source: "tests/first.values"
+    state: DISABLED
+    permission: READ_WRITE
+  }
+  is_fixed_read_only: false
+}
+parsed_flag {
+  package: "com.android.aconfig.test"
   name: "enabled_fixed_ro"
   namespace: "aconfig_test"
   description: "This flag is fixed READ_ONLY + ENABLED"
diff --git a/tools/aconfig/templates/FeatureFlagsImpl.java.template b/tools/aconfig/templates/FeatureFlagsImpl.java.template
index ff089df..ec8822c 100644
--- a/tools/aconfig/templates/FeatureFlagsImpl.java.template
+++ b/tools/aconfig/templates/FeatureFlagsImpl.java.template
@@ -8,10 +8,41 @@
 {{ endif }}
 /** @hide */
 public final class FeatureFlagsImpl implements FeatureFlags \{
-{{ if is_read_write- }}
-{{ for properties in properties_set }}
-    private Properties {properties};
+{{- if is_read_write }}
+{{- for namespace_with_flags in namespace_flags }}
+    private static boolean {namespace_with_flags.namespace}_is_cached = false;
+{{- endfor- }}
+
+{{ for flag in flag_elements }}
+{{- if flag.is_read_write }}
+    private static boolean {flag.method_name} = {flag.default_value};
+{{- endif- }}
 {{ endfor }}
+
+{{ for namespace_with_flags in namespace_flags }}
+    private void load_overrides_{namespace_with_flags.namespace}() \{
+        try \{
+            Properties properties = DeviceConfig.getProperties("{namespace_with_flags.namespace}");
+
+            {{- for flag in namespace_with_flags.flags }}
+            {{- if flag.is_read_write }}
+            {flag.method_name} =
+                properties.getBoolean("{flag.device_config_flag}", {flag.default_value});
+            {{- endif- }}
+            {{ endfor }}
+        } catch (NullPointerException e) \{
+            throw new RuntimeException(
+                "Cannot read value from namespace {namespace_with_flags.namespace} "
+                + "from DeviceConfig. It could be that the code using flag "
+                + "executed before SettingsProvider initialization. Please use "
+                + "fixed read-only flag by adding is_fixed_read_only: true in "
+                + "flag declaration.",
+                e
+            );
+        }
+        {namespace_with_flags.namespace}_is_cached = true;
+    }
+{{ endfor- }}
 {{ endif- }}
 
 {{ for flag in flag_elements }}
@@ -19,45 +50,15 @@
     @UnsupportedAppUsage
     public boolean {flag.method_name}() \{
     {{ -if flag.is_read_write }}
-        if ({flag.properties} == null) \{
-            {flag.properties} =
-                getProperties(
-                    "{flag.device_config_namespace}",
-                    "{flag.device_config_flag}"
-                );
+        if (!{flag.device_config_namespace}_is_cached) \{
+            load_overrides_{flag.device_config_namespace}();
         }
-        return {flag.properties}
-                .getBoolean(
-                    "{flag.device_config_flag}",
-                    {flag.default_value}
-                );
+        return {flag.method_name};
     {{ else }}
         return {flag.default_value};
     {{ endif- }}
     }
 {{ endfor }}
-
-{{ -if is_read_write }}
-    private Properties getProperties(
-            String namespace,
-            String flagName) \{
-        Properties properties = null;
-        try \{
-            properties = DeviceConfig.getProperties(namespace);
-        } catch (NullPointerException e) \{
-            throw new RuntimeException(
-                "Cannot read value of flag " + flagName + " from DeviceConfig. "
-                + "It could be that the code using flag executed "
-                + "before SettingsProvider initialization. "
-                + "Please use fixed read-only flag by adding "
-                + "is_fixed_read_only: true in flag declaration.",
-                e
-            );
-        }
-
-        return properties;
-    }
-{{ endif- }}
 }
 {{ else }}
 {#- Generate only stub if in test mode #}
@@ -70,6 +71,6 @@
         throw new UnsupportedOperationException(
             "Method is not implemented.");
     }
-{{ endfor }}
+{{ endfor- }}
 }
 {{ endif }}
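
The template rewrite above replaces one DeviceConfig.getProperties call per flag with one per namespace: the first read of any read-write flag loads and caches every flag in its namespace. A compact Python sketch of the load_overrides_* pattern, with a dict-backed device_config standing in for DeviceConfig:

```python
class FeatureFlagsImpl:
    def __init__(self, device_config):
        # device_config: namespace -> {qualified flag name: bool}
        self._device_config = device_config
        self._cached_namespaces = set()
        self._values = {"disabled_rw": False, "disabled_rw_2": False,
                        "enabled_rw": True}  # declared defaults

    def _load_overrides(self, namespace, flags):
        # One bulk fetch caches every read-write flag in the namespace.
        props = self._device_config.get(namespace, {})
        for method_name, qualified_name in flags:
            if qualified_name in props:
                self._values[method_name] = props[qualified_name]
        self._cached_namespaces.add(namespace)

    def disabled_rw_2(self):
        if "other_namespace" not in self._cached_namespaces:
            self._load_overrides("other_namespace", [
                ("disabled_rw_2", "com.android.aconfig.test.disabled_rw_2")])
        return self._values["disabled_rw_2"]

flags = FeatureFlagsImpl({"other_namespace":
                          {"com.android.aconfig.test.disabled_rw_2": True}})
assert flags.disabled_rw_2() is True
```
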
diff --git a/tools/aconfig/templates/cpp_exported_header.template b/tools/aconfig/templates/cpp_exported_header.template
index 6413699..d19c0fa 100644
--- a/tools/aconfig/templates/cpp_exported_header.template
+++ b/tools/aconfig/templates/cpp_exported_header.template
@@ -18,10 +18,16 @@
 #ifdef __cplusplus
 
 #include <memory>
-
+{{ if not for_test- }}
+#include <vector>
+{{ -endif }}
 
 namespace {cpp_namespace} \{
 
+{{ if not for_test- }}
+extern std::vector<int8_t> cache_;
+{{ -endif }}
+
 class flag_provider_interface \{
 public:
     virtual ~flag_provider_interface() = default;
diff --git a/tools/aconfig/templates/cpp_source_file.template b/tools/aconfig/templates/cpp_source_file.template
index 0f1b845..91e828a 100644
--- a/tools/aconfig/templates/cpp_source_file.template
+++ b/tools/aconfig/templates/cpp_source_file.template
@@ -53,10 +53,13 @@
         {{ for item in class_elements}}
         virtual bool {item.flag_name}() override \{
             {{ if item.readwrite- }}
-            return server_configurable_flags::GetServerConfigurableFlag(
-                "aconfig_flags.{item.device_config_namespace}",
-                "{item.device_config_flag}",
-                "{item.default_value}") == "true";
+            if (cache_[{item.readwrite_idx}] == -1) \{
+                cache_[{item.readwrite_idx}] = server_configurable_flags::GetServerConfigurableFlag(
+                    "aconfig_flags.{item.device_config_namespace}",
+                    "{item.device_config_flag}",
+                    "{item.default_value}") == "true";
+            }
+            return cache_[{item.readwrite_idx}];
             {{ -else- }}
             {{ if item.is_fixed_read_only }}
             return {package_macro}_{item.flag_macro};
@@ -68,13 +71,14 @@
         {{ endfor }}
     };
 
+    std::vector<int8_t> cache_ = std::vector<int8_t>({readwrite_count}, -1);
 
 {{ -endif }}
 
-
 std::unique_ptr<flag_provider_interface> provider_ =
     std::make_unique<flag_provider>();
 
+
 }
 
 
diff --git a/tools/aconfig/templates/rust_prod.template b/tools/aconfig/templates/rust_prod.template
index e22ad6f..30ea646 100644
--- a/tools/aconfig/templates/rust_prod.template
+++ b/tools/aconfig/templates/rust_prod.template
@@ -3,16 +3,27 @@
 /// flag provider
 pub struct FlagProvider;
 
+{{ if has_readwrite - }}
+lazy_static::lazy_static! \{
+    {{ for flag in template_flags }}
+    {{ if flag.readwrite -}}
+    /// flag value cache for {flag.name}
+    static ref CACHED_{flag.name}: bool = flags_rust::GetServerConfigurableFlag(
+        "aconfig_flags.{flag.device_config_namespace}",
+        "{flag.device_config_flag}",
+        "{flag.default_value}") == "true";
+    {{ -endif }}
+    {{ endfor }}
+}
+{{ -endif }}
+
 impl FlagProvider \{
 
     {{ for flag in template_flags }}
     /// query flag {flag.name}
     pub fn {flag.name}(&self) -> bool \{
     {{ if flag.readwrite -}}
-        flags_rust::GetServerConfigurableFlag(
-          "aconfig_flags.{flag.device_config_namespace}",
-          "{flag.device_config_flag}",
-          "{flag.default_value}") == "true"
+        *CACHED_{flag.name}
     {{ -else- }}
         {flag.default_value}
     {{ -endif }}
diff --git a/tools/aconfig/tests/first.values b/tools/aconfig/tests/first.values
index a450f78..448fb3a 100644
--- a/tools/aconfig/tests/first.values
+++ b/tools/aconfig/tests/first.values
@@ -18,6 +18,12 @@
 }
 flag_value {
     package: "com.android.aconfig.test"
+    name: "disabled_rw_2"
+    state: DISABLED
+    permission: READ_WRITE
+}
+flag_value {
+    package: "com.android.aconfig.test"
     name: "enabled_fixed_ro"
     state: ENABLED
     permission: READ_ONLY
diff --git a/tools/aconfig/tests/test.aconfig b/tools/aconfig/tests/test.aconfig
index aaa6df5..3076f5e 100644
--- a/tools/aconfig/tests/test.aconfig
+++ b/tools/aconfig/tests/test.aconfig
@@ -51,3 +51,10 @@
     bug: ""
     is_fixed_read_only: true
 }
+
+flag {
+    name: "disabled_rw_2"
+    namespace: "other_namespace"
+    description: "This flag is DISABLED + READ_WRITE"
+    bug: "999"
+}
diff --git a/tools/finalization/environment.sh b/tools/finalization/environment.sh
index 9714ac4..d9c42c8 100755
--- a/tools/finalization/environment.sh
+++ b/tools/finalization/environment.sh
@@ -22,3 +22,5 @@
 # 'sdk' - SDK/API is finalized
 # 'rel' - branch is finalized, switched to REL
 export FINAL_STATE='unfinalized'
+
+export BUILD_FROM_SOURCE_STUB=true
\ No newline at end of file
diff --git a/tools/metadata/Android.bp b/tools/metadata/Android.bp
new file mode 100644
index 0000000..b2fabec
--- /dev/null
+++ b/tools/metadata/Android.bp
@@ -0,0 +1,14 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+blueprint_go_binary {
+    name: "metadata",
+    deps: [
+        "soong-testing-test_spec_proto",
+        "golang-protobuf-proto",
+    ],
+    srcs: [
+        "generator.go",
+    ],
+}
\ No newline at end of file
diff --git a/tools/metadata/OWNERS b/tools/metadata/OWNERS
new file mode 100644
index 0000000..03bcdf1
--- /dev/null
+++ b/tools/metadata/OWNERS
@@ -0,0 +1,4 @@
+dariofreni@google.com
+joeo@google.com
+ronish@google.com
+caditya@google.com
diff --git a/tools/metadata/generator.go b/tools/metadata/generator.go
new file mode 100644
index 0000000..eb87755
--- /dev/null
+++ b/tools/metadata/generator.go
@@ -0,0 +1,177 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"sort"
+	"strings"
+	"sync"
+
+	"android/soong/testing/test_spec_proto"
+	"google.golang.org/protobuf/proto"
+)
+
+type keyToLocksMap struct {
+	locks sync.Map
+}
+
+func (kl *keyToLocksMap) GetLockForKey(key string) *sync.Mutex {
+	mutex, _ := kl.locks.LoadOrStore(key, &sync.Mutex{})
+	return mutex.(*sync.Mutex)
+}
+
+func getSortedKeys(syncMap *sync.Map) []string {
+	var allKeys []string
+	syncMap.Range(
+		func(key, _ interface{}) bool {
+			allKeys = append(allKeys, key.(string))
+			return true
+		},
+	)
+
+	sort.Strings(allKeys)
+	return allKeys
+}
+
+func writeOutput(
+	outputFile string,
+	allMetadata []*test_spec_proto.TestSpec_OwnershipMetadata,
+) {
+	testSpec := &test_spec_proto.TestSpec{
+		OwnershipMetadataList: allMetadata,
+	}
+	data, err := proto.Marshal(testSpec)
+	if err != nil {
+		log.Fatal(err)
+	}
+	file, err := os.Create(outputFile)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer file.Close()
+
+	_, err = file.Write(data)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func readFileToString(filePath string) string {
+	file, err := os.Open(filePath)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer file.Close()
+
+	data, err := io.ReadAll(file)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return string(data)
+}
+
+func processTestSpecProtobuf(
+	filePath string, ownershipMetadataMap *sync.Map, keyLocks *keyToLocksMap,
+	errCh chan error, wg *sync.WaitGroup,
+) {
+	defer wg.Done()
+
+	fileContent := strings.TrimRight(readFileToString(filePath), "\n")
+	testData := test_spec_proto.TestSpec{}
+	err := proto.Unmarshal([]byte(fileContent), &testData)
+	if err != nil {
+		errCh <- err
+		return
+	}
+
+	ownershipMetadata := testData.GetOwnershipMetadataList()
+	for _, metadata := range ownershipMetadata {
+		key := metadata.GetTargetName()
+		lock := keyLocks.GetLockForKey(key)
+		lock.Lock()
+
+		value, loaded := ownershipMetadataMap.LoadOrStore(
+			key, []*test_spec_proto.TestSpec_OwnershipMetadata{metadata},
+		)
+		if loaded {
+			existingMetadata := value.([]*test_spec_proto.TestSpec_OwnershipMetadata)
+			isDuplicate := false
+			for _, existing := range existingMetadata {
+				if metadata.GetTrendyTeamId() != existing.GetTrendyTeamId() {
+					errCh <- fmt.Errorf(
+						"Conflicting trendy team IDs found for %s at:\n%s with teamId"+
+							": %s,\n%s with teamId: %s",
+						key,
+						metadata.GetPath(), metadata.GetTrendyTeamId(), existing.GetPath(),
+						existing.GetTrendyTeamId(),
+					)
+
+					lock.Unlock()
+					return
+				}
+				if metadata.GetTrendyTeamId() == existing.GetTrendyTeamId() && metadata.GetPath() == existing.GetPath() {
+					isDuplicate = true
+					break
+				}
+			}
+			if !isDuplicate {
+				existingMetadata = append(existingMetadata, metadata)
+				ownershipMetadataMap.Store(key, existingMetadata)
+			}
+		}
+
+		lock.Unlock()
+	}
+}
+
+func main() {
+	inputFile := flag.String("inputFile", "", "Input file path")
+	outputFile := flag.String("outputFile", "", "Output file path")
+	rule := flag.String("rule", "", "Metadata rule (Hint: test_spec or code_metadata)")
+	flag.Parse()
+
+	if *inputFile == "" || *outputFile == "" || *rule == "" {
+		fmt.Println("Usage: metadata -rule <rule> -inputFile <input file path> -outputFile <output file path>")
+		os.Exit(1)
+	}
+
+	inputFileData := strings.TrimRight(readFileToString(*inputFile), "\n")
+	filePaths := strings.Split(inputFileData, "\n")
+	ownershipMetadataMap := &sync.Map{}
+	keyLocks := &keyToLocksMap{}
+	errCh := make(chan error, len(filePaths))
+	var wg sync.WaitGroup
+
+	switch *rule {
+	case "test_spec":
+		for _, filePath := range filePaths {
+			wg.Add(1)
+			go processTestSpecProtobuf(filePath, ownershipMetadataMap, keyLocks, errCh, &wg)
+		}
+
+		wg.Wait()
+		close(errCh)
+
+		for err := range errCh {
+			log.Fatal(err)
+		}
+
+		allKeys := getSortedKeys(ownershipMetadataMap)
+		var allMetadata []*test_spec_proto.TestSpec_OwnershipMetadata
+
+		for _, key := range allKeys {
+			value, _ := ownershipMetadataMap.Load(key)
+			metadataList := value.([]*test_spec_proto.TestSpec_OwnershipMetadata)
+			allMetadata = append(allMetadata, metadataList...)
+		}
+
+		writeOutput(*outputFile, allMetadata)
+		break
+	case "code_metadata":
+	default:
+		log.Fatalf("No specific processing implemented for rule '%s'.\n", *rule)
+	}
+}
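
generator.go serializes merges per target: keyToLocksMap lazily creates one mutex per key via sync.Map.LoadOrStore, so two workers merging the same target take the same lock while unrelated targets proceed in parallel, and conflicting trendy team IDs for one target abort the run. A threaded Python sketch of the same idea, with dict-shaped records standing in for the protobuf messages:

```python
import threading
from collections import defaultdict

class KeyToLocksMap:
    # Lazily hands out one lock per key, like sync.Map.LoadOrStore.
    def __init__(self):
        self._guard = threading.Lock()
        self._locks = {}

    def get_lock_for_key(self, key):
        with self._guard:
            return self._locks.setdefault(key, threading.Lock())

metadata = defaultdict(list)  # target name -> ownership records
locks = KeyToLocksMap()

def merge_ownership(key, record):
    with locks.get_lock_for_key(key):
        for existing in metadata[key]:
            if existing["team_id"] != record["team_id"]:
                raise ValueError(f"Conflicting trendy team IDs found for {key}")
        if record not in metadata[key]:  # drop exact duplicates
            metadata[key].append(record)

merge_ownership("java-test-module-name-one",
                {"path": "Android.bp", "team_id": "12345"})
```
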
diff --git a/tools/metadata/go.mod b/tools/metadata/go.mod
new file mode 100644
index 0000000..e9d04b1
--- /dev/null
+++ b/tools/metadata/go.mod
@@ -0,0 +1,7 @@
+module android/soong/tools/metadata
+
+require google.golang.org/protobuf v0.0.0
+
+replace google.golang.org/protobuf v0.0.0 => ../../../external/golang-protobuf
+
+go 1.18
\ No newline at end of file
diff --git a/tools/metadata/go.work b/tools/metadata/go.work
new file mode 100644
index 0000000..23875da
--- /dev/null
+++ b/tools/metadata/go.work
@@ -0,0 +1,10 @@
+go 1.18
+
+use (
+	.
+	../../../../external/golang-protobuf
+	../../../soong/testing/test_spec_proto
+
+)
+
+replace google.golang.org/protobuf v0.0.0 => ../../../../external/golang-protobuf
diff --git a/tools/metadata/testdata/expectedOutputFile.txt b/tools/metadata/testdata/expectedOutputFile.txt
new file mode 100644
index 0000000..b0d382f
--- /dev/null
+++ b/tools/metadata/testdata/expectedOutputFile.txt
@@ -0,0 +1,22 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
\ No newline at end of file
diff --git a/tools/metadata/testdata/file1.txt b/tools/metadata/testdata/file1.txt
new file mode 100644
index 0000000..81beed0
--- /dev/null
+++ b/tools/metadata/testdata/file1.txt
@@ -0,0 +1,13 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
diff --git a/tools/metadata/testdata/file2.txt b/tools/metadata/testdata/file2.txt
new file mode 100644
index 0000000..32a753f
--- /dev/null
+++ b/tools/metadata/testdata/file2.txt
@@ -0,0 +1,25 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
diff --git a/tools/metadata/testdata/file3.txt b/tools/metadata/testdata/file3.txt
new file mode 100644
index 0000000..81beed0
--- /dev/null
+++ b/tools/metadata/testdata/file3.txt
@@ -0,0 +1,13 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
diff --git a/tools/metadata/testdata/file4.txt b/tools/metadata/testdata/file4.txt
new file mode 100644
index 0000000..6a75900
--- /dev/null
+++ b/tools/metadata/testdata/file4.txt
@@ -0,0 +1,25 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-one
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
diff --git a/tools/metadata/testdata/generatedOutputFile.txt b/tools/metadata/testdata/generatedOutputFile.txt
new file mode 100644
index 0000000..b0d382f
--- /dev/null
+++ b/tools/metadata/testdata/generatedOutputFile.txt
@@ -0,0 +1,22 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
\ No newline at end of file
diff --git a/tools/metadata/testdata/inputFiles.txt b/tools/metadata/testdata/inputFiles.txt
new file mode 100644
index 0000000..61e6a8d
--- /dev/null
+++ b/tools/metadata/testdata/inputFiles.txt
@@ -0,0 +1,2 @@
+file1.txt
+file2.txt
\ No newline at end of file
diff --git a/tools/metadata/testdata/inputFilesNegativeCase.txt b/tools/metadata/testdata/inputFilesNegativeCase.txt
new file mode 100644
index 0000000..17a9480
--- /dev/null
+++ b/tools/metadata/testdata/inputFilesNegativeCase.txt
@@ -0,0 +1,2 @@
+file3.txt
+file4.txt
\ No newline at end of file
diff --git a/tools/metadata/testdata/metadata_test.go b/tools/metadata/testdata/metadata_test.go
new file mode 100644
index 0000000..03c4f29
--- /dev/null
+++ b/tools/metadata/testdata/metadata_test.go
@@ -0,0 +1,65 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+func TestMetadata(t *testing.T) {
+	cmd := exec.Command(
+		"metadata", "-rule", "test_spec", "-inputFile", "./inputFiles.txt", "-outputFile",
+		"./generatedOutputFile.txt",
+	)
+	stderr, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Error running metadata command: %s. Error: %v", stderr, err)
+	}
+
+	// Read the contents of the expected output file
+	expectedOutput, err := ioutil.ReadFile("./expectedOutputFile.txt")
+	if err != nil {
+		t.Fatalf("Error reading expected output file: %s", err)
+	}
+
+	// Read the contents of the generated output file
+	generatedOutput, err := ioutil.ReadFile("./generatedOutputFile.txt")
+	if err != nil {
+		t.Fatalf("Error reading generated output file: %s", err)
+	}
+
+	fmt.Println()
+
+	// Compare the contents
+	if string(expectedOutput) != string(generatedOutput) {
+		t.Errorf("Generated file contents do not match the expected output")
+	}
+}
+
+func TestMetadataNegativeCase(t *testing.T) {
+	cmd := exec.Command(
+		"metadata", "-rule", "test_spec", "-inputFile", "./inputFilesNegativeCase.txt", "-outputFile",
+		"./generatedOutputFileNegativeCase.txt",
+	)
+	stderr, err := cmd.CombinedOutput()
+	if err == nil {
+		t.Fatalf(
+			"Expected an error, but the metadata command executed successfully. Output: %s",
+			stderr,
+		)
+	}
+
+	expectedError := "Conflicting trendy team IDs found for java-test-module" +
+		"-name-one at:\nAndroid.bp with teamId: 12346," +
+		"\nAndroid.bp with teamId: 12345"
+	if !strings.Contains(
+		strings.TrimSpace(string(stderr)), strings.TrimSpace(expectedError),
+	) {
+		t.Errorf(
+			"Unexpected error message. Expected to contain: %s, Got: %s",
+			expectedError, stderr,
+		)
+	}
+}
diff --git a/tools/metadata/testdata/outputFile.txt b/tools/metadata/testdata/outputFile.txt
new file mode 100644
index 0000000..b0d382f
--- /dev/null
+++ b/tools/metadata/testdata/outputFile.txt
@@ -0,0 +1,22 @@
+
+.
+java-test-module-name-one
+Android.bp12345
+.
+java-test-module-name-six
+Android.bp12346
+.
+java-test-module-name-six
+Aqwerty.bp12346
+.
+java-test-module-name-six
+Apoiuyt.bp12346
+.
+java-test-module-name-two
+Android.bp12345
+.
+java-test-module-name-two
+Asdfghj.bp12345
+.
+java-test-module-name-two
+Azxcvbn.bp12345
\ No newline at end of file
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 31a460d..32829c1 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -39,54 +39,26 @@
         val = val + ",adb"
       prop_list.put("persist.sys.usb.config", val)
 
-def validate_grf_props(prop_list, sdk_version):
+def validate_grf_props(prop_list):
   """Validate GRF properties if exist.
 
-  If ro.board.first_api_level is defined, check if its value is valid for the
-  sdk version. This is only for the release version.
-  Also, validate the value of ro.board.api_level if defined.
+  If ro.board.first_api_level is defined, check if its value is valid.
 
   Returns:
     True if the GRF properties are valid.
   """
   grf_api_level = prop_list.get_value("ro.board.first_api_level")
   board_api_level = prop_list.get_value("ro.board.api_level")
-  platform_version_codename = prop_list.get_value("ro.build.version.codename")
 
-  if not grf_api_level:
-    if board_api_level:
-      sys.stderr.write("error: non-GRF device must not define "
-                       "ro.board.api_level\n")
-      return False
-    # non-GRF device skips the GRF validation test
-    return True
-
-  grf_api_level = int(grf_api_level)
-  if board_api_level:
+  if grf_api_level and board_api_level:
+    grf_api_level = int(grf_api_level)
     board_api_level = int(board_api_level)
     if board_api_level < grf_api_level:
-      sys.stderr.write("error: ro.board.api_level(%d) must be greater than "
+      sys.stderr.write("error: ro.board.api_level(%d) must not be less than "
                        "ro.board.first_api_level(%d)\n"
                        % (board_api_level, grf_api_level))
       return False
 
-  # skip sdk version validation for dev-stage non-REL devices
-  if platform_version_codename != "REL":
-    return True
-
-  if grf_api_level > sdk_version:
-    sys.stderr.write("error: ro.board.first_api_level(%d) must be less than "
-                     "or equal to ro.build.version.sdk(%d)\n"
-                     % (grf_api_level, sdk_version))
-    return False
-
-  if board_api_level:
-    if board_api_level > sdk_version:
-      sys.stderr.write("error: ro.board.api_level(%d) must be less than or "
-                       "equal to ro.build.version.sdk(%d)\n"
-                       % (board_api_level, sdk_version))
-      return False
-
   return True
 
 def validate(prop_list):
@@ -271,7 +243,7 @@
   mangle_build_prop(props)
   if not override_optional_props(props, args.allow_dup):
     sys.exit(1)
-  if not validate_grf_props(props, args.sdk_version):
+  if not validate_grf_props(props):
     sys.exit(1)
   if not validate(props):
     sys.exit(1)
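
After this change validate_grf_props no longer compares either property against ro.build.version.sdk; the only remaining rule is that ro.board.api_level, when set alongside ro.board.first_api_level, must not be lower. A standalone sketch of the surviving check, with a plain dict replacing the real prop_list:

```python
import sys

def validate_grf_props(props):
    # Only rule left: ro.board.api_level >= ro.board.first_api_level.
    grf = props.get("ro.board.first_api_level")
    board = props.get("ro.board.api_level")
    if grf and board and int(board) < int(grf):
        sys.stderr.write("error: ro.board.api_level(%d) must not be less than "
                         "ro.board.first_api_level(%d)\n" % (int(board), int(grf)))
        return False
    return True

assert validate_grf_props({"ro.board.first_api_level": "33",
                           "ro.board.api_level": "34"})
assert not validate_grf_props({"ro.board.first_api_level": "34",
                               "ro.board.api_level": "33"})
```
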
diff --git a/tools/rbcrun/host.go b/tools/rbcrun/host.go
index 1d68d43..8cd2845 100644
--- a/tools/rbcrun/host.go
+++ b/tools/rbcrun/host.go
@@ -24,18 +24,17 @@
 	"strings"
 
 	"go.starlark.net/starlark"
-	"go.starlark.net/starlarkjson"
 	"go.starlark.net/starlarkstruct"
 )
 
 type ExecutionMode int
 const (
 	ExecutionModeRbc ExecutionMode = iota
-	ExecutionModeMake ExecutionMode = iota
+	ExecutionModeScl ExecutionMode = iota
 )
 
 const allowExternalEntrypointKey = "allowExternalEntrypoint"
-const callerDirKey = "callerDir"
+const callingFileKey = "callingFile"
 const executionModeKey = "executionMode"
 const shellKey = "shell"
 
@@ -58,9 +57,16 @@
 	"rblf_wildcard": starlark.NewBuiltin("rblf_wildcard", wildcard),
 }
 
-var makeBuiltins starlark.StringDict = starlark.StringDict{
+var sclBuiltins starlark.StringDict = starlark.StringDict{
 	"struct":   starlark.NewBuiltin("struct", starlarkstruct.Make),
-	"json": starlarkjson.Module,
+}
+
+func isSymlink(filepath string) (bool, error) {
+	if info, err := os.Lstat(filepath); err == nil {
+		return info.Mode() & os.ModeSymlink != 0, nil
+	} else {
+		return false, err
+	}
 }
 
 // Takes a module name (the first argument to the load() function) and returns the path
@@ -128,7 +134,8 @@
 			module = module[:pipePos]
 		}
 	}
-	modulePath, err := cleanModuleName(module, thread.Local(callerDirKey).(string), allowExternalEntrypoint)
+	callingFile := thread.Local(callingFileKey).(string)
+	modulePath, err := cleanModuleName(module, filepath.Dir(callingFile), allowExternalEntrypoint)
 	if err != nil {
 		return nil, err
 	}
@@ -150,6 +157,20 @@
 
 		// Load or return default
 		if mustLoad {
+			if strings.HasSuffix(callingFile, ".scl") && !strings.HasSuffix(modulePath, ".scl") {
+				return nil, fmt.Errorf(".scl files can only load other .scl files: %q loads %q", callingFile, modulePath)
+			}
+			// Switch into scl mode from here on
+			if strings.HasSuffix(modulePath, ".scl") {
+				mode = ExecutionModeScl
+			}
+
+			if sym, err := isSymlink(modulePath); sym && err == nil {
+				return nil, fmt.Errorf("symlinks to starlark files are not allowed. Instead, load the target file and re-export its symbols: %s", modulePath)
+			} else if err != nil {
+				return nil, err
+			}
+
 			childThread := &starlark.Thread{Name: "exec " + module, Load: thread.Load}
 			// Cheating for the sake of testing:
 			// propagate starlarktest's Reporter key, otherwise testing
@@ -161,14 +182,14 @@
 
 			// Only the entrypoint starlark file allows external loads.
 			childThread.SetLocal(allowExternalEntrypointKey, false)
-			childThread.SetLocal(callerDirKey, filepath.Dir(modulePath))
+			childThread.SetLocal(callingFileKey, modulePath)
 			childThread.SetLocal(executionModeKey, mode)
 			childThread.SetLocal(shellKey, thread.Local(shellKey))
 			if mode == ExecutionModeRbc {
 				globals, err := starlark.ExecFile(childThread, modulePath, nil, rbcBuiltins)
 				e = &modentry{globals, err}
-			} else if mode == ExecutionModeMake {
-				globals, err := starlark.ExecFile(childThread, modulePath, nil, makeBuiltins)
+			} else if mode == ExecutionModeScl {
+				globals, err := starlark.ExecFile(childThread, modulePath, nil, sclBuiltins)
 				e = &modentry{globals, err}
 			} else {
 				return nil, fmt.Errorf("unknown executionMode %d", mode)
@@ -338,7 +359,7 @@
 			if mode == ExecutionModeRbc {
 				// In rbc mode, rblf_log is used to print to stderr
 				fmt.Println(msg)
-			} else if mode == ExecutionModeMake {
+			} else if mode == ExecutionModeScl {
 				fmt.Fprintln(os.Stderr, msg)
 			}
 		},
@@ -360,18 +381,28 @@
 		return nil, nil, err
 	}
 
+	if sym, err := isSymlink(filename); sym && err == nil {
+		return nil, nil, fmt.Errorf("symlinks to starlark files are not allowed. Instead, load the target file and re-export its symbols: %s", filename)
+	} else if err != nil {
+		return nil, nil, err
+	}
+
+	if mode == ExecutionModeScl && !strings.HasSuffix(filename, ".scl") {
+		return nil, nil, fmt.Errorf("filename must end in .scl: %s", filename)
+	}
+
 	// Add top-level file to cache for cycle detection purposes
 	moduleCache[filename] = nil
 
 	var results starlark.StringDict
 	mainThread.SetLocal(allowExternalEntrypointKey, allowExternalEntrypoint)
-	mainThread.SetLocal(callerDirKey, filepath.Dir(filename))
+	mainThread.SetLocal(callingFileKey, filename)
 	mainThread.SetLocal(executionModeKey, mode)
 	mainThread.SetLocal(shellKey, shellPath)
 	if mode == ExecutionModeRbc {
 		results, err = starlark.ExecFile(mainThread, filename, src, rbcBuiltins)
-	} else if mode == ExecutionModeMake {
-		results, err = starlark.ExecFile(mainThread, filename, src, makeBuiltins)
+	} else if mode == ExecutionModeScl {
+		results, err = starlark.ExecFile(mainThread, filename, src, sclBuiltins)
 	} else {
 		return results, nil, fmt.Errorf("unknown executionMode %d", mode)
 	}
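
host.go now enforces two rules at load time: an .scl file may only load other .scl files (and loading any .scl switches the thread into scl mode), and symlinked Starlark sources are rejected outright. A Python sketch of just the two checks:

```python
import os

def check_starlark_load(calling_file: str, module_path: str) -> None:
    # .scl purity: an .scl caller may not load a non-.scl module.
    if calling_file.endswith(".scl") and not module_path.endswith(".scl"):
        raise ValueError(".scl files can only load other .scl files: "
                         f"{calling_file!r} loads {module_path!r}")
    # Symlinks are banned; load the target file and re-export instead.
    if os.path.islink(module_path):
        raise ValueError("symlinks to starlark files are not allowed. Instead, "
                         "load the target file and re-export its symbols: "
                         + module_path)
```
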
diff --git a/tools/rbcrun/host_test.go b/tools/rbcrun/host_test.go
index 10ce55e..38b2923 100644
--- a/tools/rbcrun/host_test.go
+++ b/tools/rbcrun/host_test.go
@@ -19,6 +19,7 @@
 	"os"
 	"path/filepath"
 	"runtime"
+	"strings"
 	"testing"
 
 	"go.starlark.net/resolve"
@@ -126,7 +127,7 @@
 		t.Fatal(err)
 	}
 	thread.SetLocal(allowExternalEntrypointKey, false)
-	thread.SetLocal(callerDirKey, dir)
+	thread.SetLocal(callingFileKey, "testdata/load.star")
 	thread.SetLocal(executionModeKey, ExecutionModeRbc)
 	if _, err := starlark.ExecFile(thread, "testdata/load.star", nil, rbcBuiltins); err != nil {
 		if err, ok := err.(*starlark.EvalError); ok {
@@ -136,6 +137,70 @@
 	}
 }
 
+func TestBzlLoadsScl(t *testing.T) {
+	moduleCache = make(map[string]*modentry)
+	dir := dataDir()
+	if err := os.Chdir(filepath.Dir(dir)); err != nil {
+		t.Fatal(err)
+	}
+	vars, _, err := Run("testdata/bzl_loads_scl.bzl", nil, ExecutionModeRbc, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if val, ok := vars["foo"]; !ok {
+		t.Fatalf("Failed to load foo variable")
+	} else if val.(starlark.String) != "bar" {
+		t.Fatalf("Expected \"bar\", got %q", val)
+	}
+}
+
+func TestNonEntrypointBzlLoadsScl(t *testing.T) {
+	moduleCache = make(map[string]*modentry)
+	dir := dataDir()
+	if err := os.Chdir(filepath.Dir(dir)); err != nil {
+		t.Fatal(err)
+	}
+	vars, _, err := Run("testdata/bzl_loads_scl_2.bzl", nil, ExecutionModeRbc, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if val, ok := vars["foo"]; !ok {
+		t.Fatalf("Failed to load foo variable")
+	} else if val.(starlark.String) != "bar" {
+		t.Fatalf("Expected \"bar\", got %q", val)
+	}
+}
+
+func TestSclLoadsBzl(t *testing.T) {
+	moduleCache = make(map[string]*modentry)
+	dir := dataDir()
+	if err := os.Chdir(filepath.Dir(dir)); err != nil {
+		t.Fatal(err)
+	}
+	_, _, err := Run("testdata/scl_incorrectly_loads_bzl.scl", nil, ExecutionModeScl, false)
+	if err == nil {
+		t.Fatal("Expected failure")
+	}
+	if !strings.Contains(err.Error(), ".scl files can only load other .scl files") {
+		t.Fatalf("Expected error to contain \".scl files can only load other .scl files\": %q", err.Error())
+	}
+}
+
+func TestCantLoadSymlink(t *testing.T) {
+	moduleCache = make(map[string]*modentry)
+	dir := dataDir()
+	if err := os.Chdir(filepath.Dir(dir)); err != nil {
+		t.Fatal(err)
+	}
+	_, _, err := Run("testdata/test_scl_symlink.scl", nil, ExecutionModeScl, false)
+	if err == nil {
+		t.Fatal("Expected failure")
+	}
+	if !strings.Contains(err.Error(), "symlinks to starlark files are not allowed") {
+		t.Fatalf("Expected error to contain \"symlinks to starlark files are not allowed\": %q", err.Error())
+	}
+}
+
 func TestShell(t *testing.T) {
 	exerciseStarlarkTestFile(t, "testdata/shell.star")
 }
diff --git a/tools/rbcrun/rbcrun/rbcrun.go b/tools/rbcrun/rbcrun/rbcrun.go
index a15b867..8c372c7 100644
--- a/tools/rbcrun/rbcrun/rbcrun.go
+++ b/tools/rbcrun/rbcrun/rbcrun.go
@@ -55,13 +55,13 @@
 	case "rbc":
 		return rbcrun.ExecutionModeRbc
 	case "make":
-		return rbcrun.ExecutionModeMake
+		return rbcrun.ExecutionModeScl
 	case "":
 		quit("-mode flag is required.")
 	default:
 		quit("Unknown -mode value %q, expected 1 of \"rbc\", \"make\"", *modeFlag)
 	}
-	return rbcrun.ExecutionModeMake
+	return rbcrun.ExecutionModeScl
 }
 
 var makeStringReplacer = strings.NewReplacer("#", "\\#", "$", "$$")
@@ -175,7 +175,7 @@
 			quit("%s\n", err)
 		}
 	}
-	if mode == rbcrun.ExecutionModeMake {
+	if mode == rbcrun.ExecutionModeScl {
 		if err := printVarsInMakeFormat(variables); err != nil {
 			quit("%s\n", err)
 		}
diff --git a/tools/rbcrun/testdata/bzl_loads_scl.bzl b/tools/rbcrun/testdata/bzl_loads_scl.bzl
new file mode 100644
index 0000000..e8deca3
--- /dev/null
+++ b/tools/rbcrun/testdata/bzl_loads_scl.bzl
@@ -0,0 +1,3 @@
+load(":test_scl.scl", _foo = "foo")
+
+foo = _foo
diff --git a/tools/rbcrun/testdata/bzl_loads_scl_2.bzl b/tools/rbcrun/testdata/bzl_loads_scl_2.bzl
new file mode 100644
index 0000000..9a680ed
--- /dev/null
+++ b/tools/rbcrun/testdata/bzl_loads_scl_2.bzl
@@ -0,0 +1,3 @@
+load(":bzl_loads_scl.bzl", _foo = "foo")
+
+foo = _foo
diff --git a/tools/rbcrun/testdata/scl_incorrectly_loads_bzl.scl b/tools/rbcrun/testdata/scl_incorrectly_loads_bzl.scl
new file mode 100644
index 0000000..9a680ed
--- /dev/null
+++ b/tools/rbcrun/testdata/scl_incorrectly_loads_bzl.scl
@@ -0,0 +1,3 @@
+load(":bzl_loads_scl.bzl", _foo = "foo")
+
+foo = _foo
diff --git a/tools/rbcrun/testdata/test_scl.scl b/tools/rbcrun/testdata/test_scl.scl
new file mode 100644
index 0000000..6360ccb
--- /dev/null
+++ b/tools/rbcrun/testdata/test_scl.scl
@@ -0,0 +1,2 @@
+
+foo = "bar"
diff --git a/tools/rbcrun/testdata/test_scl_symlink.scl b/tools/rbcrun/testdata/test_scl_symlink.scl
new file mode 120000
index 0000000..3f5aef4
--- /dev/null
+++ b/tools/rbcrun/testdata/test_scl_symlink.scl
@@ -0,0 +1 @@
+test_scl.scl
\ No newline at end of file
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index ad014af..ee266b7 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -483,13 +483,8 @@
     defaults: ["releasetools_binary_defaults"],
     srcs: [
         "make_recovery_patch.py",
-        "non_ab_ota.py",
-        "edify_generator.py",
-        "check_target_files_vintf.py",
     ],
     libs: [
-        "ota_utils_lib",
-        "ota_metadata_proto",
         "releasetools_common",
     ],
 }
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index e7d3a18..33624f5 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -31,6 +31,7 @@
 import zipfile
 
 import common
+from apex_manifest import ParseApexManifest
 
 logger = logging.getLogger(__name__)
 
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 462c3bf..8ce6083 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -15,6 +15,7 @@
 from __future__ import print_function
 
 import base64
+import collections
 import copy
 import datetime
 import errno
@@ -22,6 +23,7 @@
 import getopt
 import getpass
 import gzip
+import imp
 import json
 import logging
 import logging.config
@@ -34,17 +36,26 @@
 import stat
 import sys
 import tempfile
+import threading
+import time
 import zipfile
+
+from typing import Iterable, Callable
 from dataclasses import dataclass
 from hashlib import sha1, sha256
 
 import images
 import sparse_img
-
+from blockimgdiff import BlockImageDiff
 
 logger = logging.getLogger(__name__)
 
 
+@dataclass
+class OptionHandler:
+  extra_long_opts: Iterable[str]
+  handler: Callable
+
 class Options(object):
 
   def __init__(self):
@@ -149,6 +160,35 @@
         self.partition, self.rollback_index_location, self.pubkey_path)
 
 
+class ErrorCode(object):
+  """Define error_codes for failures that happen during the actual
+  update package installation.
+
+  Error codes 0-999 are reserved for failures before the package
+  installation (e.g. low battery, package verification failure).
+  Detailed codes are listed in 'bootable/recovery/error_code.h'. """
+
+  SYSTEM_VERIFICATION_FAILURE = 1000
+  SYSTEM_UPDATE_FAILURE = 1001
+  SYSTEM_UNEXPECTED_CONTENTS = 1002
+  SYSTEM_NONZERO_CONTENTS = 1003
+  SYSTEM_RECOVER_FAILURE = 1004
+  VENDOR_VERIFICATION_FAILURE = 2000
+  VENDOR_UPDATE_FAILURE = 2001
+  VENDOR_UNEXPECTED_CONTENTS = 2002
+  VENDOR_NONZERO_CONTENTS = 2003
+  VENDOR_RECOVER_FAILURE = 2004
+  OEM_PROP_MISMATCH = 3000
+  FINGERPRINT_MISMATCH = 3001
+  THUMBPRINT_MISMATCH = 3002
+  OLDER_BUILD = 3003
+  DEVICE_MISMATCH = 3004
+  BAD_PATCH_FILE = 3005
+  INSUFFICIENT_CACHE_SPACE = 3006
+  TUNE_PARTITION_FAILURE = 3007
+  APPLY_PATCH_FAILURE = 3008
+
+
 class ExternalError(RuntimeError):
   pass
 
@@ -2758,12 +2798,19 @@
 def ParseOptions(argv,
                  docstring,
                  extra_opts="", extra_long_opts=(),
-                 extra_option_handler=None):
+                 extra_option_handler: Iterable[OptionHandler] = None):
   """Parse the options in argv and return any arguments that aren't
   flags.  docstring is the calling module's docstring, to be displayed
   for errors and -h.  extra_opts and extra_long_opts are for flags
   defined by the caller, which are processed by passing them to
   extra_option_handler."""
+  extra_long_opts = list(extra_long_opts)
+  if not isinstance(extra_option_handler, Iterable):
+    extra_option_handler = [extra_option_handler]
+
+  for handler in extra_option_handler:
+    if isinstance(handler, OptionHandler):
+      extra_long_opts.extend(handler.extra_long_opts)
 
   try:
     opts, args = getopt.getopt(
@@ -2825,8 +2872,19 @@
     elif o in ("--logfile",):
       OPTIONS.logfile = a
     else:
-      if extra_option_handler is None or not extra_option_handler(o, a):
-        assert False, "unknown option \"%s\"" % (o,)
+      if extra_option_handler is None:
+        raise ValueError("unknown option \"%s\"" % (o,))
+      success = False
+      for handler in extra_option_handler:
+        if isinstance(handler, OptionHandler):
+          if handler.handler(o, a):
+            success = True
+            break
+        elif handler(o, a):
+          success = True
+      if not success:
+        raise ValueError("unknown option \"%s\"" % (o,))
+
 
   if OPTIONS.search_path:
     os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
@@ -3104,6 +3162,107 @@
   zipfile.ZIP64_LIMIT = saved_zip64_limit
 
 
+class DeviceSpecificParams(object):
+  module = None
+
+  def __init__(self, **kwargs):
+    """Keyword arguments to the constructor become attributes of this
+    object, which is passed to all functions in the device-specific
+    module."""
+    for k, v in kwargs.items():
+      setattr(self, k, v)
+    self.extras = OPTIONS.extras
+
+    if self.module is None:
+      path = OPTIONS.device_specific
+      if not path:
+        return
+      try:
+        if os.path.isdir(path):
+          info = imp.find_module("releasetools", [path])
+        else:
+          d, f = os.path.split(path)
+          b, x = os.path.splitext(f)
+          if x == ".py":
+            f = b
+          info = imp.find_module(f, [d])
+        logger.info("loaded device-specific extensions from %s", path)
+        self.module = imp.load_module("device_specific", *info)
+      except ImportError:
+        logger.info("unable to load device-specific module; assuming none")
+
+  def _DoCall(self, function_name, *args, **kwargs):
+    """Call the named function in the device-specific module, passing
+    the given args and kwargs.  The first argument to the call will be
+    the DeviceSpecific object itself.  If there is no module, or the
+    module does not define the function, return the value of the
+    'default' kwarg (which itself defaults to None)."""
+    if self.module is None or not hasattr(self.module, function_name):
+      return kwargs.get("default")
+    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
+
+  def FullOTA_Assertions(self):
+    """Called after emitting the block of assertions at the top of a
+    full OTA package.  Implementations can add whatever additional
+    assertions they like."""
+    return self._DoCall("FullOTA_Assertions")
+
+  def FullOTA_InstallBegin(self):
+    """Called at the start of full OTA installation."""
+    return self._DoCall("FullOTA_InstallBegin")
+
+  def FullOTA_GetBlockDifferences(self):
+    """Called during full OTA installation and verification.
+    Implementations should return a list of BlockDifference objects
+    describing the update for each additional partition.
+    """
+    return self._DoCall("FullOTA_GetBlockDifferences")
+
+  def FullOTA_InstallEnd(self):
+    """Called at the end of full OTA installation; typically this is
+    used to install the image for the device's baseband processor."""
+    return self._DoCall("FullOTA_InstallEnd")
+
+  def IncrementalOTA_Assertions(self):
+    """Called after emitting the block of assertions at the top of an
+    incremental OTA package.  Implementations can add whatever
+    additional assertions they like."""
+    return self._DoCall("IncrementalOTA_Assertions")
+
+  def IncrementalOTA_VerifyBegin(self):
+    """Called at the start of the verification phase of incremental
+    OTA installation; additional checks can be placed here to abort
+    the script before any changes are made."""
+    return self._DoCall("IncrementalOTA_VerifyBegin")
+
+  def IncrementalOTA_VerifyEnd(self):
+    """Called at the end of the verification phase of incremental OTA
+    installation; additional checks can be placed here to abort the
+    script before any changes are made."""
+    return self._DoCall("IncrementalOTA_VerifyEnd")
+
+  def IncrementalOTA_InstallBegin(self):
+    """Called at the start of incremental OTA installation (after
+    verification is complete)."""
+    return self._DoCall("IncrementalOTA_InstallBegin")
+
+  def IncrementalOTA_GetBlockDifferences(self):
+    """Called during incremental OTA installation and verification.
+    Implementations should return a list of BlockDifference objects
+    describing the update for each additional partition.
+    """
+    return self._DoCall("IncrementalOTA_GetBlockDifferences")
+
+  def IncrementalOTA_InstallEnd(self):
+    """Called at the end of incremental OTA installation; typically
+    this is used to install the image for the device's baseband
+    processor."""
+    return self._DoCall("IncrementalOTA_InstallEnd")
+
+  def VerifyOTA_Assertions(self):
+    return self._DoCall("VerifyOTA_Assertions")
+
+
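As an illustration, a device-specific releasetools.py (loaded from OPTIONS.device_specific) might implement one of these hooks roughly as below; info is the DeviceSpecificParams instance, and its input_zip/output_zip/script attributes are assumed to have been passed as constructor keywords by the OTA generator:

    def FullOTA_InstallEnd(info):
      # "RADIO/radio.img" is a hypothetical payload path in the target files.
      try:
        img = info.input_zip.read("RADIO/radio.img")
      except KeyError:
        return
      common.ZipWriteStr(info.output_zip, "radio.img", img)
      info.script.Print("Writing radio image...")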
 class File(object):
   def __init__(self, name, data, compress_size=None):
     self.name = name
@@ -3133,11 +3292,454 @@
     ZipWriteStr(z, self.name, self.data, compress_type=compression)
 
 
+DIFF_PROGRAM_BY_EXT = {
+    ".gz": "imgdiff",
+    ".zip": ["imgdiff", "-z"],
+    ".jar": ["imgdiff", "-z"],
+    ".apk": ["imgdiff", "-z"],
+    ".img": "imgdiff",
+}
+
+
+class Difference(object):
+  def __init__(self, tf, sf, diff_program=None):
+    self.tf = tf
+    self.sf = sf
+    self.patch = None
+    self.diff_program = diff_program
+
+  def ComputePatch(self):
+    """Compute the patch (as a string of data) needed to turn sf into
+    tf.  Returns the same tuple as GetPatch()."""
+
+    tf = self.tf
+    sf = self.sf
+
+    if self.diff_program:
+      diff_program = self.diff_program
+    else:
+      ext = os.path.splitext(tf.name)[1]
+      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
+
+    ttemp = tf.WriteToTemp()
+    stemp = sf.WriteToTemp()
+
+    ext = os.path.splitext(tf.name)[1]
+
+    try:
+      ptemp = tempfile.NamedTemporaryFile()
+      if isinstance(diff_program, list):
+        cmd = copy.copy(diff_program)
+      else:
+        cmd = [diff_program]
+      cmd.append(stemp.name)
+      cmd.append(ttemp.name)
+      cmd.append(ptemp.name)
+      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+      err = []
+
+      def run():
+        _, e = p.communicate()
+        if e:
+          err.append(e)
+      th = threading.Thread(target=run)
+      th.start()
+      th.join(timeout=300)   # 5 mins
+      if th.is_alive():
+        logger.warning("diff command timed out")
+        p.terminate()
+        th.join(5)
+        if th.is_alive():
+          p.kill()
+          th.join()
+
+      if p.returncode != 0:
+        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
+        self.patch = None
+        return None, None, None
+      diff = ptemp.read()
+    finally:
+      ptemp.close()
+      stemp.close()
+      ttemp.close()
+
+    self.patch = diff
+    return self.tf, self.sf, self.patch
+
+  def GetPatch(self):
+    """Returns a tuple of (target_file, source_file, patch_data).
+
+    patch_data may be None if ComputePatch hasn't been called, or if
+    computing the patch failed.
+    """
+    return self.tf, self.sf, self.patch
+
+
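Typical single-file usage, sketched (target_file and source_file stand for common.File objects; the diff program is picked from DIFF_PROGRAM_BY_EXT by the target's extension unless overridden):

    d = Difference(target_file, source_file)
    _, _, patch = d.ComputePatch()   # returns (None, None, None) on failure
    if patch is not None:
      logger.info("%s: %d-byte patch", target_file.name, len(patch))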
+def ComputeDifferences(diffs):
+  """Call ComputePatch on all the Difference objects in 'diffs'."""
+  logger.info("%d diffs to compute", len(diffs))
+
+  # Do the largest files first, to try and reduce the long-pole effect.
+  by_size = [(i.tf.size, i) for i in diffs]
+  by_size.sort(reverse=True)
+  by_size = [i[1] for i in by_size]
+
+  lock = threading.Lock()
+  diff_iter = iter(by_size)   # accessed under lock
+
+  def worker():
+    try:
+      lock.acquire()
+      for d in diff_iter:
+        lock.release()
+        start = time.time()
+        d.ComputePatch()
+        dur = time.time() - start
+        lock.acquire()
+
+        tf, sf, patch = d.GetPatch()
+        if sf.name == tf.name:
+          name = tf.name
+        else:
+          name = "%s (%s)" % (tf.name, sf.name)
+        if patch is None:
+          logger.error("patching failed! %40s", name)
+        else:
+          logger.info(
+              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+              tf.size, 100.0 * len(patch) / tf.size, name)
+      lock.release()
+    except Exception:
+      logger.exception("Failed to compute diff from worker")
+      raise
+
+  # start worker threads; wait for them all to finish.
+  threads = [threading.Thread(target=worker)
+             for i in range(OPTIONS.worker_threads)]
+  for th in threads:
+    th.start()
+  while threads:
+    threads.pop().join()
+
+
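And the batch driver, under the same assumptions (file_pairs being a hypothetical list of (target, source) File tuples):

    diffs = [Difference(tf, sf) for tf, sf in file_pairs]
    ComputeDifferences(diffs)           # runs OPTIONS.worker_threads in parallel
    patches = [d.GetPatch() for d in diffs]   # (tf, sf, patch) tuples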
+class BlockDifference(object):
+  def __init__(self, partition, tgt, src=None, check_first_block=False,
+               version=None, disable_imgdiff=False):
+    self.tgt = tgt
+    self.src = src
+    self.partition = partition
+    self.check_first_block = check_first_block
+    self.disable_imgdiff = disable_imgdiff
+
+    if version is None:
+      version = max(
+          int(i) for i in
+          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    assert version >= 3
+    self.version = version
+
+    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+                       version=self.version,
+                       disable_imgdiff=self.disable_imgdiff)
+    self.path = os.path.join(MakeTempDir(), partition)
+    b.Compute(self.path)
+    self._required_cache = b.max_stashed_size
+    self.touched_src_ranges = b.touched_src_ranges
+    self.touched_src_sha1 = b.touched_src_sha1
+
+    # On devices with dynamic partitions, for new partitions,
+    # src is None but OPTIONS.source_info_dict is not.
+    if OPTIONS.source_info_dict is None:
+      is_dynamic_build = OPTIONS.info_dict.get(
+          "use_dynamic_partitions") == "true"
+      is_dynamic_source = False
+    else:
+      is_dynamic_build = OPTIONS.source_info_dict.get(
+          "use_dynamic_partitions") == "true"
+      is_dynamic_source = partition in shlex.split(
+          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
+
+    is_dynamic_target = partition in shlex.split(
+        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
+
+    # For dynamic partitions builds, check partition list in both source
+    # and target build because new partitions may be added, and existing
+    # partitions may be removed.
+    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
+
+    if is_dynamic:
+      self.device = 'map_partition("%s")' % partition
+    else:
+      if OPTIONS.source_info_dict is None:
+        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
+                                              OPTIONS.info_dict)
+      else:
+        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
+                                              OPTIONS.source_info_dict)
+      self.device = device_expr
+
+  @property
+  def required_cache(self):
+    return self._required_cache
+
+  def WriteScript(self, script, output_zip, progress=None,
+                  write_verify_script=False):
+    if not self.src:
+      # write the output unconditionally
+      script.Print("Patching %s image unconditionally..." % (self.partition,))
+    else:
+      script.Print("Patching %s image after verification." % (self.partition,))
+
+    if progress:
+      script.ShowProgress(progress, 0)
+    self._WriteUpdate(script, output_zip)
+
+    if write_verify_script:
+      self.WritePostInstallVerifyScript(script)
+
+  def WriteStrictVerifyScript(self, script):
+    """Verify all the blocks in the care_map, including clobbered blocks.
+
+    This differs from the WriteVerifyScript() function: a) it prints different
+    error messages; b) it doesn't allow halfway-updated images to pass the
+    verification."""
+
+    partition = self.partition
+    script.Print("Verifying %s..." % (partition,))
+    ranges = self.tgt.care_map
+    ranges_str = ranges.to_string_raw()
+    script.AppendExtra(
+        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
+        'ui_print("%s has unexpected contents.");' % (
+            self.device, ranges_str,
+            self.tgt.TotalSha1(include_clobbered_blocks=True),
+            self.partition))
+    script.AppendExtra("")
+
+  def WriteVerifyScript(self, script, touched_blocks_only=False):
+    partition = self.partition
+
+    # full OTA
+    if not self.src:
+      script.Print("Image %s will be patched unconditionally." % (partition,))
+
+    # incremental OTA
+    else:
+      if touched_blocks_only:
+        ranges = self.touched_src_ranges
+        expected_sha1 = self.touched_src_sha1
+      else:
+        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+        expected_sha1 = self.src.TotalSha1()
+
+      # No blocks to be checked, skipping.
+      if not ranges:
+        return
+
+      ranges_str = ranges.to_string_raw()
+      script.AppendExtra(
+          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
+          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
+          '"%s.patch.dat")) then' % (
+              self.device, ranges_str, expected_sha1,
+              self.device, partition, partition, partition))
+      script.Print('Verified %s image...' % (partition,))
+      script.AppendExtra('else')
+
+      if self.version >= 4:
+
+        # Bug: 21124327
+        # When generating incrementals for the system and vendor partitions in
+        # version 4 or newer, explicitly check the first block (which contains
+        # the superblock) of the partition to see if it's what we expect. If
+        # this check fails, give an explicit log message about the partition
+        # having been remounted R/W (the most likely explanation).
+        if self.check_first_block:
+          script.AppendExtra('check_first_block(%s);' % (self.device,))
+
+        # If version >= 4, try block recovery before abort update
+        if partition == "system":
+          code = ErrorCode.SYSTEM_RECOVER_FAILURE
+        else:
+          code = ErrorCode.VENDOR_RECOVER_FAILURE
+        script.AppendExtra((
+            'ifelse (block_image_recover({device}, "{ranges}") && '
+            'block_image_verify({device}, '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{partition}.new.dat", "{partition}.patch.dat"), '
+            'ui_print("{partition} recovered successfully."), '
+            'abort("E{code}: {partition} partition fails to recover"));\n'
+            'endif;').format(device=self.device, ranges=ranges_str,
+                             partition=partition, code=code))
+
+      # Abort the OTA update. Note that the incremental OTA cannot be applied
+      # even if the current partition content matches the target checksum.
+      # a) If version < 3, operations like move and erase will make changes
+      #    unconditionally and damage the partition.
+      # b) If version >= 3, it won't even reach here.
+      else:
+        if partition == "system":
+          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
+        else:
+          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
+        script.AppendExtra((
+            'abort("E%d: %s partition has unexpected contents");\n'
+            'endif;') % (code, partition))
+
+  def WritePostInstallVerifyScript(self, script):
+    partition = self.partition
+    script.Print('Verifying the updated %s image...' % (partition,))
+    # Unlike pre-install verification, clobbered_blocks should not be ignored.
+    ranges = self.tgt.care_map
+    ranges_str = ranges.to_string_raw()
+    script.AppendExtra(
+        'if range_sha1(%s, "%s") == "%s" then' % (
+            self.device, ranges_str,
+            self.tgt.TotalSha1(include_clobbered_blocks=True)))
+
+    # Bug: 20881595
+    # Verify that extended blocks are really zeroed out.
+    if self.tgt.extended:
+      ranges_str = self.tgt.extended.to_string_raw()
+      script.AppendExtra(
+          'if range_sha1(%s, "%s") == "%s" then' % (
+              self.device, ranges_str,
+              self._HashZeroBlocks(self.tgt.extended.size())))
+      script.Print('Verified the updated %s image.' % (partition,))
+      if partition == "system":
+        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
+      else:
+        code = ErrorCode.VENDOR_NONZERO_CONTENTS
+      script.AppendExtra(
+          'else\n'
+          '  abort("E%d: %s partition has unexpected non-zero contents after '
+          'OTA update");\n'
+          'endif;' % (code, partition))
+    else:
+      script.Print('Verified the updated %s image.' % (partition,))
+
+    if partition == "system":
+      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
+    else:
+      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
+
+    script.AppendExtra(
+        'else\n'
+        '  abort("E%d: %s partition has unexpected contents after OTA '
+        'update");\n'
+        'endif;' % (code, partition))
+
+  def _WriteUpdate(self, script, output_zip):
+    ZipWrite(output_zip,
+             '{}.transfer.list'.format(self.path),
+             '{}.transfer.list'.format(self.partition))
+
+    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
+    # its size. Quality 9 almost triples the compression time but yields
+    # little additional size reduction. For a typical 1.8G system.new.dat
+    #                       zip  | brotli(quality 6)  | brotli(quality 9)
+    #   compressed_size:    942M | 869M (~8% reduced) | 854M
+    #   compression_time:   75s  | 265s               | 719s
+    #   decompression_time: 15s  | 25s                | 25s
+
+    if not self.src:
+      brotli_cmd = ['brotli', '--quality=6',
+                    '--output={}.new.dat.br'.format(self.path),
+                    '{}.new.dat'.format(self.path)]
+      print("Compressing {}.new.dat with brotli".format(self.partition))
+      RunAndCheckOutput(brotli_cmd)
+
+      new_data_name = '{}.new.dat.br'.format(self.partition)
+      ZipWrite(output_zip,
+               '{}.new.dat.br'.format(self.path),
+               new_data_name,
+               compress_type=zipfile.ZIP_STORED)
+    else:
+      new_data_name = '{}.new.dat'.format(self.partition)
+      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
+
+    ZipWrite(output_zip,
+             '{}.patch.dat'.format(self.path),
+             '{}.patch.dat'.format(self.partition),
+             compress_type=zipfile.ZIP_STORED)
+
+    if self.partition == "system":
+      code = ErrorCode.SYSTEM_UPDATE_FAILURE
+    else:
+      code = ErrorCode.VENDOR_UPDATE_FAILURE
+
+    call = ('block_image_update({device}, '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{new_data_name}", "{partition}.patch.dat") ||\n'
+            '  abort("E{code}: Failed to update {partition} image.");'.format(
+                device=self.device, partition=self.partition,
+                new_data_name=new_data_name, code=code))
+    script.AppendExtra(script.WordWrap(call))
+
+  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
+    data = source.ReadRangeSet(ranges)
+    ctx = sha1()
+
+    for p in data:
+      ctx.update(p)
+
+    return ctx.hexdigest()
+
+  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
+    """Return the hash value for all zero blocks."""
+    zero_block = b'\x00' * 4096  # bytes: hashlib.update() rejects str
+    ctx = sha1()
+    for _ in range(num_blocks):
+      ctx.update(zero_block)
+
+    return ctx.hexdigest()
+
+
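Putting the class together, the non-A/B generators drive it roughly as follows (tgt and src are sparse images from common.GetUserImage(), script an edify generator; the argument values mirror the non_ab_ota.py call sites further down):

    bd = BlockDifference("system", tgt, src, check_first_block=True,
                         version=4, disable_imgdiff=True)
    bd.WriteVerifyScript(script, touched_blocks_only=True)
    bd.WriteScript(script, output_zip, progress=0.8,
                   write_verify_script=True)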
 # Expose these two classes to support vendor-specific scripts
 DataImage = images.DataImage
 EmptyImage = images.EmptyImage
 
 
+# map recovery.fstab's fs_types to mount/format "partition types"
+PARTITION_TYPES = {
+    "ext4": "EMMC",
+    "emmc": "EMMC",
+    "f2fs": "EMMC",
+    "squashfs": "EMMC",
+    "erofs": "EMMC"
+}
+
+
+def GetTypeAndDevice(mount_point, info, check_no_slot=True):
+  """
+  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
+  backwards compatibility. It aborts if the fstab entry has the slotselect
+  option (unless check_no_slot is explicitly set to False).
+  """
+  fstab = info["fstab"]
+  if fstab:
+    if check_no_slot:
+      assert not fstab[mount_point].slotselect, \
+          "Use GetTypeAndDeviceExpr instead"
+    return (PARTITION_TYPES[fstab[mount_point].fs_type],
+            fstab[mount_point].device)
+  raise KeyError
+
+
+def GetTypeAndDeviceExpr(mount_point, info):
+  """
+  Return the filesystem type of the partition, and an edify expression that
+  evaluates to the device path at runtime.
+  """
+  fstab = info["fstab"]
+  if fstab:
+    p = fstab[mount_point]
+    device_expr = '"%s"' % fstab[mount_point].device
+    if p.slotselect:
+      device_expr = 'add_slot_suffix(%s)' % device_expr
+    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
+  raise KeyError
+
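For example (the device path below is illustrative; a slot-select fstab entry additionally gets wrapped in add_slot_suffix()):

    fs_type, device_expr = GetTypeAndDeviceExpr("/boot", OPTIONS.info_dict)
    # e.g. ("EMMC", '"/dev/block/by-name/boot"') for a non-slotted entry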
 
 def GetEntryForDevice(fstab, device):
   """
@@ -3213,6 +3815,349 @@
   return output
 
 
+def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
+                      info_dict=None):
+  """Generates the recovery-from-boot patch and writes the script to output.
+
+  Most of the space in the boot and recovery images is just the kernel, which is
+  identical for the two, so the resulting patch should be efficient. Add it to
+  the output zip, along with a shell script that is run from init.rc on first
+  boot to actually do the patching and install the new recovery image.
+
+  Args:
+    input_dir: The top-level input directory of the target-files.zip.
+    output_sink: The callback function that writes the result.
+    recovery_img: File object for the recovery image.
+    boot_img: File object for the boot image.
+    info_dict: A dict returned by common.LoadInfoDict() on the input
+        target_files. Will use OPTIONS.info_dict if None has been given.
+  """
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  full_recovery_image = info_dict.get("full_recovery_image") == "true"
+  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
+
+  if board_uses_vendorimage:
+    # In this case, the output sink is rooted at VENDOR
+    recovery_img_path = "etc/recovery.img"
+    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
+    sh_dir = "bin"
+  else:
+    # In this case the output sink is rooted at SYSTEM
+    recovery_img_path = "vendor/etc/recovery.img"
+    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
+    sh_dir = "vendor/bin"
+
+  if full_recovery_image:
+    output_sink(recovery_img_path, recovery_img.data)
+
+  else:
+    system_root_image = info_dict.get("system_root_image") == "true"
+    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
+    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
+    path = os.path.join(input_dir, recovery_resource_dat_path)
+    # With system-root-image, boot and recovery images will have mismatching
+    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
+    # to handle such a case.
+    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
+      diff_program = ["bsdiff"]
+      bonus_args = ""
+      assert not os.path.exists(path)
+    else:
+      diff_program = ["imgdiff"]
+      if os.path.exists(path):
+        diff_program.append("-b")
+        diff_program.append(path)
+        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
+      else:
+        bonus_args = ""
+
+    d = Difference(recovery_img, boot_img, diff_program=diff_program)
+    _, _, patch = d.ComputePatch()
+    output_sink("recovery-from-boot.p", patch)
+
+  try:
+    # The following GetTypeAndDevice()s need to use the path in the target
+    # info_dict instead of source_info_dict.
+    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
+                                              check_no_slot=False)
+    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
+                                                      check_no_slot=False)
+  except KeyError:
+    return
+
+  if full_recovery_image:
+
+    # Note that we use /vendor to refer to the recovery resources. This will
+    # work for a separate vendor partition mounted at /vendor or a
+    # /system/vendor subdirectory on the system partition, for which init will
+    # create a symlink from /vendor to /system/vendor.
+
+    sh = """#!/vendor/bin/sh
+if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
+  applypatch \\
+          --flash /vendor/etc/recovery.img \\
+          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
+      log -t recovery "Installing new recovery image: succeeded" || \\
+      log -t recovery "Installing new recovery image: failed"
+else
+  log -t recovery "Recovery image already installed"
+fi
+""" % {'type': recovery_type,
+       'device': recovery_device,
+       'sha1': recovery_img.sha1,
+       'size': recovery_img.size}
+  else:
+    sh = """#!/vendor/bin/sh
+if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
+  applypatch %(bonus_args)s \\
+          --patch /vendor/recovery-from-boot.p \\
+          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
+          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
+      log -t recovery "Installing new recovery image: succeeded" || \\
+      log -t recovery "Installing new recovery image: failed"
+else
+  log -t recovery "Recovery image already installed"
+fi
+""" % {'boot_size': boot_img.size,
+       'boot_sha1': boot_img.sha1,
+       'recovery_size': recovery_img.size,
+       'recovery_sha1': recovery_img.sha1,
+       'boot_type': boot_type,
+       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
+       'recovery_type': recovery_type,
+       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
+       'bonus_args': bonus_args}
+
+  # The install script location moved from /system/etc to /system/bin in the L
+  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
+  sh_location = os.path.join(sh_dir, "install-recovery.sh")
+
+  logger.info("putting script in %s", sh_location)
+
+  output_sink(sh_location, sh.encode())
+
+
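A minimal output_sink compatible with this function, sketched after the one in make_recovery_patch.py below (output_dir is a hypothetical destination; paths here are rooted at SYSTEM/, a vendor-rooted sink would use VENDOR/ instead):

    def output_sink(fn, data):
      output_path = os.path.join(output_dir, "SYSTEM", *fn.split("/"))
      os.makedirs(os.path.dirname(output_path), exist_ok=True)
      with open(output_path, "wb") as f:
        f.write(data)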
+class DynamicPartitionUpdate(object):
+  def __init__(self, src_group=None, tgt_group=None, progress=None,
+               block_difference=None):
+    self.src_group = src_group
+    self.tgt_group = tgt_group
+    self.progress = progress
+    self.block_difference = block_difference
+
+  @property
+  def src_size(self):
+    if not self.block_difference:
+      return 0
+    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
+
+  @property
+  def tgt_size(self):
+    if not self.block_difference:
+      return 0
+    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
+
+  @staticmethod
+  def _GetSparseImageSize(img):
+    if not img:
+      return 0
+    return img.blocksize * img.total_blocks
+
+
+class DynamicGroupUpdate(object):
+  def __init__(self, src_size=None, tgt_size=None):
+    # None: group does not exist. 0: no size limits.
+    self.src_size = src_size
+    self.tgt_size = tgt_size
+
+
+class DynamicPartitionsDifference(object):
+  def __init__(self, info_dict, block_diffs, progress_dict=None,
+               source_info_dict=None):
+    if progress_dict is None:
+      progress_dict = {}
+
+    self._remove_all_before_apply = False
+    if source_info_dict is None:
+      self._remove_all_before_apply = True
+      source_info_dict = {}
+
+    block_diff_dict = collections.OrderedDict(
+        [(e.partition, e) for e in block_diffs])
+
+    assert len(block_diff_dict) == len(block_diffs), \
+        "Duplicated BlockDifference object for {}".format(
+            [partition for partition, count in
+             collections.Counter(e.partition for e in block_diffs).items()
+             if count > 1])
+
+    self._partition_updates = collections.OrderedDict()
+
+    for p, block_diff in block_diff_dict.items():
+      self._partition_updates[p] = DynamicPartitionUpdate()
+      self._partition_updates[p].block_difference = block_diff
+
+    for p, progress in progress_dict.items():
+      if p in self._partition_updates:
+        self._partition_updates[p].progress = progress
+
+    tgt_groups = shlex.split(info_dict.get(
+        "super_partition_groups", "").strip())
+    src_groups = shlex.split(source_info_dict.get(
+        "super_partition_groups", "").strip())
+
+    for g in tgt_groups:
+      for p in shlex.split(info_dict.get(
+              "super_%s_partition_list" % g, "").strip()):
+        assert p in self._partition_updates, \
+            "{} is in target super_{}_partition_list but no BlockDifference " \
+            "object is provided.".format(p, g)
+        self._partition_updates[p].tgt_group = g
+
+    for g in src_groups:
+      for p in shlex.split(source_info_dict.get(
+              "super_%s_partition_list" % g, "").strip()):
+        assert p in self._partition_updates, \
+            "{} is in source super_{}_partition_list but no BlockDifference " \
+            "object is provided.".format(p, g)
+        self._partition_updates[p].src_group = g
+
+    target_dynamic_partitions = set(shlex.split(info_dict.get(
+        "dynamic_partition_list", "").strip()))
+    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
+                                  if u.tgt_size)
+    assert block_diffs_with_target == target_dynamic_partitions, \
+        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
+            list(target_dynamic_partitions), list(block_diffs_with_target))
+
+    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
+        "dynamic_partition_list", "").strip()))
+    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
+                                  if u.src_size)
+    assert block_diffs_with_source == source_dynamic_partitions, \
+        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
+            list(source_dynamic_partitions), list(block_diffs_with_source))
+
+    if self._partition_updates:
+      logger.info("Updating dynamic partitions %s",
+                  self._partition_updates.keys())
+
+    self._group_updates = collections.OrderedDict()
+
+    for g in tgt_groups:
+      self._group_updates[g] = DynamicGroupUpdate()
+      self._group_updates[g].tgt_size = int(info_dict.get(
+          "super_%s_group_size" % g, "0").strip())
+
+    for g in src_groups:
+      if g not in self._group_updates:
+        self._group_updates[g] = DynamicGroupUpdate()
+      self._group_updates[g].src_size = int(source_info_dict.get(
+          "super_%s_group_size" % g, "0").strip())
+
+    self._Compute()
+
+  def WriteScript(self, script, output_zip, write_verify_script=False):
+    script.Comment('--- Start patching dynamic partitions ---')
+    for p, u in self._partition_updates.items():
+      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+        script.Comment('Patch partition %s' % p)
+        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
+                                       write_verify_script=False)
+
+    op_list_path = MakeTempFile()
+    with open(op_list_path, 'w') as f:
+      for line in self._op_list:
+        f.write('{}\n'.format(line))
+
+    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
+
+    script.Comment('Update dynamic partition metadata')
+    script.AppendExtra('assert(update_dynamic_partitions('
+                       'package_extract_file("dynamic_partitions_op_list")));')
+
+    if write_verify_script:
+      for p, u in self._partition_updates.items():
+        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+          u.block_difference.WritePostInstallVerifyScript(script)
+          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
+
+    for p, u in self._partition_updates.items():
+      if u.tgt_size and u.src_size <= u.tgt_size:
+        script.Comment('Patch partition %s' % p)
+        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
+                                       write_verify_script=write_verify_script)
+        if write_verify_script:
+          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
+
+    script.Comment('--- End patching dynamic partitions ---')
+
+  def _Compute(self):
+    self._op_list = list()
+
+    def append(line):
+      self._op_list.append(line)
+
+    def comment(line):
+      self._op_list.append("# %s" % line)
+
+    if self._remove_all_before_apply:
+      comment('Remove all existing dynamic partitions and groups before '
+              'applying full OTA')
+      append('remove_all_groups')
+
+    for p, u in self._partition_updates.items():
+      if u.src_group and not u.tgt_group:
+        append('remove %s' % p)
+
+    for p, u in self._partition_updates.items():
+      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
+        comment('Move partition %s from %s to default' % (p, u.src_group))
+        append('move %s default' % p)
+
+    for p, u in self._partition_updates.items():
+      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
+        comment('Shrink partition %s from %d to %d' %
+                (p, u.src_size, u.tgt_size))
+        append('resize %s %s' % (p, u.tgt_size))
+
+    for g, u in self._group_updates.items():
+      if u.src_size is not None and u.tgt_size is None:
+        append('remove_group %s' % g)
+      if (u.src_size is not None and u.tgt_size is not None and
+              u.src_size > u.tgt_size):
+        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
+        append('resize_group %s %d' % (g, u.tgt_size))
+
+    for g, u in self._group_updates.items():
+      if u.src_size is None and u.tgt_size is not None:
+        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
+        append('add_group %s %d' % (g, u.tgt_size))
+      if (u.src_size is not None and u.tgt_size is not None and
+              u.src_size < u.tgt_size):
+        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
+        append('resize_group %s %d' % (g, u.tgt_size))
+
+    for p, u in self._partition_updates.items():
+      if u.tgt_group and not u.src_group:
+        comment('Add partition %s to group %s' % (p, u.tgt_group))
+        append('add %s %s' % (p, u.tgt_group))
+
+    for p, u in self._partition_updates.items():
+      if u.tgt_size and u.src_size < u.tgt_size:
+        comment('Grow partition %s from %d to %d' %
+                (p, u.src_size, u.tgt_size))
+        append('resize %s %d' % (p, u.tgt_size))
+
+    for p, u in self._partition_updates.items():
+      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
+        comment('Move partition %s from default to %s' %
+                (p, u.tgt_group))
+        append('move %s %s' % (p, u.tgt_group))
+
+
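For incremental packages the generator wires this up essentially as in the non_ab_ota.py hunk below:

    dp_diff = DynamicPartitionsDifference(
        info_dict=OPTIONS.target_info_dict,
        source_info_dict=OPTIONS.source_info_dict,
        block_diffs=block_diff_dict.values(),
        progress_dict=progress_dict)
    dp_diff.WriteScript(script, output_zip, write_verify_script=True)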
 def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
   """
   Get build.prop from ramdisk within the boot image
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 0a7653c..033c02e 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -16,45 +16,6 @@
 
 import common
 
-# map recovery.fstab's fs_types to mount/format "partition types"
-PARTITION_TYPES = {
-    "ext4": "EMMC",
-    "emmc": "EMMC",
-    "f2fs": "EMMC",
-    "squashfs": "EMMC",
-    "erofs": "EMMC"
-}
-
-
-class ErrorCode(object):
-  """Define error_codes for failures that happen during the actual
-  update package installation.
-
-  Error codes 0-999 are reserved for failures before the package
-  installation (i.e. low battery, package verification failure).
-  Detailed code in 'bootable/recovery/error_code.h' """
-
-  SYSTEM_VERIFICATION_FAILURE = 1000
-  SYSTEM_UPDATE_FAILURE = 1001
-  SYSTEM_UNEXPECTED_CONTENTS = 1002
-  SYSTEM_NONZERO_CONTENTS = 1003
-  SYSTEM_RECOVER_FAILURE = 1004
-  VENDOR_VERIFICATION_FAILURE = 2000
-  VENDOR_UPDATE_FAILURE = 2001
-  VENDOR_UNEXPECTED_CONTENTS = 2002
-  VENDOR_NONZERO_CONTENTS = 2003
-  VENDOR_RECOVER_FAILURE = 2004
-  OEM_PROP_MISMATCH = 3000
-  FINGERPRINT_MISMATCH = 3001
-  THUMBPRINT_MISMATCH = 3002
-  OLDER_BUILD = 3003
-  DEVICE_MISMATCH = 3004
-  BAD_PATCH_FILE = 3005
-  INSUFFICIENT_CACHE_SPACE = 3006
-  TUNE_PARTITION_FAILURE = 3007
-  APPLY_PATCH_FAILURE = 3008
-
-
 class EdifyGenerator(object):
   """Class to generate scripts in the 'edify' recovery script language
   used from donut onwards."""
@@ -127,7 +88,7 @@
         'abort("E{code}: This package expects the value \\"{values}\\" for '
         '\\"{name}\\"; this has value \\"" + '
         '{get_prop_command} + "\\".");').format(
-            code=ErrorCode.OEM_PROP_MISMATCH,
+            code=common.ErrorCode.OEM_PROP_MISMATCH,
             get_prop_command=get_prop_command, name=name,
             values='\\" or \\"'.join(values))
     self.script.append(cmd)
@@ -140,7 +101,7 @@
                              for i in fp]) +
            ' ||\n    abort("E%d: Package expects build fingerprint of %s; '
            'this device has " + getprop("ro.build.fingerprint") + ".");') % (
-               ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
+               common.ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
     self.script.append(cmd)
 
   def AssertSomeThumbprint(self, *fp):
@@ -151,7 +112,7 @@
                              for i in fp]) +
            ' ||\n    abort("E%d: Package expects build thumbprint of %s; this '
            'device has " + getprop("ro.build.thumbprint") + ".");') % (
-               ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
+               common.ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
     self.script.append(cmd)
 
   def AssertFingerprintOrThumbprint(self, fp, tp):
@@ -172,14 +133,14 @@
         ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
          'abort("E%d: Can\'t install this package (%s) over newer '
          'build (" + getprop("ro.build.date") + ").");') % (
-             timestamp, ErrorCode.OLDER_BUILD, timestamp_text))
+             timestamp, common.ErrorCode.OLDER_BUILD, timestamp_text))
 
   def AssertDevice(self, device):
     """Assert that the device identifier is the given string."""
     cmd = ('getprop("ro.product.device") == "%s" || '
            'abort("E%d: This package is for \\"%s\\" devices; '
            'this is a \\"" + getprop("ro.product.device") + "\\".");') % (
-               device, ErrorCode.DEVICE_MISMATCH, device)
+               device, common.ErrorCode.DEVICE_MISMATCH, device)
     self.script.append(cmd)
 
   def AssertSomeBootloader(self, *bootloaders):
@@ -246,7 +207,7 @@
         'unexpected contents."));').format(
             target=target_expr,
             source=source_expr,
-            code=ErrorCode.BAD_PATCH_FILE)))
+            code=common.ErrorCode.BAD_PATCH_FILE)))
 
   def CacheFreeSpaceCheck(self, amount):
     """Check that there's at least 'amount' space that can be made
@@ -255,7 +216,7 @@
     self.script.append(('apply_patch_space(%d) || abort("E%d: Not enough free '
                         'space on /cache to apply patches.");') % (
                             amount,
-                            ErrorCode.INSUFFICIENT_CACHE_SPACE))
+                            common.ErrorCode.INSUFFICIENT_CACHE_SPACE))
 
   def Mount(self, mount_point, mount_options_by_format=""):
     """Mount the partition with the given mount_point.
@@ -277,7 +238,7 @@
       if p.context is not None:
         mount_flags = p.context + ("," + mount_flags if mount_flags else "")
       self.script.append('mount("%s", "%s", %s, "%s", "%s");' % (
-          p.fs_type, PARTITION_TYPES[p.fs_type],
+          p.fs_type, common.PARTITION_TYPES[p.fs_type],
           self._GetSlotSuffixDeviceForEntry(p),
           p.mount_point, mount_flags))
       self.mounts.add(p.mount_point)
@@ -303,7 +264,7 @@
         'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
         '%s) || abort("E%d: Failed to tune partition %s");' % (
             self._GetSlotSuffixDeviceForEntry(p),
-            ErrorCode.TUNE_PARTITION_FAILURE, partition))
+            common.ErrorCode.TUNE_PARTITION_FAILURE, partition))
 
   def FormatPartition(self, partition):
     """Format the given partition, specified by its mount point (eg,
@@ -313,7 +274,7 @@
     if fstab:
       p = fstab[partition]
       self.script.append('format("%s", "%s", %s, "%s", "%s");' %
-                         (p.fs_type, PARTITION_TYPES[p.fs_type],
+                         (p.fs_type, common.PARTITION_TYPES[p.fs_type],
                           self._GetSlotSuffixDeviceForEntry(p),
                           p.length, p.mount_point))
 
@@ -393,7 +354,7 @@
             target=target_expr,
             source=source_expr,
             patch=patch_expr,
-            code=ErrorCode.APPLY_PATCH_FAILURE)))
+            code=common.ErrorCode.APPLY_PATCH_FAILURE)))
 
   def _GetSlotSuffixDeviceForEntry(self, entry=None):
     """
@@ -427,7 +388,7 @@
     fstab = self.fstab
     if fstab:
       p = fstab[mount_point]
-      partition_type = PARTITION_TYPES[p.fs_type]
+      partition_type = common.PARTITION_TYPES[p.fs_type]
       device = self._GetSlotSuffixDeviceForEntry(p)
       args = {'device': device, 'fn': fn}
       if partition_type == "EMMC":
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 397bf23..1497d69 100644
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -21,7 +21,6 @@
 import sys
 
 import common
-from non_ab_ota import MakeRecoveryPatch
 
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -61,7 +60,7 @@
                            *fn.split("/")), "wb") as f:
       f.write(data)
 
-  MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
+  common.MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)
 
 
 if __name__ == '__main__':
diff --git a/tools/releasetools/non_ab_ota.py b/tools/releasetools/non_ab_ota.py
index 80c3083..667891c 100644
--- a/tools/releasetools/non_ab_ota.py
+++ b/tools/releasetools/non_ab_ota.py
@@ -13,25 +13,17 @@
 # limitations under the License.
 
 import collections
-import copy
-import imp
 import logging
 import os
-import time
-import threading
-import tempfile
 import zipfile
-import subprocess
-import shlex
 
 import common
 import edify_generator
-from edify_generator import ErrorCode, PARTITION_TYPES
+import verity_utils
 from check_target_files_vintf import CheckVintfIfTrebleEnabled, HasPartition
-from common import OPTIONS, Run, MakeTempDir, RunAndCheckOutput, ZipWrite, MakeTempFile
+from common import OPTIONS
 from ota_utils import UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata, PropertyFiles
-from blockimgdiff import BlockImageDiff
-from hashlib import sha1
+import subprocess
 
 logger = logging.getLogger(__name__)
 
@@ -59,10 +51,10 @@
     check_first_block = partition_source_info.fs_type == "ext4"
     # Disable imgdiff because it relies on zlib to produce stable output
     # across different versions, which is often not the case.
-    return BlockDifference(name, partition_tgt, partition_src,
-                           check_first_block,
-                           version=blockimgdiff_version,
-                           disable_imgdiff=True)
+    return common.BlockDifference(name, partition_tgt, partition_src,
+                                  check_first_block,
+                                  version=blockimgdiff_version,
+                                  disable_imgdiff=True)
 
   if source_zip:
     # See notes in common.GetUserImage()
@@ -84,8 +76,8 @@
       tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
                                 info_dict=target_info,
                                 reset_file_map=True)
-      block_diff_dict[partition] = BlockDifference(partition, tgt,
-                                                   src=None)
+      block_diff_dict[partition] = common.BlockDifference(partition, tgt,
+                                                          src=None)
     # Incremental OTA update.
     else:
       block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
@@ -103,7 +95,7 @@
     function_name = "FullOTA_GetBlockDifferences"
 
   if device_specific_diffs:
-    assert all(isinstance(diff, BlockDifference)
+    assert all(isinstance(diff, common.BlockDifference)
                for diff in device_specific_diffs), \
         "{} is not returning a list of BlockDifference objects".format(
             function_name)
@@ -139,7 +131,7 @@
   output_zip = zipfile.ZipFile(
       staging_file, "w", compression=zipfile.ZIP_DEFLATED)
 
-  device_specific = DeviceSpecificParams(
+  device_specific = common.DeviceSpecificParams(
       input_zip=input_zip,
       input_version=target_api_version,
       output_zip=output_zip,
@@ -225,7 +217,7 @@
   if target_info.get('use_dynamic_partitions') == "true":
     # Use empty source_info_dict to indicate that all partitions / groups must
     # be re-added.
-    dynamic_partitions_diff = DynamicPartitionsDifference(
+    dynamic_partitions_diff = common.DynamicPartitionsDifference(
         info_dict=OPTIONS.info_dict,
         block_diffs=block_diff_dict.values(),
         progress_dict=progress_dict)
@@ -317,7 +309,7 @@
   output_zip = zipfile.ZipFile(
       staging_file, "w", compression=zipfile.ZIP_DEFLATED)
 
-  device_specific = DeviceSpecificParams(
+  device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
       source_version=source_api_version,
       source_tmp=OPTIONS.source_tmp,
@@ -412,9 +404,9 @@
   required_cache_sizes = [diff.required_cache for diff in
                           block_diff_dict.values()]
   if updating_boot:
-    boot_type, boot_device_expr = GetTypeAndDeviceExpr("/boot",
-                                                       source_info)
-    d = Difference(target_boot, source_boot, "bsdiff")
+    boot_type, boot_device_expr = common.GetTypeAndDeviceExpr("/boot",
+                                                              source_info)
+    d = common.Difference(target_boot, source_boot, "bsdiff")
     _, _, d = d.ComputePatch()
     if d is None:
       include_full_boot = True
@@ -469,7 +461,7 @@
     if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
       raise RuntimeError(
           "can't generate incremental that disables dynamic partitions")
-    dynamic_partitions_diff = DynamicPartitionsDifference(
+    dynamic_partitions_diff = common.DynamicPartitionsDifference(
         info_dict=OPTIONS.target_info_dict,
         source_info_dict=OPTIONS.source_info_dict,
         block_diffs=block_diff_dict.values(),
@@ -695,881 +687,3 @@
 
   namelist = target_files_zip.namelist()
   return patch in namelist or img in namelist
-
-
-class DeviceSpecificParams(object):
-  module = None
-
-  def __init__(self, **kwargs):
-    """Keyword arguments to the constructor become attributes of this
-    object, which is passed to all functions in the device-specific
-    module."""
-    for k, v in kwargs.items():
-      setattr(self, k, v)
-    self.extras = OPTIONS.extras
-
-    if self.module is None:
-      path = OPTIONS.device_specific
-      if not path:
-        return
-      try:
-        if os.path.isdir(path):
-          info = imp.find_module("releasetools", [path])
-        else:
-          d, f = os.path.split(path)
-          b, x = os.path.splitext(f)
-          if x == ".py":
-            f = b
-          info = imp.find_module(f, [d])
-        logger.info("loaded device-specific extensions from %s", path)
-        self.module = imp.load_module("device_specific", *info)
-      except ImportError:
-        logger.info("unable to load device-specific module; assuming none")
-
-  def _DoCall(self, function_name, *args, **kwargs):
-    """Call the named function in the device-specific module, passing
-    the given args and kwargs.  The first argument to the call will be
-    the DeviceSpecific object itself.  If there is no module, or the
-    module does not define the function, return the value of the
-    'default' kwarg (which itself defaults to None)."""
-    if self.module is None or not hasattr(self.module, function_name):
-      return kwargs.get("default")
-    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
-
-  def FullOTA_Assertions(self):
-    """Called after emitting the block of assertions at the top of a
-    full OTA package.  Implementations can add whatever additional
-    assertions they like."""
-    return self._DoCall("FullOTA_Assertions")
-
-  def FullOTA_InstallBegin(self):
-    """Called at the start of full OTA installation."""
-    return self._DoCall("FullOTA_InstallBegin")
-
-  def FullOTA_GetBlockDifferences(self):
-    """Called during full OTA installation and verification.
-    Implementation should return a list of BlockDifference objects describing
-    the update on each additional partitions.
-    """
-    return self._DoCall("FullOTA_GetBlockDifferences")
-
-  def FullOTA_InstallEnd(self):
-    """Called at the end of full OTA installation; typically this is
-    used to install the image for the device's baseband processor."""
-    return self._DoCall("FullOTA_InstallEnd")
-
-  def IncrementalOTA_Assertions(self):
-    """Called after emitting the block of assertions at the top of an
-    incremental OTA package.  Implementations can add whatever
-    additional assertions they like."""
-    return self._DoCall("IncrementalOTA_Assertions")
-
-  def IncrementalOTA_VerifyBegin(self):
-    """Called at the start of the verification phase of incremental
-    OTA installation; additional checks can be placed here to abort
-    the script before any changes are made."""
-    return self._DoCall("IncrementalOTA_VerifyBegin")
-
-  def IncrementalOTA_VerifyEnd(self):
-    """Called at the end of the verification phase of incremental OTA
-    installation; additional checks can be placed here to abort the
-    script before any changes are made."""
-    return self._DoCall("IncrementalOTA_VerifyEnd")
-
-  def IncrementalOTA_InstallBegin(self):
-    """Called at the start of incremental OTA installation (after
-    verification is complete)."""
-    return self._DoCall("IncrementalOTA_InstallBegin")
-
-  def IncrementalOTA_GetBlockDifferences(self):
-    """Called during incremental OTA installation and verification.
-    Implementation should return a list of BlockDifference objects describing
-    the update on each additional partitions.
-    """
-    return self._DoCall("IncrementalOTA_GetBlockDifferences")
-
-  def IncrementalOTA_InstallEnd(self):
-    """Called at the end of incremental OTA installation; typically
-    this is used to install the image for the device's baseband
-    processor."""
-    return self._DoCall("IncrementalOTA_InstallEnd")
-
-  def VerifyOTA_Assertions(self):
-    return self._DoCall("VerifyOTA_Assertions")
-
-
-DIFF_PROGRAM_BY_EXT = {
-    ".gz": "imgdiff",
-    ".zip": ["imgdiff", "-z"],
-    ".jar": ["imgdiff", "-z"],
-    ".apk": ["imgdiff", "-z"],
-    ".img": "imgdiff",
-}
-
-
-class Difference(object):
-  def __init__(self, tf, sf, diff_program=None):
-    self.tf = tf
-    self.sf = sf
-    self.patch = None
-    self.diff_program = diff_program
-
-  def ComputePatch(self):
-    """Compute the patch (as a string of data) needed to turn sf into
-    tf.  Returns the same tuple as GetPatch()."""
-
-    tf = self.tf
-    sf = self.sf
-
-    if self.diff_program:
-      diff_program = self.diff_program
-    else:
-      ext = os.path.splitext(tf.name)[1]
-      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
-
-    ttemp = tf.WriteToTemp()
-    stemp = sf.WriteToTemp()
-
-    ext = os.path.splitext(tf.name)[1]
-
-    try:
-      ptemp = tempfile.NamedTemporaryFile()
-      if isinstance(diff_program, list):
-        cmd = copy.copy(diff_program)
-      else:
-        cmd = [diff_program]
-      cmd.append(stemp.name)
-      cmd.append(ttemp.name)
-      cmd.append(ptemp.name)
-      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      err = []
-
-      def run():
-        _, e = p.communicate()
-        if e:
-          err.append(e)
-      th = threading.Thread(target=run)
-      th.start()
-      th.join(timeout=300)   # 5 mins
-      if th.is_alive():
-        logger.warning("diff command timed out")
-        p.terminate()
-        th.join(5)
-        if th.is_alive():
-          p.kill()
-          th.join()
-
-      if p.returncode != 0:
-        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
-        self.patch = None
-        return None, None, None
-      diff = ptemp.read()
-    finally:
-      ptemp.close()
-      stemp.close()
-      ttemp.close()
-
-    self.patch = diff
-    return self.tf, self.sf, self.patch
-
-  def GetPatch(self):
-    """Returns a tuple of (target_file, source_file, patch_data).
-
-    patch_data may be None if ComputePatch hasn't been called, or if
-    computing the patch failed.
-    """
-    return self.tf, self.sf, self.patch
-
-
-def ComputeDifferences(diffs):
-  """Call ComputePatch on all the Difference objects in 'diffs'."""
-  logger.info("%d diffs to compute", len(diffs))
-
-  # Do the largest files first, to try and reduce the long-pole effect.
-  by_size = [(i.tf.size, i) for i in diffs]
-  by_size.sort(reverse=True)
-  by_size = [i[1] for i in by_size]
-
-  lock = threading.Lock()
-  diff_iter = iter(by_size)   # accessed under lock
-
-  def worker():
-    try:
-      lock.acquire()
-      for d in diff_iter:
-        lock.release()
-        start = time.time()
-        d.ComputePatch()
-        dur = time.time() - start
-        lock.acquire()
-
-        tf, sf, patch = d.GetPatch()
-        if sf.name == tf.name:
-          name = tf.name
-        else:
-          name = "%s (%s)" % (tf.name, sf.name)
-        if patch is None:
-          logger.error("patching failed! %40s", name)
-        else:
-          logger.info(
-              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
-              tf.size, 100.0 * len(patch) / tf.size, name)
-      lock.release()
-    except Exception:
-      logger.exception("Failed to compute diff from worker")
-      raise
-
-  # start worker threads; wait for them all to finish.
-  threads = [threading.Thread(target=worker)
-             for i in range(OPTIONS.worker_threads)]
-  for th in threads:
-    th.start()
-  while threads:
-    threads.pop().join()
-
-
-class BlockDifference(object):
-  def __init__(self, partition, tgt, src=None, check_first_block=False,
-               version=None, disable_imgdiff=False):
-    self.tgt = tgt
-    self.src = src
-    self.partition = partition
-    self.check_first_block = check_first_block
-    self.disable_imgdiff = disable_imgdiff
-
-    if version is None:
-      version = max(
-          int(i) for i in
-          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
-    assert version >= 3
-    self.version = version
-
-    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
-                       version=self.version,
-                       disable_imgdiff=self.disable_imgdiff)
-    self.path = os.path.join(MakeTempDir(), partition)
-    b.Compute(self.path)
-    self._required_cache = b.max_stashed_size
-    self.touched_src_ranges = b.touched_src_ranges
-    self.touched_src_sha1 = b.touched_src_sha1
-
-    # On devices with dynamic partitions, for new partitions,
-    # src is None but OPTIONS.source_info_dict is not.
-    if OPTIONS.source_info_dict is None:
-      is_dynamic_build = OPTIONS.info_dict.get(
-          "use_dynamic_partitions") == "true"
-      is_dynamic_source = False
-    else:
-      is_dynamic_build = OPTIONS.source_info_dict.get(
-          "use_dynamic_partitions") == "true"
-      is_dynamic_source = partition in shlex.split(
-          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
-
-    is_dynamic_target = partition in shlex.split(
-        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
-
-    # For dynamic partitions builds, check partition list in both source
-    # and target build because new partitions may be added, and existing
-    # partitions may be removed.
-    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
-
-    if is_dynamic:
-      self.device = 'map_partition("%s")' % partition
-    else:
-      if OPTIONS.source_info_dict is None:
-        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
-                                              OPTIONS.info_dict)
-      else:
-        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
-                                              OPTIONS.source_info_dict)
-      self.device = device_expr
-
-  @property
-  def required_cache(self):
-    return self._required_cache
-
-  def WriteScript(self, script, output_zip, progress=None,
-                  write_verify_script=False):
-    if not self.src:
-      # write the output unconditionally
-      script.Print("Patching %s image unconditionally..." % (self.partition,))
-    else:
-      script.Print("Patching %s image after verification." % (self.partition,))
-
-    if progress:
-      script.ShowProgress(progress, 0)
-    self._WriteUpdate(script, output_zip)
-
-    if write_verify_script:
-      self.WritePostInstallVerifyScript(script)
-
-  def WriteStrictVerifyScript(self, script):
-    """Verify all the blocks in the care_map, including clobbered blocks.
-
-    This differs from the WriteVerifyScript() function: a) it prints different
-    error messages; b) it doesn't allow half-way updated images to pass the
-    verification."""
-
-    partition = self.partition
-    script.Print("Verifying %s..." % (partition,))
-    ranges = self.tgt.care_map
-    ranges_str = ranges.to_string_raw()
-    script.AppendExtra(
-        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
-        'ui_print("%s has unexpected contents.");' % (
-            self.device, ranges_str,
-            self.tgt.TotalSha1(include_clobbered_blocks=True),
-            self.partition))
-    script.AppendExtra("")
-
-  def WriteVerifyScript(self, script, touched_blocks_only=False):
-    partition = self.partition
-
-    # full OTA
-    if not self.src:
-      script.Print("Image %s will be patched unconditionally." % (partition,))
-
-    # incremental OTA
-    else:
-      if touched_blocks_only:
-        ranges = self.touched_src_ranges
-        expected_sha1 = self.touched_src_sha1
-      else:
-        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
-        expected_sha1 = self.src.TotalSha1()
-
-      # No blocks to be checked, skipping.
-      if not ranges:
-        return
-
-      ranges_str = ranges.to_string_raw()
-      script.AppendExtra(
-          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
-          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
-          '"%s.patch.dat")) then' % (
-              self.device, ranges_str, expected_sha1,
-              self.device, partition, partition, partition))
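-      # block_image_verify() also accepts a partially-updated target, so an
-      # update that was interrupted midway still verifies and can resume.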
-      script.Print('Verified %s image...' % (partition,))
-      script.AppendExtra('else')
-
-      if self.version >= 4:
-
-        # Bug: 21124327
-        # When generating incrementals for the system and vendor partitions in
-        # version 4 or newer, explicitly check the first block (which contains
-        # the superblock) of the partition to see if it's what we expect. If
-        # this check fails, give an explicit log message about the partition
-        # having been remounted R/W (the most likely explanation).
-        if self.check_first_block:
-          script.AppendExtra('check_first_block(%s);' % (self.device,))
-
-        # If version >= 4, try block recovery before abort update
-        if partition == "system":
-          code = ErrorCode.SYSTEM_RECOVER_FAILURE
-        else:
-          code = ErrorCode.VENDOR_RECOVER_FAILURE
-        script.AppendExtra((
-            'ifelse (block_image_recover({device}, "{ranges}") && '
-            'block_image_verify({device}, '
-            'package_extract_file("{partition}.transfer.list"), '
-            '"{partition}.new.dat", "{partition}.patch.dat"), '
-            'ui_print("{partition} recovered successfully."), '
-            'abort("E{code}: {partition} partition fails to recover"));\n'
-            'endif;').format(device=self.device, ranges=ranges_str,
-                             partition=partition, code=code))
-
-      # Abort the OTA update. Note that the incremental OTA cannot be applied
-      # even if it may match the checksum of the target partition.
-      # a) If version < 3, operations like move and erase will make changes
-      #    unconditionally and damage the partition.
-      # b) If version >= 3, it won't even reach here.
-      else:
-        if partition == "system":
-          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
-        else:
-          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
-        script.AppendExtra((
-            'abort("E%d: %s partition has unexpected contents");\n'
-            'endif;') % (code, partition))
-
-  def WritePostInstallVerifyScript(self, script):
-    partition = self.partition
-    script.Print('Verifying the updated %s image...' % (partition,))
-    # Unlike pre-install verification, clobbered_blocks should not be ignored.
-    ranges = self.tgt.care_map
-    ranges_str = ranges.to_string_raw()
-    script.AppendExtra(
-        'if range_sha1(%s, "%s") == "%s" then' % (
-            self.device, ranges_str,
-            self.tgt.TotalSha1(include_clobbered_blocks=True)))
-
-    # Bug: 20881595
-    # Verify that extended blocks are really zeroed out.
-    if self.tgt.extended:
-      ranges_str = self.tgt.extended.to_string_raw()
-      script.AppendExtra(
-          'if range_sha1(%s, "%s") == "%s" then' % (
-              self.device, ranges_str,
-              self._HashZeroBlocks(self.tgt.extended.size())))
-      script.Print('Verified the updated %s image.' % (partition,))
-      if partition == "system":
-        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
-      else:
-        code = ErrorCode.VENDOR_NONZERO_CONTENTS
-      script.AppendExtra(
-          'else\n'
-          '  abort("E%d: %s partition has unexpected non-zero contents after '
-          'OTA update");\n'
-          'endif;' % (code, partition))
-    else:
-      script.Print('Verified the updated %s image.' % (partition,))
-
-    if partition == "system":
-      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
-    else:
-      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
-
-    script.AppendExtra(
-        'else\n'
-        '  abort("E%d: %s partition has unexpected contents after OTA '
-        'update");\n'
-        'endif;' % (code, partition))
-
-  def _WriteUpdate(self, script, output_zip):
-    ZipWrite(output_zip,
-             '{}.transfer.list'.format(self.path),
-             '{}.transfer.list'.format(self.partition))
-
-    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
-    # its size. Quality 9 almost triples the compression time but doesn't
-    # reduce the size much further. For a typical 1.8G system.new.dat
-    #                       zip  | brotli(quality 6)  | brotli(quality 9)
-    #   compressed_size:    942M | 869M (~8% reduced) | 854M
-    #   compression_time:   75s  | 265s               | 719s
-    #   decompression_time: 15s  | 25s                | 25s
-
-    if not self.src:
-      brotli_cmd = ['brotli', '--quality=6',
-                    '--output={}.new.dat.br'.format(self.path),
-                    '{}.new.dat'.format(self.path)]
-      print("Compressing {}.new.dat with brotli".format(self.partition))
-      RunAndCheckOutput(brotli_cmd)
-
-      new_data_name = '{}.new.dat.br'.format(self.partition)
-      ZipWrite(output_zip,
-               '{}.new.dat.br'.format(self.path),
-               new_data_name,
-               compress_type=zipfile.ZIP_STORED)
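-      # Already brotli-compressed; store it in the zip rather than deflating
-      # it a second time.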
-    else:
-      new_data_name = '{}.new.dat'.format(self.partition)
-      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
-
-    ZipWrite(output_zip,
-             '{}.patch.dat'.format(self.path),
-             '{}.patch.dat'.format(self.partition),
-             compress_type=zipfile.ZIP_STORED)
-
-    if self.partition == "system":
-      code = ErrorCode.SYSTEM_UPDATE_FAILURE
-    else:
-      code = ErrorCode.VENDOR_UPDATE_FAILURE
-
-    call = ('block_image_update({device}, '
-            'package_extract_file("{partition}.transfer.list"), '
-            '"{new_data_name}", "{partition}.patch.dat") ||\n'
-            '  abort("E{code}: Failed to update {partition} image.");'.format(
-                device=self.device, partition=self.partition,
-                new_data_name=new_data_name, code=code))
-    script.AppendExtra(script.WordWrap(call))
-
-  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
-    data = source.ReadRangeSet(ranges)
-    ctx = sha1()
-
-    for p in data:
-      ctx.update(p)
-
-    return ctx.hexdigest()
-
-  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
-    """Return the hash value for all zero blocks."""
-    zero_block = b'\x00' * 4096  # hashlib requires a bytes-like object
-    ctx = sha1()
-    for _ in range(num_blocks):
-      ctx.update(zero_block)
-
-    return ctx.hexdigest()
-
-
-def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
-                      info_dict=None):
-  """Generates the recovery-from-boot patch and writes the script to output.
-
-  Most of the space in the boot and recovery images is just the kernel, which is
-  identical for the two, so the resulting patch should be efficient. Add it to
-  the output zip, along with a shell script that is run from init.rc on first
-  boot to actually do the patching and install the new recovery image.
-
-  Args:
-    input_dir: The top-level input directory of the target-files.zip.
-    output_sink: The callback function that writes the result.
-    recovery_img: File object for the recovery image.
-    boot_img: File object for the boot image.
-    info_dict: A dict returned by common.LoadInfoDict() on the input
-        target_files. Will use OPTIONS.info_dict if None has been given.
-  """
-  if info_dict is None:
-    info_dict = OPTIONS.info_dict
-
-  full_recovery_image = info_dict.get("full_recovery_image") == "true"
-  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
-
-  if board_uses_vendorimage:
-    # In this case, the output sink is rooted at VENDOR
-    recovery_img_path = "etc/recovery.img"
-    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
-    sh_dir = "bin"
-  else:
-    # In this case the output sink is rooted at SYSTEM
-    recovery_img_path = "vendor/etc/recovery.img"
-    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
-    sh_dir = "vendor/bin"
-
-  if full_recovery_image:
-    output_sink(recovery_img_path, recovery_img.data)
-
-  else:
-    system_root_image = info_dict.get("system_root_image") == "true"
-    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
-    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
-    path = os.path.join(input_dir, recovery_resource_dat_path)
-    # With system-root-image, boot and recovery images will have mismatching
-    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
-    # to handle such a case.
-    if system_root_image or include_recovery_dtbo or include_recovery_acpio:
-      diff_program = ["bsdiff"]
-      bonus_args = ""
-      assert not os.path.exists(path)
-    else:
-      diff_program = ["imgdiff"]
-      if os.path.exists(path):
-        diff_program.append("-b")
-        diff_program.append(path)
-        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
-      else:
-        bonus_args = ""
-
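-    # recovery-resource.dat, when present, provides imgdiff with shared data
-    # it can reference out-of-band, shrinking the recovery-from-boot patch.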
-    d = Difference(recovery_img, boot_img, diff_program=diff_program)
-    _, _, patch = d.ComputePatch()
-    output_sink("recovery-from-boot.p", patch)
-
-  try:
-    # The following GetTypeAndDevice()s need to use the path in the target
-    # info_dict instead of source_info_dict.
-    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
-                                              check_no_slot=False)
-    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
-                                                      check_no_slot=False)
-  except KeyError:
-    return
-
-  if full_recovery_image:
-
-    # Note that we use /vendor to refer to the recovery resources. This will
-    # work for a separate vendor partition mounted at /vendor or a
-    # /system/vendor subdirectory on the system partition, for which init will
-    # create a symlink from /vendor to /system/vendor.
-
-    sh = """#!/vendor/bin/sh
-if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
-  applypatch \\
-          --flash /vendor/etc/recovery.img \\
-          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
-      log -t recovery "Installing new recovery image: succeeded" || \\
-      log -t recovery "Installing new recovery image: failed"
-else
-  log -t recovery "Recovery image already installed"
-fi
-""" % {'type': recovery_type,
-       'device': recovery_device,
-       'sha1': recovery_img.sha1,
-       'size': recovery_img.size}
-  else:
-    sh = """#!/vendor/bin/sh
-if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
-  applypatch %(bonus_args)s \\
-          --patch /vendor/recovery-from-boot.p \\
-          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
-          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
-      log -t recovery "Installing new recovery image: succeeded" || \\
-      log -t recovery "Installing new recovery image: failed"
-else
-  log -t recovery "Recovery image already installed"
-fi
-""" % {'boot_size': boot_img.size,
-       'boot_sha1': boot_img.sha1,
-       'recovery_size': recovery_img.size,
-       'recovery_sha1': recovery_img.sha1,
-       'boot_type': boot_type,
-       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
-       'recovery_type': recovery_type,
-       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
-       'bonus_args': bonus_args}
-
-  # The install script location moved from /system/etc to /system/bin in the L
-  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
-  sh_location = os.path.join(sh_dir, "install-recovery.sh")
-
-  logger.info("putting script in %s", sh_location)
-
-  output_sink(sh_location, sh.encode())
-
-
-class DynamicPartitionUpdate(object):
-  def __init__(self, src_group=None, tgt_group=None, progress=None,
-               block_difference=None):
-    self.src_group = src_group
-    self.tgt_group = tgt_group
-    self.progress = progress
-    self.block_difference = block_difference
-
-  @property
-  def src_size(self):
-    if not self.block_difference:
-      return 0
-    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
-
-  @property
-  def tgt_size(self):
-    if not self.block_difference:
-      return 0
-    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
-
-  @staticmethod
-  def _GetSparseImageSize(img):
-    if not img:
-      return 0
-    return img.blocksize * img.total_blocks
-
-
-class DynamicGroupUpdate(object):
-  def __init__(self, src_size=None, tgt_size=None):
-    # None: group does not exist. 0: no size limits.
-    self.src_size = src_size
-    self.tgt_size = tgt_size
-
-
-class DynamicPartitionsDifference(object):
-  def __init__(self, info_dict, block_diffs, progress_dict=None,
-               source_info_dict=None):
-    if progress_dict is None:
-      progress_dict = {}
-
-    self._remove_all_before_apply = False
-    if source_info_dict is None:
-      self._remove_all_before_apply = True
-      source_info_dict = {}
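-      # Full OTA: there is no source build, so all existing dynamic
-      # partitions and groups are removed before applying the target layout.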
-
-    block_diff_dict = collections.OrderedDict(
-        [(e.partition, e) for e in block_diffs])
-
-    assert len(block_diff_dict) == len(block_diffs), \
-        "Duplicated BlockDifference object for {}".format(
-            [partition for partition, count in
-             collections.Counter(e.partition for e in block_diffs).items()
-             if count > 1])
-
-    self._partition_updates = collections.OrderedDict()
-
-    for p, block_diff in block_diff_dict.items():
-      self._partition_updates[p] = DynamicPartitionUpdate()
-      self._partition_updates[p].block_difference = block_diff
-
-    for p, progress in progress_dict.items():
-      if p in self._partition_updates:
-        self._partition_updates[p].progress = progress
-
-    tgt_groups = shlex.split(info_dict.get(
-        "super_partition_groups", "").strip())
-    src_groups = shlex.split(source_info_dict.get(
-        "super_partition_groups", "").strip())
-
-    for g in tgt_groups:
-      for p in shlex.split(info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
-        assert p in self._partition_updates, \
-            "{} is in target super_{}_partition_list but no BlockDifference " \
-            "object is provided.".format(p, g)
-        self._partition_updates[p].tgt_group = g
-
-    for g in src_groups:
-      for p in shlex.split(source_info_dict.get(
-              "super_%s_partition_list" % g, "").strip()):
-        assert p in self._partition_updates, \
-            "{} is in source super_{}_partition_list but no BlockDifference " \
-            "object is provided.".format(p, g)
-        self._partition_updates[p].src_group = g
-
-    target_dynamic_partitions = set(shlex.split(info_dict.get(
-        "dynamic_partition_list", "").strip()))
-    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
-                                  if u.tgt_size)
-    assert block_diffs_with_target == target_dynamic_partitions, \
-        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
-            list(target_dynamic_partitions), list(block_diffs_with_target))
-
-    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
-        "dynamic_partition_list", "").strip()))
-    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
-                                  if u.src_size)
-    assert block_diffs_with_source == source_dynamic_partitions, \
-        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
-            list(source_dynamic_partitions), list(block_diffs_with_source))
-
-    if self._partition_updates:
-      logger.info("Updating dynamic partitions %s",
-                  self._partition_updates.keys())
-
-    self._group_updates = collections.OrderedDict()
-
-    for g in tgt_groups:
-      self._group_updates[g] = DynamicGroupUpdate()
-      self._group_updates[g].tgt_size = int(info_dict.get(
-          "super_%s_group_size" % g, "0").strip())
-
-    for g in src_groups:
-      if g not in self._group_updates:
-        self._group_updates[g] = DynamicGroupUpdate()
-      self._group_updates[g].src_size = int(source_info_dict.get(
-          "super_%s_group_size" % g, "0").strip())
-
-    self._Compute()
-
-  def WriteScript(self, script, output_zip, write_verify_script=False):
-    script.Comment('--- Start patching dynamic partitions ---')
-    for p, u in self._partition_updates.items():
-      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-        script.Comment('Patch partition %s' % p)
-        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
-                                       write_verify_script=False)
-
-    op_list_path = MakeTempFile()
-    with open(op_list_path, 'w') as f:
-      for line in self._op_list:
-        f.write('{}\n'.format(line))
-
-    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
-
-    script.Comment('Update dynamic partition metadata')
-    script.AppendExtra('assert(update_dynamic_partitions('
-                       'package_extract_file("dynamic_partitions_op_list")));')
-
-    if write_verify_script:
-      for p, u in self._partition_updates.items():
-        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-          u.block_difference.WritePostInstallVerifyScript(script)
-          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_size and u.src_size <= u.tgt_size:
-        script.Comment('Patch partition %s' % p)
-        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
-                                       write_verify_script=write_verify_script)
-        if write_verify_script:
-          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
-
-    script.Comment('--- End patching dynamic partitions ---')
-
-  def _Compute(self):
-    self._op_list = list()
-
-    def append(line):
-      self._op_list.append(line)
-
-    def comment(line):
-      self._op_list.append("# %s" % line)
-
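-    # Order matters: ops that free space (remove, move out of a group,
-    # shrink) are emitted before ops that consume space (add, grow, move
-    # in). The unit tests assert this ordering.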
-    if self._remove_all_before_apply:
-      comment('Remove all existing dynamic partitions and groups before '
-              'applying full OTA')
-      append('remove_all_groups')
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and not u.tgt_group:
-        append('remove %s' % p)
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
-        comment('Move partition %s from %s to default' % (p, u.src_group))
-        append('move %s default' % p)
-
-    for p, u in self._partition_updates.items():
-      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
-        comment('Shrink partition %s from %d to %d' %
-                (p, u.src_size, u.tgt_size))
-        append('resize %s %s' % (p, u.tgt_size))
-
-    for g, u in self._group_updates.items():
-      if u.src_size is not None and u.tgt_size is None:
-        append('remove_group %s' % g)
-      if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size > u.tgt_size):
-        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
-        append('resize_group %s %d' % (g, u.tgt_size))
-
-    for g, u in self._group_updates.items():
-      if u.src_size is None and u.tgt_size is not None:
-        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
-        append('add_group %s %d' % (g, u.tgt_size))
-      if (u.src_size is not None and u.tgt_size is not None and
-              u.src_size < u.tgt_size):
-        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
-        append('resize_group %s %d' % (g, u.tgt_size))
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_group and not u.src_group:
-        comment('Add partition %s to group %s' % (p, u.tgt_group))
-        append('add %s %s' % (p, u.tgt_group))
-
-    for p, u in self._partition_updates.items():
-      if u.tgt_size and u.src_size < u.tgt_size:
-        comment('Grow partition %s from %d to %d' %
-                (p, u.src_size, u.tgt_size))
-        append('resize %s %d' % (p, u.tgt_size))
-
-    for p, u in self._partition_updates.items():
-      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
-        comment('Move partition %s from default to %s' %
-                (p, u.tgt_group))
-        append('move %s %s' % (p, u.tgt_group))
-
-
-def GetTypeAndDevice(mount_point, info, check_no_slot=True):
-  """
-  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
-  backwards compatibility. It aborts if the fstab entry has the slotselect
-  option (unless check_no_slot is explicitly set to False).
-  """
-  fstab = info["fstab"]
-  if fstab:
-    if check_no_slot:
-      assert not fstab[mount_point].slotselect, \
-          "Use GetTypeAndDeviceExpr instead"
-    return (PARTITION_TYPES[fstab[mount_point].fs_type],
-            fstab[mount_point].device)
-  raise KeyError
-
-
-def GetTypeAndDeviceExpr(mount_point, info):
-  """
-  Return the partition type of the mount point, and an edify expression that
-  evaluates to its device path at runtime.
-  """
-  fstab = info["fstab"]
-  if fstab:
-    p = fstab[mount_point]
-    device_expr = '"%s"' % fstab[mount_point].device
-    if p.slotselect:
-      device_expr = 'add_slot_suffix(%s)' % device_expr
-    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
-  raise KeyError
diff --git a/tools/releasetools/ota_from_raw_img.py b/tools/releasetools/ota_from_raw_img.py
index 0c1c05a..c186940 100644
--- a/tools/releasetools/ota_from_raw_img.py
+++ b/tools/releasetools/ota_from_raw_img.py
@@ -68,6 +68,11 @@
   if args.verbose:
     logger.setLevel(logging.INFO)
   logger.info(args)
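+  # An image may be given as "old:new", e.g. "boot_4k.img:boot_16k.img",
+  # to request an incremental payload from the old image to the new one.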
+  old_imgs = [""] * len(args.images)
+  for (i, img) in enumerate(args.images):
+    if ":" in img:
+      old_imgs[i], args.images[i] = img.split(":", maxsplit=1)
+
   if not args.partition_names:
     args.partition_names = [os.path.splitext(os.path.basename(path))[0]
                             for path in args.images]
@@ -79,6 +84,7 @@
     cmd.append("--partition_names=" + ",".join(args.partition_names))
     cmd.append("--dynamic_partition_info_file=" +
                dynamic_partition_info_file.name)
+    cmd.append("--old_partitions=" + ",".join(old_imgs))
     cmd.append("--new_partitions=" + ",".join(args.images))
     cmd.append("--out_file=" + unsigned_payload.name)
     cmd.append("--is_partial_update")
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index de0e187..fa4ed09 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -265,7 +265,6 @@
 import os
 import os.path
 import re
-import shlex
 import shutil
 import subprocess
 import sys
@@ -274,6 +273,7 @@
 import care_map_pb2
 import common
 import ota_utils
+import payload_signer
 from ota_utils import (VABC_COMPRESSION_PARAM_SUPPORT, FinalizeMetadata, GetPackageMetadata,
                        PayloadGenerator, SECURITY_PATCH_LEVEL_PROP_NAME, ExtractTargetFiles, CopyTargetFilesDir)
 from common import DoesInputFileContain, IsSparseImage
@@ -308,9 +308,6 @@
 OPTIONS.cache_size = None
 OPTIONS.stash_threshold = 0.8
 OPTIONS.log_diff = None
-OPTIONS.payload_signer = None
-OPTIONS.payload_signer_args = []
-OPTIONS.payload_signer_maximum_signature_size = None
 OPTIONS.extracted_input = None
 OPTIONS.skip_postinstall = False
 OPTIONS.skip_compatibility_check = False
@@ -1125,9 +1122,7 @@
 def main(argv):
 
   def option_handler(o, a):
-    if o in ("-k", "--package_key"):
-      OPTIONS.package_key = a
-    elif o in ("-i", "--incremental_from"):
+    if o in ("-i", "--incremental_from"):
       OPTIONS.incremental_source = a
     elif o == "--full_radio":
       OPTIONS.full_radio = True
@@ -1172,17 +1167,6 @@
                          "a float" % (a, o))
     elif o == "--log_diff":
       OPTIONS.log_diff = a
-    elif o == "--payload_signer":
-      OPTIONS.payload_signer = a
-    elif o == "--payload_signer_args":
-      OPTIONS.payload_signer_args = shlex.split(a)
-    elif o == "--payload_signer_maximum_signature_size":
-      OPTIONS.payload_signer_maximum_signature_size = a
-    elif o == "--payload_signer_key_size":
-      # TODO(Xunchang) remove this option after cleaning up the callers.
-      logger.warning("The option '--payload_signer_key_size' is deprecated."
-                     " Use '--payload_signer_maximum_signature_size' instead.")
-      OPTIONS.payload_signer_maximum_signature_size = a
     elif o == "--extracted_input_target_files":
       OPTIONS.extracted_input = a
     elif o == "--skip_postinstall":
@@ -1258,7 +1242,6 @@
   args = common.ParseOptions(argv, __doc__,
                              extra_opts="b:k:i:d:e:t:2o:",
                              extra_long_opts=[
-                                 "package_key=",
                                  "incremental_from=",
                                  "full_radio",
                                  "full_bootloader",
@@ -1277,10 +1260,6 @@
                                  "verify",
                                  "stash_threshold=",
                                  "log_diff=",
-                                 "payload_signer=",
-                                 "payload_signer_args=",
-                                 "payload_signer_maximum_signature_size=",
-                                 "payload_signer_key_size=",
                                  "extracted_input_target_files=",
                                  "skip_postinstall",
                                  "retrofit_dynamic_partitions",
@@ -1304,7 +1283,7 @@
                                  "vabc_compression_param=",
                                  "security_patch_level=",
                                  "max_threads=",
-                             ], extra_option_handler=option_handler)
+                             ], extra_option_handler=[option_handler, payload_signer.signer_options])
   common.InitLogging()
 
   if len(args) != 2:
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 6ca9d64..0a6ff39 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -37,7 +37,6 @@
 OPTIONS.wipe_user_data = False
 OPTIONS.downgrade = False
 OPTIONS.key_passwords = {}
-OPTIONS.package_key = None
 OPTIONS.incremental_source = None
 OPTIONS.retrofit_dynamic_partitions = False
 OPTIONS.output_metadata_path = None
diff --git a/tools/releasetools/payload_signer.py b/tools/releasetools/payload_signer.py
index bbd2896..a5d09e1 100644
--- a/tools/releasetools/payload_signer.py
+++ b/tools/releasetools/payload_signer.py
@@ -16,10 +16,51 @@
 
 import common
 import logging
-from common import OPTIONS
+import shlex
+from common import OPTIONS, OptionHandler
 
 logger = logging.getLogger(__name__)
 
+OPTIONS.payload_signer = None
+OPTIONS.payload_signer_args = []
+OPTIONS.payload_signer_maximum_signature_size = None
+OPTIONS.package_key = None
+
+
+class SignerOptions(OptionHandler):
+
+  @staticmethod
+  def ParseOptions(o, a):
+    if o in ("-k", "--package_key"):
+      OPTIONS.package_key = a
+    elif o == "--payload_signer":
+      OPTIONS.payload_signer = a
+    elif o == "--payload_signer_args":
+      OPTIONS.payload_signer_args = shlex.split(a)
+    elif o == "--payload_signer_maximum_signature_size":
+      OPTIONS.payload_signer_maximum_signature_size = a
+    elif o == "--payload_signer_key_size":
+      # TODO(xunchang) remove this option after cleaning up the callers.
+      logger.warning("The option '--payload_signer_key_size' is deprecated."
+                      " Use '--payload_signer_maximum_signature_size' instead.")
+      OPTIONS.payload_signer_maximum_signature_size = a
+    else:
+      return False
+    return True
+
+  def __init__(self):
+    super().__init__(
+      ["payload_signer=",
+       "package_key=",
+       "payload_signer_args=",
+       "payload_signer_maximum_signature_size=",
+       "payload_signer_key_size="],
+      SignerOptions.ParseOptions
+    )
+
+
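+# Module-level handler instance; callers pass it as an extra option handler,
+# e.g. extra_option_handler=[option_handler, payload_signer.signer_options].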
+signer_options = SignerOptions()
+
 
 class PayloadSigner(object):
   """A class that wraps the payload signing works.
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 8052821..14f0e88 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -26,6 +26,7 @@
 import common
 import test_utils
 import validate_target_files
+from images import EmptyImage, DataImage
 from rangelib import RangeSet
 
 
@@ -1670,6 +1671,292 @@
                       test_file.name, 'generic_kernel')
 
 
+class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
+  """Checks the format of install-recovery.sh.
+
+  Its format should match between common.py and validate_target_files.py.
+  """
+
+  def setUp(self):
+    self._tempdir = common.MakeTempDir()
+    # Create a fake dict that contains the fstab info for boot&recovery.
+    self._info = {"fstab": {}}
+    fake_fstab = [
+        "/dev/soc.0/by-name/boot /boot emmc defaults defaults",
+        "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"]
+    self._info["fstab"] = common.LoadRecoveryFSTab("\n".join, 2, fake_fstab)
+    # Construct the gzipped recovery.img and boot.img
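+    # echo -n "recovery" | gzip -f | hd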
+    self.recovery_data = bytearray([
+        0x1f, 0x8b, 0x08, 0x00, 0x81, 0x11, 0x02, 0x5a, 0x00, 0x03, 0x2b, 0x4a,
+        0x4d, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x04, 0x00, 0xc9, 0x93, 0x43, 0xf3,
+        0x08, 0x00, 0x00, 0x00
+    ])
+    # echo -n "boot" | gzip -f | hd
+    self.boot_data = bytearray([
+        0x1f, 0x8b, 0x08, 0x00, 0x8c, 0x12, 0x02, 0x5a, 0x00, 0x03, 0x4b, 0xca,
+        0xcf, 0x2f, 0x01, 0x00, 0xc4, 0xae, 0xed, 0x46, 0x04, 0x00, 0x00, 0x00
+    ])
+
+  def _out_tmp_sink(self, name, data, prefix="SYSTEM"):
+    loc = os.path.join(self._tempdir, prefix, name)
+    if not os.path.exists(os.path.dirname(loc)):
+      os.makedirs(os.path.dirname(loc))
+    with open(loc, "wb") as f:
+      f.write(data)
+
+  def test_full_recovery(self):
+    recovery_image = common.File("recovery.img", self.recovery_data)
+    boot_image = common.File("boot.img", self.boot_data)
+    self._info["full_recovery_image"] = "true"
+
+    common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+                             recovery_image, boot_image, self._info)
+    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+                                                        self._info)
+
+  @test_utils.SkipIfExternalToolsUnavailable()
+  def test_recovery_from_boot(self):
+    recovery_image = common.File("recovery.img", self.recovery_data)
+    self._out_tmp_sink("recovery.img", recovery_image.data, "IMAGES")
+    boot_image = common.File("boot.img", self.boot_data)
+    self._out_tmp_sink("boot.img", boot_image.data, "IMAGES")
+
+    common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+                             recovery_image, boot_image, self._info)
+    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+                                                        self._info)
+    # Validate 'recovery-from-boot' with bonus argument.
+    self._out_tmp_sink("etc/recovery-resource.dat", b"bonus", "SYSTEM")
+    common.MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
+                             recovery_image, boot_image, self._info)
+    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
+                                                        self._info)
+
+
+class MockBlockDifference(object):
+
+  def __init__(self, partition, tgt, src=None):
+    self.partition = partition
+    self.tgt = tgt
+    self.src = src
+
+  def WriteScript(self, script, _, progress=None,
+                  write_verify_script=False):
+    if progress:
+      script.AppendExtra("progress({})".format(progress))
+    script.AppendExtra("patch({});".format(self.partition))
+    if write_verify_script:
+      self.WritePostInstallVerifyScript(script)
+
+  def WritePostInstallVerifyScript(self, script):
+    script.AppendExtra("verify({});".format(self.partition))
+
+
+class FakeSparseImage(object):
+
+  def __init__(self, size):
+    self.blocksize = 4096
+    self.total_blocks = size // 4096
+    assert size % 4096 == 0, "{} is not a multiple of 4096".format(size)
+
+
+class DynamicPartitionsDifferenceTest(test_utils.ReleaseToolsTestCase):
+
+  @staticmethod
+  def get_op_list(output_path):
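+    """Returns the dynamic partition ops in the zip, with comments stripped."""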
+    with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
+      with output_zip.open('dynamic_partitions_op_list') as op_list:
+        return [line.decode().strip() for line in op_list.readlines()
+                if not line.startswith(b'#')]
+
+  def setUp(self):
+    self.script = test_utils.MockScriptWriter()
+    self.output_path = common.MakeTempFile(suffix='.zip')
+
+  def test_full(self):
+    target_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor
+super_partition_groups=group_foo
+super_group_foo_group_size={group_size}
+super_group_foo_partition_list=system vendor
+""".format(group_size=4 * GiB).split("\n"))
+    block_diffs = [MockBlockDifference("system", FakeSparseImage(3 * GiB)),
+                   MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
+
+    dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    self.assertEqual(str(self.script).strip(), """
+assert(update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")));
+patch(system);
+verify(system);
+unmap_partition("system");
+patch(vendor);
+verify(vendor);
+unmap_partition("vendor");
+""".strip())
+
+    lines = self.get_op_list(self.output_path)
+
+    remove_all_groups = lines.index("remove_all_groups")
+    add_group = lines.index("add_group group_foo 4294967296")
+    add_vendor = lines.index("add vendor group_foo")
+    add_system = lines.index("add system group_foo")
+    resize_vendor = lines.index("resize vendor 1073741824")
+    resize_system = lines.index("resize system 3221225472")
+
+    self.assertLess(remove_all_groups, add_group,
+                    "Should add groups after removing all groups")
+    self.assertLess(add_group, min(add_vendor, add_system),
+                    "Should add partitions after adding group")
+    self.assertLess(add_system, resize_system,
+                    "Should resize system after adding it")
+    self.assertLess(add_vendor, resize_vendor,
+                    "Should resize vendor after adding it")
+
+  def test_inc_groups(self):
+    source_info = common.LoadDictionaryFromLines("""
+super_partition_groups=group_foo group_bar group_baz
+super_group_foo_group_size={group_foo_size}
+super_group_bar_group_size={group_bar_size}
+""".format(group_foo_size=4 * GiB, group_bar_size=3 * GiB).split("\n"))
+    target_info = common.LoadDictionaryFromLines("""
+super_partition_groups=group_foo group_baz group_qux
+super_group_foo_group_size={group_foo_size}
+super_group_baz_group_size={group_baz_size}
+super_group_qux_group_size={group_qux_size}
+""".format(group_foo_size=3 * GiB, group_baz_size=4 * GiB,
+           group_qux_size=1 * GiB).split("\n"))
+
+    dp_diff = common.DynamicPartitionsDifference(target_info,
+                                                 block_diffs=[],
+                                                 source_info_dict=source_info)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    lines = self.get_op_list(self.output_path)
+
+    removed = lines.index("remove_group group_bar")
+    shrunk = lines.index("resize_group group_foo 3221225472")
+    grown = lines.index("resize_group group_baz 4294967296")
+    added = lines.index("add_group group_qux 1073741824")
+
+    self.assertLess(max(removed, shrunk),
+                    min(grown, added),
+                    "ops that remove / shrink partitions must precede ops that "
+                    "grow / add partitions")
+
+  def test_incremental(self):
+    source_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor product system_ext
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=system vendor product system_ext
+""".format(group_foo_size=4 * GiB).split("\n"))
+    target_info = common.LoadDictionaryFromLines("""
+dynamic_partition_list=system vendor product odm
+super_partition_groups=group_foo group_bar
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=system vendor odm
+super_group_bar_group_size={group_bar_size}
+super_group_bar_partition_list=product
+""".format(group_foo_size=3 * GiB, group_bar_size=1 * GiB).split("\n"))
+
+    block_diffs = [MockBlockDifference("system", FakeSparseImage(1536 * MiB),
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("vendor", FakeSparseImage(512 * MiB),
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("product", FakeSparseImage(1024 * MiB),
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("system_ext", None,
+                                       src=FakeSparseImage(1024 * MiB)),
+                   MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
+                                       src=None)]
+
+    dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
+                                                 source_info_dict=source_info)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    metadata_idx = self.script.lines.index(
+        'assert(update_dynamic_partitions(package_extract_file('
+        '"dynamic_partitions_op_list")));')
+    self.assertLess(self.script.lines.index('patch(vendor);'), metadata_idx)
+    self.assertLess(metadata_idx, self.script.lines.index('verify(vendor);'))
+    for p in ("product", "system", "odm"):
+      patch_idx = self.script.lines.index("patch({});".format(p))
+      verify_idx = self.script.lines.index("verify({});".format(p))
+      self.assertLess(metadata_idx, patch_idx,
+                      "Should patch {} after updating metadata".format(p))
+      self.assertLess(patch_idx, verify_idx,
+                      "Should verify {} after patching".format(p))
+
+    self.assertNotIn("patch(system_ext);", self.script.lines)
+
+    lines = self.get_op_list(self.output_path)
+
+    remove = lines.index("remove system_ext")
+    move_product_out = lines.index("move product default")
+    shrink = lines.index("resize vendor 536870912")
+    shrink_group = lines.index("resize_group group_foo 3221225472")
+    add_group_bar = lines.index("add_group group_bar 1073741824")
+    add_odm = lines.index("add odm group_foo")
+    grow_existing = lines.index("resize system 1610612736")
+    grow_added = lines.index("resize odm 1073741824")
+    move_product_in = lines.index("move product group_bar")
+
+    max_idx_move_partition_out_foo = max(remove, move_product_out, shrink)
+    min_idx_move_partition_in_foo = min(add_odm, grow_existing, grow_added)
+
+    self.assertLess(max_idx_move_partition_out_foo, shrink_group,
+                    "Must shrink group after partitions inside group are shrunk"
+                    " / removed")
+
+    self.assertLess(add_group_bar, move_product_in,
+                    "Must add partitions to group after group is added")
+
+    self.assertLess(max_idx_move_partition_out_foo,
+                    min_idx_move_partition_in_foo,
+                    "Must shrink partitions / remove partitions from group"
+                    "before adding / moving partitions into group")
+
+  def test_remove_partition(self):
+    source_info = common.LoadDictionaryFromLines("""
+blockimgdiff_versions=3,4
+use_dynamic_partitions=true
+dynamic_partition_list=foo
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+super_group_foo_partition_list=foo
+""".format(group_foo_size=4 * GiB).split("\n"))
+    target_info = common.LoadDictionaryFromLines("""
+blockimgdiff_versions=3,4
+use_dynamic_partitions=true
+super_partition_groups=group_foo
+super_group_foo_group_size={group_foo_size}
+""".format(group_foo_size=4 * GiB).split("\n"))
+
+    common.OPTIONS.info_dict = target_info
+    common.OPTIONS.target_info_dict = target_info
+    common.OPTIONS.source_info_dict = source_info
+    common.OPTIONS.cache_size = 4 * 4096
+
+    block_diffs = [common.BlockDifference("foo", EmptyImage(),
+                                          src=DataImage("source", pad=True))]
+
+    dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
+                                                 source_info_dict=source_info)
+    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
+      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
+
+    self.assertNotIn("block_image_update", str(self.script),
+                     "Removed partition should not be patched.")
+
+    lines = self.get_op_list(self.output_path)
+    self.assertEqual(lines, ["remove foo"])
+
+
 class PartitionBuildPropsTest(test_utils.ReleaseToolsTestCase):
   def setUp(self):
     self.odm_build_prop = [
diff --git a/tools/releasetools/test_non_ab_ota.py b/tools/releasetools/test_non_ab_ota.py
index 7a5ccd3..5207e2f 100644
--- a/tools/releasetools/test_non_ab_ota.py
+++ b/tools/releasetools/test_non_ab_ota.py
@@ -15,24 +15,19 @@
 #
 
 import copy
-import os
 import zipfile
 
 import common
 import test_utils
-import validate_target_files
 
-from images import EmptyImage, DataImage
-from non_ab_ota import NonAbOtaPropertyFiles, WriteFingerprintAssertion, BlockDifference, DynamicPartitionsDifference, MakeRecoveryPatch
+from non_ab_ota import NonAbOtaPropertyFiles, WriteFingerprintAssertion
 from test_utils import PropertyFilesTestCase
 
 
 class NonAbOtaPropertyFilesTest(PropertyFilesTestCase):
   """Additional validity checks specialized for NonAbOtaPropertyFiles."""
-
   def setUp(self):
-    common.OPTIONS.no_signing = False
-
+    common.OPTIONS.no_signing = False
+
   def test_init(self):
     property_files = NonAbOtaPropertyFiles()
     self.assertEqual('ota-property-files', property_files.name)
@@ -60,8 +55,7 @@
     with zipfile.ZipFile(zip_file) as zip_fp:
       raw_metadata = property_files.GetPropertyFilesString(
           zip_fp, reserve_space=False)
-      property_files_string = property_files.Finalize(
-          zip_fp, len(raw_metadata))
+      property_files_string = property_files.Finalize(zip_fp, len(raw_metadata))
     tokens = self._parse_property_files_string(property_files_string)
 
     self.assertEqual(2, len(tokens))
@@ -83,7 +77,6 @@
 
       property_files.Verify(zip_fp, raw_metadata)
 
-
 class NonAbOTATest(test_utils.ReleaseToolsTestCase):
   TEST_TARGET_INFO_DICT = {
       'build.prop': common.PartitionBuildProps.FromDictionary(
@@ -105,7 +98,7 @@
       ),
       'vendor.build.prop': common.PartitionBuildProps.FromDictionary(
           'vendor', {
-              'ro.vendor.build.fingerprint': 'vendor-build-fingerprint'}
+              'ro.vendor.build.fingerprint': 'vendor-build-fingerprint'}
       ),
       'property1': 'value1',
       'property2': 4096,
@@ -125,7 +118,6 @@
           'ro.product.device': 'device3',
       },
   ]
-
   def test_WriteFingerprintAssertion_without_oem_props(self):
     target_info = common.BuildInfo(self.TEST_TARGET_INFO_DICT, None)
     source_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
@@ -178,296 +170,3 @@
         [('AssertSomeThumbprint', 'build-thumbprint',
           'source-build-thumbprint')],
         script_writer.lines)
-
-
-KiB = 1024
-MiB = 1024 * KiB
-GiB = 1024 * MiB
-
-
-class MockBlockDifference(object):
-
-  def __init__(self, partition, tgt, src=None):
-    self.partition = partition
-    self.tgt = tgt
-    self.src = src
-
-  def WriteScript(self, script, _, progress=None,
-                  write_verify_script=False):
-    if progress:
-      script.AppendExtra("progress({})".format(progress))
-    script.AppendExtra("patch({});".format(self.partition))
-    if write_verify_script:
-      self.WritePostInstallVerifyScript(script)
-
-  def WritePostInstallVerifyScript(self, script):
-    script.AppendExtra("verify({});".format(self.partition))
-
-
-class FakeSparseImage(object):
-
-  def __init__(self, size):
-    self.blocksize = 4096
-    self.total_blocks = size // 4096
-    assert size % 4096 == 0, "{} is not a multiple of 4096".format(size)
-
-
-class DynamicPartitionsDifferenceTest(test_utils.ReleaseToolsTestCase):
-
-  @staticmethod
-  def get_op_list(output_path):
-    with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
-      with output_zip.open('dynamic_partitions_op_list') as op_list:
-        return [line.decode().strip() for line in op_list.readlines()
-                if not line.startswith(b'#')]
-
-  def setUp(self):
-    self.script = test_utils.MockScriptWriter()
-    self.output_path = common.MakeTempFile(suffix='.zip')
-
-  def test_full(self):
-    target_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor
-super_partition_groups=group_foo
-super_group_foo_group_size={group_size}
-super_group_foo_partition_list=system vendor
-""".format(group_size=4 * GiB).split("\n"))
-    block_diffs = [MockBlockDifference("system", FakeSparseImage(3 * GiB)),
-                   MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
-
-    dp_diff = DynamicPartitionsDifference(target_info, block_diffs)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    self.assertEqual(str(self.script).strip(), """
-assert(update_dynamic_partitions(package_extract_file("dynamic_partitions_op_list")));
-patch(system);
-verify(system);
-unmap_partition("system");
-patch(vendor);
-verify(vendor);
-unmap_partition("vendor");
-""".strip())
-
-    lines = self.get_op_list(self.output_path)
-
-    remove_all_groups = lines.index("remove_all_groups")
-    add_group = lines.index("add_group group_foo 4294967296")
-    add_vendor = lines.index("add vendor group_foo")
-    add_system = lines.index("add system group_foo")
-    resize_vendor = lines.index("resize vendor 1073741824")
-    resize_system = lines.index("resize system 3221225472")
-
-    self.assertLess(remove_all_groups, add_group,
-                    "Should add groups after removing all groups")
-    self.assertLess(add_group, min(add_vendor, add_system),
-                    "Should add partitions after adding group")
-    self.assertLess(add_system, resize_system,
-                    "Should resize system after adding it")
-    self.assertLess(add_vendor, resize_vendor,
-                    "Should resize vendor after adding it")
-
-  def test_inc_groups(self):
-    source_info = common.LoadDictionaryFromLines("""
-super_partition_groups=group_foo group_bar group_baz
-super_group_foo_group_size={group_foo_size}
-super_group_bar_group_size={group_bar_size}
-""".format(group_foo_size=4 * GiB, group_bar_size=3 * GiB).split("\n"))
-    target_info = common.LoadDictionaryFromLines("""
-super_partition_groups=group_foo group_baz group_qux
-super_group_foo_group_size={group_foo_size}
-super_group_baz_group_size={group_baz_size}
-super_group_qux_group_size={group_qux_size}
-""".format(group_foo_size=3 * GiB, group_baz_size=4 * GiB,
-           group_qux_size=1 * GiB).split("\n"))
-
-    dp_diff = DynamicPartitionsDifference(target_info,
-                                                 block_diffs=[],
-                                                 source_info_dict=source_info)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    lines = self.get_op_list(self.output_path)
-
-    removed = lines.index("remove_group group_bar")
-    shrunk = lines.index("resize_group group_foo 3221225472")
-    grown = lines.index("resize_group group_baz 4294967296")
-    added = lines.index("add_group group_qux 1073741824")
-
-    self.assertLess(max(removed, shrunk),
-                    min(grown, added),
-                    "ops that remove / shrink partitions must precede ops that "
-                    "grow / add partitions")
-
-  def test_incremental(self):
-    source_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product system_ext
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor product system_ext
-""".format(group_foo_size=4 * GiB).split("\n"))
-    target_info = common.LoadDictionaryFromLines("""
-dynamic_partition_list=system vendor product odm
-super_partition_groups=group_foo group_bar
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=system vendor odm
-super_group_bar_group_size={group_bar_size}
-super_group_bar_partition_list=product
-""".format(group_foo_size=3 * GiB, group_bar_size=1 * GiB).split("\n"))
-
-    block_diffs = [MockBlockDifference("system", FakeSparseImage(1536 * MiB),
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("vendor", FakeSparseImage(512 * MiB),
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("product", FakeSparseImage(1024 * MiB),
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("system_ext", None,
-                                       src=FakeSparseImage(1024 * MiB)),
-                   MockBlockDifference("odm", FakeSparseImage(1024 * MiB),
-                                       src=None)]
-
-    dp_diff = DynamicPartitionsDifference(target_info, block_diffs,
-                                                 source_info_dict=source_info)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    metadata_idx = self.script.lines.index(
-        'assert(update_dynamic_partitions(package_extract_file('
-        '"dynamic_partitions_op_list")));')
-    self.assertLess(self.script.lines.index('patch(vendor);'), metadata_idx)
-    self.assertLess(metadata_idx, self.script.lines.index('verify(vendor);'))
-    for p in ("product", "system", "odm"):
-      patch_idx = self.script.lines.index("patch({});".format(p))
-      verify_idx = self.script.lines.index("verify({});".format(p))
-      self.assertLess(metadata_idx, patch_idx,
-                      "Should patch {} after updating metadata".format(p))
-      self.assertLess(patch_idx, verify_idx,
-                      "Should verify {} after patching".format(p))
-
-    self.assertNotIn("patch(system_ext);", self.script.lines)
-
-    lines = self.get_op_list(self.output_path)
-
-    remove = lines.index("remove system_ext")
-    move_product_out = lines.index("move product default")
-    shrink = lines.index("resize vendor 536870912")
-    shrink_group = lines.index("resize_group group_foo 3221225472")
-    add_group_bar = lines.index("add_group group_bar 1073741824")
-    add_odm = lines.index("add odm group_foo")
-    grow_existing = lines.index("resize system 1610612736")
-    grow_added = lines.index("resize odm 1073741824")
-    move_product_in = lines.index("move product group_bar")
-
-    max_idx_move_partition_out_foo = max(remove, move_product_out, shrink)
-    min_idx_move_partition_in_foo = min(add_odm, grow_existing, grow_added)
-
-    self.assertLess(max_idx_move_partition_out_foo, shrink_group,
-                    "Must shrink group after partitions inside group are shrunk"
-                    " / removed")
-
-    self.assertLess(add_group_bar, move_product_in,
-                    "Must add partitions to group after group is added")
-
-    self.assertLess(max_idx_move_partition_out_foo,
-                    min_idx_move_partition_in_foo,
-                    "Must shrink partitions / remove partitions from group"
-                    "before adding / moving partitions into group")
-
-  def test_remove_partition(self):
-    source_info = common.LoadDictionaryFromLines("""
-blockimgdiff_versions=3,4
-use_dynamic_partitions=true
-dynamic_partition_list=foo
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-super_group_foo_partition_list=foo
-""".format(group_foo_size=4 * GiB).split("\n"))
-    target_info = common.LoadDictionaryFromLines("""
-blockimgdiff_versions=3,4
-use_dynamic_partitions=true
-super_partition_groups=group_foo
-super_group_foo_group_size={group_foo_size}
-""".format(group_foo_size=4 * GiB).split("\n"))
-
-    common.OPTIONS.info_dict = target_info
-    common.OPTIONS.target_info_dict = target_info
-    common.OPTIONS.source_info_dict = source_info
-    common.OPTIONS.cache_size = 4 * 4096
-
-    block_diffs = [BlockDifference("foo", EmptyImage(),
-                                   src=DataImage("source", pad=True))]
-
-    dp_diff = DynamicPartitionsDifference(target_info, block_diffs,
-                                          source_info_dict=source_info)
-    with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
-      dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
-
-    self.assertNotIn("block_image_update", str(self.script),
-                     "Removed partition should not be patched.")
-
-    lines = self.get_op_list(self.output_path)
-    self.assertEqual(lines, ["remove foo"])
-
-
-
-class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
-  """Checks the format of install-recovery.sh.
-
-  Its format should match between common.py and validate_target_files.py.
-  """
-
-  def setUp(self):
-    self._tempdir = common.MakeTempDir()
-    # Create a fake dict that contains the fstab info for boot&recovery.
-    self._info = {"fstab": {}}
-    fake_fstab = [
-        "/dev/soc.0/by-name/boot /boot emmc defaults defaults",
-        "/dev/soc.0/by-name/recovery /recovery emmc defaults defaults"]
-    self._info["fstab"] = common.LoadRecoveryFSTab("\n".join, 2, fake_fstab)
-    # Construct the gzipped recovery.img and boot.img
-    self.recovery_data = bytearray([
-        0x1f, 0x8b, 0x08, 0x00, 0x81, 0x11, 0x02, 0x5a, 0x00, 0x03, 0x2b, 0x4a,
-        0x4d, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x04, 0x00, 0xc9, 0x93, 0x43, 0xf3,
-        0x08, 0x00, 0x00, 0x00
-    ])
-    # echo -n "boot" | gzip -f | hd
-    self.boot_data = bytearray([
-        0x1f, 0x8b, 0x08, 0x00, 0x8c, 0x12, 0x02, 0x5a, 0x00, 0x03, 0x4b, 0xca,
-        0xcf, 0x2f, 0x01, 0x00, 0xc4, 0xae, 0xed, 0x46, 0x04, 0x00, 0x00, 0x00
-    ])
-
-  def _out_tmp_sink(self, name, data, prefix="SYSTEM"):
-    loc = os.path.join(self._tempdir, prefix, name)
-    if not os.path.exists(os.path.dirname(loc)):
-      os.makedirs(os.path.dirname(loc))
-    with open(loc, "wb") as f:
-      f.write(data)
-
-  def test_full_recovery(self):
-    recovery_image = common.File("recovery.img", self.recovery_data)
-    boot_image = common.File("boot.img", self.boot_data)
-    self._info["full_recovery_image"] = "true"
-
-    MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
-                             recovery_image, boot_image, self._info)
-    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
-                                                        self._info)
-
-  @test_utils.SkipIfExternalToolsUnavailable()
-  def test_recovery_from_boot(self):
-    recovery_image = common.File("recovery.img", self.recovery_data)
-    self._out_tmp_sink("recovery.img", recovery_image.data, "IMAGES")
-    boot_image = common.File("boot.img", self.boot_data)
-    self._out_tmp_sink("boot.img", boot_image.data, "IMAGES")
-
-    MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
-                             recovery_image, boot_image, self._info)
-    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
-                                                        self._info)
-    # Validate 'recovery-from-boot' with bonus argument.
-    self._out_tmp_sink("etc/recovery-resource.dat", b"bonus", "SYSTEM")
-    MakeRecoveryPatch(self._tempdir, self._out_tmp_sink,
-                             recovery_image, boot_image, self._info)
-    validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
-                                                        self._info)
-
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
index 439fc9f..2addefc 100644
--- a/tools/test_post_process_props.py
+++ b/tools/test_post_process_props.py
@@ -255,29 +255,17 @@
     stderr_redirect = io.StringIO()
     with contextlib.redirect_stderr(stderr_redirect):
       props = PropList("hello")
-      props.put("ro.board.first_api_level","25")
+      props.put("ro.board.first_api_level","202504")
       props.put("ro.build.version.codename", "REL")
 
-      # ro.board.first_api_level must be less than or equal to the sdk version
-      self.assertFalse(validate_grf_props(props, 20))
-      self.assertTrue(validate_grf_props(props, 26))
-      self.assertTrue(validate_grf_props(props, 35))
-
       # manually set ro.board.api_level to an invalid value
-      props.put("ro.board.api_level","20")
-      self.assertFalse(validate_grf_props(props, 26))
+      props.put("ro.board.api_level","202404")
+      self.assertFalse(validate_grf_props(props))
 
       props.get_all_props()[-1].make_as_comment()
       # manually set ro.board.api_level to a valid value
-      props.put("ro.board.api_level","26")
-      self.assertTrue(validate_grf_props(props, 26))
-      # ro.board.api_level must be less than or equal to the sdk version
-      self.assertFalse(validate_grf_props(props, 25))
-
-      # allow setting future api_level before release
-      props.get_all_props()[-2].make_as_comment()
-      props.put("ro.build.version.codename", "NonRel")
-      self.assertTrue(validate_grf_props(props, 24))
+      props.put("ro.board.api_level","202504")
+      self.assertTrue(validate_grf_props(props))
 
 if __name__ == '__main__':
     unittest.main(verbosity=2)