Merge "Revert^2 "[CrashRecovery] Add module to BCP and SSCP"" into main
diff --git a/core/Makefile b/core/Makefile
index b9f7f22..44e92d8 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -287,6 +287,11 @@
 endif
 endif
 
+# Do this early because sysprop.mk depends on BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC.
+ifeq (default,$(ENABLE_UFFD_GC))
+BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC := $(OUT_DIR)/soong/dexpreopt/kernel_version_for_uffd_gc.txt
+endif # ENABLE_UFFD_GC
+
 include $(BUILD_SYSTEM)/sysprop.mk
 
 # ----------------------------------------------------------------
@@ -5256,6 +5261,34 @@
 
 endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
 
+ifeq (default,$(ENABLE_UFFD_GC))
+
+ifneq (,$(BUILT_KERNEL_VERSION_FILE))
+$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC): $(BUILT_KERNEL_VERSION_FILE)
+$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC):
+	cp $(BUILT_KERNEL_VERSION_FILE) $(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC)
+else
+# We make this a warning rather than an error to avoid breaking too many builds. When it happens,
+# we use a placeholder as the kernel version, which is consumed by uffd_gc_utils.py.
+$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC):
+	echo $$'\
+Unable to determine UFFD GC flag because the kernel version is not available and\n\
+PRODUCT_ENABLE_UFFD_GC is "default".\n\
+You can fix this by:\n\
+  1. [Recommended] Making the kernel version available.\n\
+    (1). Set PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS to "true".\n\
+    (2). If you are still getting this message after doing so, see the warning about\n\
+         PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS in the build logs.\n\
+  or\n\
+  2. Explicitly setting PRODUCT_ENABLE_UFFD_GC to "true" or "false" based on the kernel version.\n\
+    (1). Set PRODUCT_ENABLE_UFFD_GC to "true" if the kernel is a GKI kernel and is android12-5.4\n\
+         or above, or a non-GKI kernel that supports userfaultfd(2) and MREMAP_DONTUNMAP.\n\
+    (2). Set PRODUCT_ENABLE_UFFD_GC to "false" otherwise.'\
+	  && echo '<unknown-kernel>' > $@
+endif # BUILT_KERNEL_VERSION_FILE
+
+endif # ENABLE_UFFD_GC == "default"
+
 # -- Check VINTF compatibility of build.
 # Skip partial builds; only check full builds. Only check if:
 # - PRODUCT_ENFORCE_VINTF_MANIFEST is true
diff --git a/core/OWNERS b/core/OWNERS
index 88f6d06..c98196a 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -3,7 +3,7 @@
 per-file proguard*.flags = jdduke@google.com
 
 # For version updates
-per-file version_defaults.mk = aseaton@google.com,lubomir@google.com,pscovanner@google.com,bkhalife@google.com,jainne@google.com
+per-file version_defaults.mk = ankurbakshi@google.com,bkhalife@google.com,jainne@google.com,lokeshgoel@google.com,lubomir@google.com,pscovanner@google.com
 
 # For sdk extensions version updates
 per-file version_defaults.mk = amhk@google.com,gurpreetgs@google.com,mkhokhlova@google.com,robertogil@google.com
diff --git a/core/art_config.mk b/core/art_config.mk
index f47a8e2..54bfd6b 100644
--- a/core/art_config.mk
+++ b/core/art_config.mk
@@ -12,38 +12,15 @@
 # ENABLE_UFFD_GC: Whether to use userfaultfd GC.
 
 config_enable_uffd_gc := \
-  $(firstword $(OVERRIDE_ENABLE_UFFD_GC) $(PRODUCT_ENABLE_UFFD_GC))
+  $(firstword $(OVERRIDE_ENABLE_UFFD_GC) $(PRODUCT_ENABLE_UFFD_GC) default)
 
-ifeq (,$(filter-out default,$(config_enable_uffd_gc)))
-  ENABLE_UFFD_GC := true
-
-  # Disable userfaultfd GC if the device doesn't support it (i.e., if
-  # `min(ro.board.api_level ?? ro.board.first_api_level ?? MAX_VALUE,
-  #      ro.product.first_api_level ?? ro.build.version.sdk ?? MAX_VALUE) < 31`)
-  # This logic aligns with how `ro.vendor.api_level` is calculated in
-  # `system/core/init/property_service.cpp`.
-  # We omit the check on `ro.build.version.sdk` here because we are on the latest build system.
-  board_api_level := $(firstword $(BOARD_API_LEVEL) $(BOARD_SHIPPING_API_LEVEL))
-  ifneq (,$(board_api_level))
-    ifeq (true,$(call math_lt,$(board_api_level),31))
-      ENABLE_UFFD_GC := false
-    endif
-  endif
-
-  ifneq (,$(PRODUCT_SHIPPING_API_LEVEL))
-    ifeq (true,$(call math_lt,$(PRODUCT_SHIPPING_API_LEVEL),31))
-      ENABLE_UFFD_GC := false
-    endif
-  endif
-else ifeq (true,$(config_enable_uffd_gc))
-  ENABLE_UFFD_GC := true
-else ifeq (false,$(config_enable_uffd_gc))
-  ENABLE_UFFD_GC := false
-else
+ifeq (,$(filter default true false,$(config_enable_uffd_gc)))
   $(error Unknown PRODUCT_ENABLE_UFFD_GC value: $(config_enable_uffd_gc))
 endif
 
-ADDITIONAL_PRODUCT_PROPERTIES += ro.dalvik.vm.enable_uffd_gc=$(ENABLE_UFFD_GC)
+ENABLE_UFFD_GC := $(config_enable_uffd_gc)
+# If the value is "default", it will be mangled by post_process_props.py.
+ADDITIONAL_PRODUCT_PROPERTIES += ro.dalvik.vm.enable_uffd_gc=$(config_enable_uffd_gc)
 
 # Create APEX_BOOT_JARS_EXCLUDED which is a list of jars to be removed from
 # ApexBoorJars when built from mainline prebuilts.
diff --git a/core/autogen_test_config.mk b/core/autogen_test_config.mk
index 137b118..b69f694 100644
--- a/core/autogen_test_config.mk
+++ b/core/autogen_test_config.mk
@@ -29,7 +29,7 @@
   ifeq (true,$(LOCAL_VENDOR_MODULE))
     autogen_test_install_base = /data/local/tests/vendor
   endif
-  ifeq (true,$(LOCAL_USE_VNDK))
+  ifeq (true,$(call module-in-vendor-or-product))
     autogen_test_install_base = /data/local/tests/vendor
   endif
 endif
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 44e1c7a..07e1afb 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -120,7 +120,7 @@
    $(LOCAL_VENDOR_MODULE) \
    $(LOCAL_PROPRIETARY_MODULE))
 
-include $(BUILD_SYSTEM)/local_vndk.mk
+include $(BUILD_SYSTEM)/local_vendor_product.mk
 
 # local_current_sdk needs to run before local_systemsdk because the former may override
 # LOCAL_SDK_VERSION which is used by the latter.
@@ -1095,10 +1095,10 @@
 ## When compiling against API imported module, use API import stub
 ## libraries.
 ##########################################################################
-ifneq ($(LOCAL_USE_VNDK),)
+ifneq ($(call module-in-vendor-or-product),)
   ifneq ($(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK))
     apiimport_postfix := .apiimport
-    ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+    ifeq ($(LOCAL_IN_PRODUCT),true)
       apiimport_postfix := .apiimport.product
     else
       apiimport_postfix := .apiimport.vendor
@@ -1113,7 +1113,7 @@
 ## When compiling against the VNDK, add the .vendor or .product suffix to
 ## required modules.
 ##########################################################################
-ifneq ($(LOCAL_USE_VNDK),)
+ifneq ($(call module-in-vendor-or-product),)
   #####################################################
   ## Soong modules may be built three times, once for
   ## /system, once for /vendor and once for /product.
@@ -1124,7 +1124,7 @@
     # We don't do this renaming for soong-defined modules since they already
     # have correct names (with .vendor or .product suffix when necessary) in
     # their LOCAL_*_LIBRARIES.
-    ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+    ifeq ($(LOCAL_IN_PRODUCT),true)
       my_required_modules := $(foreach l,$(my_required_modules),\
         $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
     else
diff --git a/core/binary.mk b/core/binary.mk
index d872b66..6dab49c 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -332,10 +332,10 @@
   endif
 endif
 
-ifneq ($(LOCAL_USE_VNDK),)
+ifneq ($(call module-in-vendor-or-product),)
   my_cflags += -D__ANDROID_VNDK__
-  ifneq ($(LOCAL_USE_VNDK_VENDOR),)
-    # Vendor modules have LOCAL_USE_VNDK_VENDOR
+  ifneq ($(LOCAL_IN_VENDOR),)
+    # Vendor modules have LOCAL_IN_VENDOR
     my_cflags += -D__ANDROID_VENDOR__
 
     ifeq ($(BOARD_API_LEVEL),)
@@ -345,8 +345,8 @@
     else
       my_cflags += -D__ANDROID_VENDOR_API__=$(BOARD_API_LEVEL)
     endif
-  else ifneq ($(LOCAL_USE_VNDK_PRODUCT),)
-    # Product modules have LOCAL_USE_VNDK_PRODUCT
+  else ifneq ($(LOCAL_IN_PRODUCT),)
+    # Product modules have LOCAL_IN_PRODUCT
     my_cflags += -D__ANDROID_PRODUCT__
   endif
 endif
@@ -1174,8 +1174,8 @@
 
 apiimport_postfix := .apiimport
 
-ifneq ($(LOCAL_USE_VNDK),)
-  ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ifneq ($(call module-in-vendor-or-product),)
+  ifeq ($(LOCAL_IN_PRODUCT),true)
     apiimport_postfix := .apiimport.product
   else
     apiimport_postfix := .apiimport.vendor
@@ -1192,14 +1192,14 @@
 ###########################################################
 ## When compiling against the VNDK, use LL-NDK libraries
 ###########################################################
-ifneq ($(LOCAL_USE_VNDK),)
+ifneq ($(call module-in-vendor-or-product),)
   #####################################################
   ## Soong modules may be built three times, once for
   ## /system, once for /vendor and once for /product.
   ## If we're using the VNDK, switch all soong
   ## libraries over to the /vendor or /product variant.
   #####################################################
-  ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+  ifeq ($(LOCAL_IN_PRODUCT),true)
     my_whole_static_libraries := $(foreach l,$(my_whole_static_libraries),\
       $(if $(SPLIT_PRODUCT.STATIC_LIBRARIES.$(l)),$(l).product,$(l)))
     my_static_libraries := $(foreach l,$(my_static_libraries),\
@@ -1226,7 +1226,7 @@
 
 # Platform can use vendor public libraries. If a required shared lib is one of
 # the vendor public libraries, the lib is switched to the stub version of the lib.
-ifeq ($(LOCAL_USE_VNDK),)
+ifeq ($(call module-in-vendor-or-product),)
   my_shared_libraries := $(foreach l,$(my_shared_libraries),\
     $(if $(filter $(l),$(VENDOR_PUBLIC_LIBRARIES)),$(l).vendorpublic,$(l)))
 endif
@@ -1278,7 +1278,7 @@
 my_link_type := native:ndk:$(my_ndk_stl_family):$(my_ndk_stl_link_type)
 my_warn_types := $(my_warn_ndk_types)
 my_allowed_types := $(my_allowed_ndk_types)
-else ifdef LOCAL_USE_VNDK
+else ifeq ($(call module-in-vendor-or-product),true)
     _name := $(patsubst %.vendor,%,$(LOCAL_MODULE))
     _name := $(patsubst %.product,%,$(LOCAL_MODULE))
     ifneq ($(filter $(_name),$(VNDK_CORE_LIBRARIES) $(VNDK_SAMEPROCESS_LIBRARIES) $(LLNDK_LIBRARIES)),)
@@ -1289,7 +1289,7 @@
         endif
         my_warn_types :=
         my_allowed_types := native:vndk native:vndk_private
-    else ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+    else ifeq ($(LOCAL_IN_PRODUCT),true)
         # Modules installed to /product cannot directly depend on modules marked
         # with vendor_available: false
         my_link_type := native:product
@@ -1592,7 +1592,7 @@
 ###########################################################
 ifndef LOCAL_IS_HOST_MODULE
 
-ifdef LOCAL_USE_VNDK
+ifeq ($(call module-in-vendor-or-product),true)
   my_target_global_c_includes :=
   my_target_global_c_system_includes := $(TARGET_OUT_HEADERS)
 else ifdef LOCAL_SDK_VERSION
@@ -1686,7 +1686,7 @@
 ####################################################
 imported_includes :=
 
-ifdef LOCAL_USE_VNDK
+ifeq (true,$(call module-in-vendor-or-product))
   imported_includes += $(call intermediates-dir-for,HEADER_LIBRARIES,device_kernel_headers,$(my_kind),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))
 else
   # everything else should manually specify headers
diff --git a/core/cc_prebuilt_internal.mk b/core/cc_prebuilt_internal.mk
index 000159a..e34e110 100644
--- a/core/cc_prebuilt_internal.mk
+++ b/core/cc_prebuilt_internal.mk
@@ -80,7 +80,7 @@
 
 ifdef LOCAL_SDK_VERSION
 my_link_type := native:ndk:$(my_ndk_stl_family):$(my_ndk_stl_link_type)
-else ifdef LOCAL_USE_VNDK
+else ifeq ($(call module-in-vendor-or-product),true)
     _name := $(patsubst %.vendor,%,$(LOCAL_MODULE))
     _name := $(patsubst %.product,%,$(LOCAL_MODULE))
     ifneq ($(filter $(_name),$(VNDK_CORE_LIBRARIES) $(VNDK_SAMEPROCESS_LIBRARIES) $(LLNDK_LIBRARIES)),)
@@ -90,7 +90,7 @@
             my_link_type := native:vndk_private
         endif
     else
-        ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+        ifeq ($(LOCAL_IN_PRODUCT),true)
             my_link_type := native:product
         else
             my_link_type := native:vendor
@@ -139,8 +139,8 @@
 # When compiling against API imported module, use API import stub libraries.
 apiimport_postfix := .apiimport
 
-ifneq ($(LOCAL_USE_VNDK),)
-  ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ifeq ($(call module-in-vendor-or-product),true)
+  ifeq ($(LOCAL_IN_PRODUCT),true)
     apiimport_postfix := .apiimport.product
   else
     apiimport_postfix := .apiimport.vendor
@@ -158,8 +158,8 @@
 endif #my_system_shared_libraries
 
 ifdef my_shared_libraries
-ifdef LOCAL_USE_VNDK
-  ifeq ($(LOCAL_USE_VNDK_PRODUCT),true)
+ifeq ($(call module-in-vendor-or-product),true)
+  ifeq ($(LOCAL_IN_PRODUCT),true)
     my_shared_libraries := $(foreach l,$(my_shared_libraries),\
       $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
   else
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 8d99176..cf3fb9e 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -303,8 +303,8 @@
 LOCAL_UNSTRIPPED_PATH:=
 LOCAL_USE_AAPT2:=
 LOCAL_USE_CLANG_LLD:=
-LOCAL_USE_VNDK:=
-LOCAL_USE_VNDK_PRODUCT:=
+LOCAL_IN_VENDOR:=
+LOCAL_IN_PRODUCT:=
 LOCAL_USES_LIBRARIES:=
 LOCAL_VENDOR_MODULE:=
 LOCAL_VINTF_FRAGMENTS:=
diff --git a/core/copy_headers.mk b/core/copy_headers.mk
index c457eb0..397ea62 100644
--- a/core/copy_headers.mk
+++ b/core/copy_headers.mk
@@ -13,13 +13,12 @@
   $(call pretty-error,Modules using LOCAL_SDK_VERSION may not use LOCAL_COPY_HEADERS)
 endif
 
-include $(BUILD_SYSTEM)/local_vndk.mk
+include $(BUILD_SYSTEM)/local_vendor_product.mk
 
-# If we're using the VNDK, only vendor modules using the VNDK may use
-# LOCAL_COPY_HEADERS. Platform libraries will not have the include path
-# present.
-ifndef LOCAL_USE_VNDK
-  $(call pretty-error,Only vendor modules using LOCAL_USE_VNDK may use LOCAL_COPY_HEADERS)
+# Modules in vendor or product may use LOCAL_COPY_HEADERS.
+# Platform libraries will not have the include path present.
+ifeq ($(call module-in-vendor-or-product),)
+  $(call pretty-error,Only modules in vendor or product may use LOCAL_COPY_HEADERS)
 endif
 
 # Clean up LOCAL_COPY_HEADERS_TO, since soong_ui will be comparing cleaned
diff --git a/core/definitions.mk b/core/definitions.mk
index ed842bc..40b7980 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2774,6 +2774,10 @@
 $(if $(LOCAL_MIN_SDK_VERSION),$(LOCAL_MIN_SDK_VERSION),$(call module-target-sdk-version))
 endef
 
+# Checks if module is in vendor or product
+define module-in-vendor-or-product
+$(if $(filter true,$(LOCAL_IN_VENDOR) $(LOCAL_IN_PRODUCT)),true)
+endef
 
 define transform-classes.jar-to-dex
 @echo "target Dex: $(PRIVATE_MODULE)"
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index 37a389f..08311ca 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -57,6 +57,7 @@
 # Build the boot.zip which contains the boot jars and their compilation output
 # We can do this only if preopt is enabled and if the product uses libart config (which sets the
 # default properties for preopting).
+# At the time of writing, this is only for ART Cloud.
 ifeq ($(WITH_DEXPREOPT), true)
 ifneq ($(WITH_DEXPREOPT_ART_BOOT_IMG_ONLY), true)
 ifeq ($(PRODUCT_USES_DEFAULT_ART_CONFIG), true)
@@ -95,15 +96,16 @@
 bootclasspath_locations_arg := $(subst $(space),:,$(DEXPREOPT_BOOTCLASSPATH_DEX_LOCATIONS))
 boot_images := $(subst :,$(space),$(DEXPREOPT_IMAGE_LOCATIONS_ON_DEVICE$(DEXPREOPT_INFIX)))
 boot_image_arg := $(subst $(space),:,$(patsubst /%,%,$(boot_images)))
-dex2oat_extra_args := $(if $(filter true,$(ENABLE_UFFD_GC)),--runtime-arg -Xgc:CMC)
+uffd_gc_flag_txt := $(OUT_DIR)/soong/dexpreopt/uffd_gc_flag.txt
 
 boot_zip_metadata_txt := $(dir $(boot_zip))boot_zip/METADATA.txt
+$(boot_zip_metadata_txt): $(uffd_gc_flag_txt)
 $(boot_zip_metadata_txt):
 	rm -f $@
 	echo "bootclasspath = $(bootclasspath_arg)" >> $@
 	echo "bootclasspath-locations = $(bootclasspath_locations_arg)" >> $@
 	echo "boot-image = $(boot_image_arg)" >> $@
-	echo "extra-args = $(dex2oat_extra_args)" >> $@
+	echo "extra-args = `cat $(uffd_gc_flag_txt)`" >> $@
 
 $(call dist-for-goals, droidcore, $(boot_zip_metadata_txt))
 
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 10fbe8f..d51de33 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -122,7 +122,7 @@
   $(call add_json_str,  Dex2oatXmx,                              $(DEX2OAT_XMX))
   $(call add_json_str,  Dex2oatXms,                              $(DEX2OAT_XMS))
   $(call add_json_str,  EmptyDirectory,                          $(OUT_DIR)/empty)
-  $(call add_json_bool, EnableUffdGc,                            $(filter true,$(ENABLE_UFFD_GC)))
+  $(call add_json_str,  EnableUffdGc,                            $(ENABLE_UFFD_GC))
 
 ifdef TARGET_ARCH
   $(call add_json_map,  CpuVariant)
diff --git a/core/executable_internal.mk b/core/executable_internal.mk
index fb14cce..fecf4f6 100644
--- a/core/executable_internal.mk
+++ b/core/executable_internal.mk
@@ -45,7 +45,7 @@
 my_target_crtbegin_dynamic_o :=
 my_target_crtbegin_static_o :=
 my_target_crtend_o :=
-else ifdef LOCAL_USE_VNDK
+else ifeq (true,$(call module-in-vendor-or-product))
 my_target_crtbegin_dynamic_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_dynamic.vendor)
 my_target_crtbegin_static_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_static.vendor)
 my_target_crtend_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_android.vendor)
diff --git a/core/instrumentation_test_config_template.xml b/core/instrumentation_test_config_template.xml
index 9dfc001..379126c 100644
--- a/core/instrumentation_test_config_template.xml
+++ b/core/instrumentation_test_config_template.xml
@@ -24,7 +24,7 @@
     </target_preparer>
 
     <test class="com.android.tradefed.testtype.{TEST_TYPE}" >
-        <option name="package" value="{PACKAGE}" />
+        {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="{PACKAGE}" />
         <option name="runner" value="{RUNNER}" />
     </test>
 </configuration>
diff --git a/core/local_vendor_product.mk b/core/local_vendor_product.mk
new file mode 100644
index 0000000..fef9b26
--- /dev/null
+++ b/core/local_vendor_product.mk
@@ -0,0 +1,16 @@
+
+# Set LOCAL_IN_VENDOR for modules going into vendor or odm partition and LOCAL_IN_PRODUCT for product
+# except for host modules. If LOCAL_SDK_VERSION is set, that's a more restrictive set, so they don't need
+# LOCAL_IN_VENDOR or LOCAL_IN_PRODUCT
+ifndef LOCAL_IS_HOST_MODULE
+ifndef LOCAL_SDK_VERSION
+  ifneq (,$(filter true,$(LOCAL_VENDOR_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE) $(LOCAL_PROPRIETARY_MODULE)))
+    LOCAL_IN_VENDOR:=true
+    # Note: no need to check LOCAL_MODULE_PATH* since LOCAL_[VENDOR|ODM|OEM]_MODULE is already
+    # set correctly before this is included.
+  endif
+  ifeq (true,$(LOCAL_PRODUCT_MODULE))
+    LOCAL_IN_PRODUCT:=true
+  endif
+endif
+endif
diff --git a/core/local_vndk.mk b/core/local_vndk.mk
deleted file mode 100644
index eb8f2c0..0000000
--- a/core/local_vndk.mk
+++ /dev/null
@@ -1,41 +0,0 @@
-
-#Set LOCAL_USE_VNDK for modules going into product, vendor or odm partition, except for host modules
-#If LOCAL_SDK_VERSION is set, thats a more restrictive set, so they dont need LOCAL_USE_VNDK
-ifndef LOCAL_IS_HOST_MODULE
-ifndef LOCAL_SDK_VERSION
-  ifneq (,$(filter true,$(LOCAL_VENDOR_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE) $(LOCAL_PROPRIETARY_MODULE)))
-    LOCAL_USE_VNDK:=true
-    LOCAL_USE_VNDK_VENDOR:=true
-    # Note: no need to check LOCAL_MODULE_PATH* since LOCAL_[VENDOR|ODM|OEM]_MODULE is already
-    # set correctly before this is included.
-  endif
-  ifdef PRODUCT_PRODUCT_VNDK_VERSION
-    # Product modules also use VNDK when PRODUCT_PRODUCT_VNDK_VERSION is defined.
-    ifeq (true,$(LOCAL_PRODUCT_MODULE))
-      LOCAL_USE_VNDK:=true
-      LOCAL_USE_VNDK_PRODUCT:=true
-    endif
-  endif
-endif
-endif
-
-# Verify LOCAL_USE_VNDK usage, and set LOCAL_SDK_VERSION if necessary
-
-ifdef LOCAL_IS_HOST_MODULE
-  ifdef LOCAL_USE_VNDK
-    $(shell echo $(LOCAL_MODULE_MAKEFILE): $(LOCAL_MODULE): Do not use LOCAL_USE_VNDK with host modules >&2)
-    $(error done)
-  endif
-endif
-ifdef LOCAL_USE_VNDK
-  ifneq ($(LOCAL_USE_VNDK),true)
-    $(shell echo '$(LOCAL_MODULE_MAKEFILE): $(LOCAL_MODULE): LOCAL_USE_VNDK must be "true" or empty, not "$(LOCAL_USE_VNDK)"' >&2)
-    $(error done)
-  endif
-
-  ifdef LOCAL_SDK_VERSION
-    $(shell echo $(LOCAL_MODULE_MAKEFILE): $(LOCAL_MODULE): LOCAL_USE_VNDK must not be used with LOCAL_SDK_VERSION >&2)
-    $(error done)
-  endif
-endif
-
diff --git a/core/packaging/flags.mk b/core/packaging/flags.mk
index 6fc1e4c..62ef3df 100644
--- a/core/packaging/flags.mk
+++ b/core/packaging/flags.mk
@@ -97,6 +97,20 @@
 	)) \
 )
 
+# Collect the on-device flags into a single file, similar to all_aconfig_declarations.
+required_aconfig_flags_files := \
+		$(sort $(foreach partition, $(filter $(IMAGES_TO_BUILD), $(_FLAG_PARTITIONS)), \
+			$(aconfig_flag_summaries_protobuf.$(partition)) \
+		))
+
+.PHONY: device_aconfig_declarations
+device_aconfig_declarations: $(PRODUCT_OUT)/device_aconfig_declarations.pb
+$(eval $(call generate-partition-aconfig-flag-file, \
+			$(TARGET_OUT_FLAGS)/device_aconfig_declarations.pb, \
+			$(PRODUCT_OUT)/device_aconfig_declarations.pb, \
+			$(sort $(required_aconfig_flags_files)) \
+)) \
+
 # Create a set of storage file for each partition
 # $(1): built aconfig flags storage package map file (out)
 # $(2): built aconfig flags storage flag map file (out)
@@ -178,6 +192,7 @@
 
 # Clean up
 required_flags_files:=
+required_aconfig_flags_files:=
 $(foreach partition, $(_FLAG_PARTITIONS), \
 	$(eval build_flag_summaries.$(partition):=) \
 	$(eval aconfig_flag_summaries_protobuf.$(partition):=) \
diff --git a/core/release_config.mk b/core/release_config.mk
index 1fb5747..8d19bc7 100644
--- a/core/release_config.mk
+++ b/core/release_config.mk
@@ -59,15 +59,51 @@
     $(if $(filter $(map),$(config_map_files)),,$(eval config_map_files += $(map))) \
 )
 
+# Declare an alias release-config
+#
+# This should be used to declare a release as an alias of another, meaning no
+# release config files should be present.
+#
+# $1 config name
+# $2 release config for which it is an alias
+define alias-release-config
+    $(call _declare-release-config,$(1),,$(2),true)
+endef
+
 # Declare or extend a release-config.
 #
+# The order of processing is:
+# 1. Recursively apply any overridden release configs.  Only apply each config
+#    the first time we reach it.
+# 2. Apply any files for this release config, in the order they were added to
+#    the declaration.
+#
+# Example:
+#   With these declarations:
+#     $(declare-release-config foo, foo.scl)
+#     $(declare-release-config bar, bar.scl, foo)
+#     $(declare-release-config baz, baz.scl, bar)
+#     $(declare-release-config bif, bif.scl, foo baz)
+#     $(declare-release-config bop, bop.scl, bar baz)
+#
+#   TARGET_RELEASE:
+#     - bar will use: foo.scl bar.scl
+#     - baz will use: foo.scl bar.scl baz.scl
+#     - bif will use: foo.scl bar.scl baz.scl bif.scl
+#     - bop will use: foo.scl bar.scl baz.scl bop.scl
+#
 # $1 config name
 # $2 release config files
-# $3 overridden release config.  Only applied for $(TARGET_RELEASE), not in depth.
+# $3 overridden release config
 define declare-release-config
-    $(if $(strip $(2)),,  \
-        $(error declare-release-config: config $(strip $(1)) must have release config files) \
+    $(call _declare-release-config,$(1),$(2),$(3),)
+endef
+
+define _declare-release-config
+    $(if $(strip $(2)$(3)),,  \
+        $(error declare-release-config: config $(strip $(1)) must have release config files, override another release config, or both) \
     )
+    $(if $(strip $(4)),$(eval _all_release_configs.$(strip $(1)).ALIAS := true))
     $(eval _all_release_configs := $(sort $(_all_release_configs) $(strip $(1))))
     $(if $(strip $(3)), \
       $(if $(filter $(_all_release_configs), $(strip $(3))),
@@ -113,16 +149,30 @@
 # Don't sort this, use it in the order they gave us.
 # Do allow duplicate entries, retaining only the first usage.
 flag_value_files :=
-$(foreach r,$(_all_release_configs.$(TARGET_RELEASE).OVERRIDES) $(TARGET_RELEASE), \
+
+# Apply overrides recursively
+#
+# $1 release config that we override
+applied_releases :=
+define _apply-release-config-overrides
+$(foreach r,$(1), \
+  $(if $(filter $(r),$(applied_releases)),, \
+    $(foreach o,$(_all_release_configs.$(r).OVERRIDES),$(call _apply-release-config-overrides,$(o)))\
+    $(eval applied_releases += $(r))\
     $(foreach f,$(_all_release_configs.$(r).FILES), \
       $(if $(filter $(f),$(flag_value_files)),,$(eval flag_value_files += $(f)))\
     )\
+  )\
 )
-
+endef
+$(call _apply-release-config-overrides,$(TARGET_RELEASE))
 # Unset variables so they can't use them
 define declare-release-config
 $(error declare-release-config can only be called from inside release_config_map.mk files)
 endef
+define apply-release-config-overrides
+$(error invalid use of apply-release-config-overrides)
+endef
 
 # TODO: Remove this check after enough people have sourced lunch that we don't
 # need to worry about it trying to do get_build_vars TARGET_RELEASE. Maybe after ~9/2023
@@ -135,6 +185,11 @@
 endif
 .KATI_READONLY := TARGET_RELEASE
 
+# Verify that alias configs do not have config files.
+$(foreach r,$(_all_release_configs),\
+  $(if $(_all_release_configs.$(r).ALIAS),$(if $(_all_release_configs.$(r).FILES),\
+    $(error Alias release config "$(r)" may not specify release config files $(_all_release_configs.$(r).FILES))\
+)))
 
 $(foreach config, $(_all_release_configs), \
     $(eval _all_release_configs.$(config).DECLARED_IN:= ) \
@@ -142,6 +197,7 @@
 )
 _all_release_configs:=
 config_map_files:=
+applied_releases:=
 
 
 # -----------------------------------------------------------------
diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk
index 139de10..2f510d9 100644
--- a/core/shared_library_internal.mk
+++ b/core/shared_library_internal.mk
@@ -42,7 +42,7 @@
 ifeq ($(LOCAL_NO_CRT),true)
 my_target_crtbegin_so_o :=
 my_target_crtend_so_o :=
-else ifdef LOCAL_USE_VNDK
+else ifeq ($(call module-in-vendor-or-product),true)
 my_target_crtbegin_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtbegin_so.vendor)
 my_target_crtend_so_o := $(SOONG_$(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_OBJECT_crtend_so.vendor)
 else
diff --git a/core/soong_cc_rust_prebuilt.mk b/core/soong_cc_rust_prebuilt.mk
index 94e1115..943ed30 100644
--- a/core/soong_cc_rust_prebuilt.mk
+++ b/core/soong_cc_rust_prebuilt.mk
@@ -99,7 +99,7 @@
   include $(BUILD_SYSTEM)/link_type.mk
 endif
 
-ifdef LOCAL_USE_VNDK
+ifeq ($(call module-in-vendor-or-product),true)
   ifneq ($(LOCAL_VNDK_DEPEND_ON_CORE_VARIANT),true)
     name_without_suffix := $(patsubst %.vendor,%,$(LOCAL_MODULE))
     ifneq ($(name_without_suffix),$(LOCAL_MODULE))
@@ -128,8 +128,8 @@
 ifdef LOCAL_INSTALLED_MODULE
   ifdef LOCAL_SHARED_LIBRARIES
     my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
-    ifdef LOCAL_USE_VNDK
-      ifdef LOCAL_USE_VNDK_PRODUCT
+    ifeq ($(call module-in-vendor-or-product),true)
+      ifdef LOCAL_IN_PRODUCT
         my_shared_libraries := $(foreach l,$(my_shared_libraries),\
           $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
       else
@@ -143,8 +143,8 @@
   ifdef LOCAL_DYLIB_LIBRARIES
     my_dylibs := $(LOCAL_DYLIB_LIBRARIES)
     # Treat these as shared library dependencies for installation purposes.
-    ifdef LOCAL_USE_VNDK
-      ifdef LOCAL_USE_VNDK_PRODUCT
+    ifeq ($(call module-in-vendor-or-product),true)
+      ifdef LOCAL_IN_PRODUCT
         my_dylibs := $(foreach l,$(my_dylibs),\
           $(if $(SPLIT_PRODUCT.SHARED_LIBRARIES.$(l)),$(l).product,$(l)))
       else
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 4e8e976..652ca97 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -124,7 +124,7 @@
     $(eval _option := --allow-dup)\
 )
 
-$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(3) $(6)
+$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(3) $(6) $(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC)
 	$(hide) echo Building $$@
 	$(hide) mkdir -p $$(dir $$@)
 	$(hide) rm -f $$@ && touch $$@
@@ -148,7 +148,10 @@
 	        echo "$$(line)" >> $$@;\
 	    )\
 	)
-	$(hide) $(POST_PROCESS_PROPS) $$(_option) --sdk-version $(PLATFORM_SDK_VERSION) $$@ $(5)
+	$(hide) $(POST_PROCESS_PROPS) $$(_option) \
+	  --sdk-version $(PLATFORM_SDK_VERSION) \
+	  --kernel-version-file-for-uffd-gc "$(BUILT_KERNEL_VERSION_FILE_FOR_UFFD_GC)" \
+	  $$@ $(5)
 	$(hide) $(foreach file,$(strip $(6)),\
 	    if [ -f "$(file)" ]; then\
 	        cat $(file) >> $$@;\
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 1004d8c..23e9a44 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -64,6 +64,7 @@
     com.android.ondevicepersonalization \
     com.android.os.statsd \
     com.android.permission \
+    com.android.profiling \
     com.android.resolv \
     com.android.rkpd \
     com.android.neuralnetworks \
@@ -75,6 +76,7 @@
     com.android.virt \
     com.android.wifi \
     ContactsProvider \
+    ContactKeysProvider \
     content \
     CtsShimPrebuilt \
     CtsShimPrivPrebuilt \
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index d78864d..cf6d97a 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -79,6 +79,7 @@
     com.android.os.statsd:framework-statsd \
     com.android.permission:framework-permission \
     com.android.permission:framework-permission-s \
+    com.android.profiling:framework-profiling \
     com.android.scheduling:framework-scheduling \
     com.android.sdkext:framework-sdkextensions \
     com.android.tethering:framework-connectivity \
@@ -150,6 +151,7 @@
     com.android.btservices:service-bluetooth \
     com.android.devicelock:service-devicelock \
     com.android.os.statsd:service-statsd \
+    com.android.profiling:service-profiling \
     com.android.scheduling:service-scheduling \
     com.android.tethering:service-connectivity \
     com.android.uwb:service-uwb \
diff --git a/target/product/handheld_system.mk b/target/product/handheld_system.mk
index bf9aa41..d706372 100644
--- a/target/product/handheld_system.mk
+++ b/target/product/handheld_system.mk
@@ -75,6 +75,10 @@
     vr \
 
 PRODUCT_PACKAGES += $(RELEASE_PACKAGE_VIRTUAL_CAMERA)
+# Set virtual_camera_service_enabled soong config variable based on the
+# RELEASE_PACKAGE_VIRTUAL_CAMERA build. virtual_camera_service_enabled soong config
+# variable is used to prevent accessing the service when it's not present in the build.
+$(call soong_config_set,vdm,virtual_camera_service_enabled,$(if $(RELEASE_PACKAGE_VIRTUAL_CAMERA),true,false))
 
 PRODUCT_SYSTEM_SERVER_APPS += \
     FusedLocation \
diff --git a/tools/Android.bp b/tools/Android.bp
index 5c54fcf..0a55ed4 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -18,56 +18,65 @@
 }
 
 python_binary_host {
-  name: "generate-self-extracting-archive",
-  srcs: ["generate-self-extracting-archive.py"],
+    name: "generate-self-extracting-archive",
+    srcs: ["generate-self-extracting-archive.py"],
 }
 
 python_binary_host {
-  name: "post_process_props",
-  srcs: ["post_process_props.py"],
+    name: "post_process_props",
+    srcs: ["post_process_props.py"],
+    libs: [
+        "uffd_gc_utils",
+    ],
 }
 
 python_test_host {
-  name: "post_process_props_unittest",
-  main: "test_post_process_props.py",
-  srcs: [
-    "post_process_props.py",
-    "test_post_process_props.py",
-  ],
-  test_config: "post_process_props_unittest.xml",
-  test_suites: ["general-tests"],
+    name: "post_process_props_unittest",
+    main: "test_post_process_props.py",
+    srcs: [
+        "post_process_props.py",
+        "test_post_process_props.py",
+    ],
+    libs: [
+        "uffd_gc_utils",
+    ],
+    test_config: "post_process_props_unittest.xml",
+    test_suites: ["general-tests"],
 }
 
 python_binary_host {
-  name: "extract_kernel",
-  srcs: ["extract_kernel.py"],
+    name: "extract_kernel",
+    srcs: ["extract_kernel.py"],
 }
 
 genrule_defaults {
-  name: "extract_kernel_release_defaults",
-  tools: ["extract_kernel", "lz4"],
-  out: ["kernel_release.txt"],
-  cmd: "$(location) --tools lz4:$(location lz4) --input $(in) --output-release > $(out)"
+    name: "extract_kernel_release_defaults",
+    tools: [
+        "extract_kernel",
+        "lz4",
+    ],
+    out: ["kernel_release.txt"],
+    cmd: "$(location) --tools lz4:$(location lz4) --input $(in) --output-release > $(out)",
 }
 
 cc_binary_host {
-  name: "build-runfiles",
-  srcs: ["build-runfiles.cc"],
+    name: "build-runfiles",
+    srcs: ["build-runfiles.cc"],
 }
 
 python_binary_host {
-  name: "check_radio_versions",
-  srcs: ["check_radio_versions.py"],
+    name: "check_radio_versions",
+    srcs: ["check_radio_versions.py"],
 }
 
 python_binary_host {
-  name: "check_elf_file",
-  srcs: ["check_elf_file.py"],
+    name: "check_elf_file",
+    srcs: ["check_elf_file.py"],
 }
 
 python_binary_host {
-  name: "generate_gts_shared_report",
-  srcs: ["generate_gts_shared_report.py"],
+    name: "generate_gts_shared_report",
+    srcs: ["generate_gts_shared_report.py"],
 }
 
 python_binary_host {
@@ -77,10 +86,10 @@
         "list_files.py",
     ],
     version: {
-      py3: {
-        embedded_launcher: true,
-      }
-    }
+        py3: {
+            embedded_launcher: true,
+        },
+    },
 }
 
 python_test_host {
@@ -98,11 +107,11 @@
 }
 
 python_binary_host {
-  name: "characteristics_rro_generator",
-  srcs: ["characteristics_rro_generator.py"],
-  version: {
-    py3: {
-      embedded_launcher: true,
+    name: "characteristics_rro_generator",
+    srcs: ["characteristics_rro_generator.py"],
+    version: {
+        py3: {
+            embedded_launcher: true,
+        },
     },
-  },
 }
diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs
index bebac890..1381e89 100644
--- a/tools/aconfig/aconfig/src/storage/flag_table.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_table.rs
@@ -32,40 +32,52 @@
     }
 }
 
-fn new_node(
-    package_id: u32,
-    flag_name: &str,
-    flag_type: u16,
-    flag_id: u16,
-    num_buckets: u32,
-) -> FlagTableNode {
-    let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
-    FlagTableNode {
-        package_id,
-        flag_name: flag_name.to_string(),
-        flag_type,
-        flag_id,
-        next_offset: None,
-        bucket_index,
-    }
+// A wrapper that pairs a FlagTableNode with auxiliary data (its bucket index)
+// needed during flag table creation.
+#[derive(PartialEq, Debug, Clone)]
+struct FlagTableNodeWrapper {
+    pub node: FlagTableNode,
+    pub bucket_index: u32,
 }
 
-fn create_nodes(package: &FlagPackage, num_buckets: u32) -> Result<Vec<FlagTableNode>> {
-    let flag_ids = assign_flag_ids(package.package_name, package.boolean_flags.iter().copied())?;
-    package
-        .boolean_flags
-        .iter()
-        .map(|&pf| {
-            let fid = flag_ids
-                .get(pf.name())
-                .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
-            // all flags are boolean value at the moment, thus using the last bit. When more
-            // flag value types are supported, flag value type information should come from the
-            // parsed flag, and we will set the flag_type bit mask properly.
-            let flag_type = 1;
-            Ok(new_node(package.package_id, pf.name(), flag_type, *fid, num_buckets))
-        })
-        .collect::<Result<Vec<_>>>()
+impl FlagTableNodeWrapper {
+    fn new(
+        package_id: u32,
+        flag_name: &str,
+        flag_type: u16,
+        flag_id: u16,
+        num_buckets: u32,
+    ) -> Self {
+        let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
+        let node = FlagTableNode {
+            package_id,
+            flag_name: flag_name.to_string(),
+            flag_type,
+            flag_id,
+            next_offset: None,
+        };
+        Self { node, bucket_index }
+    }
+
+    fn create_nodes(package: &FlagPackage, num_buckets: u32) -> Result<Vec<Self>> {
+        let flag_ids =
+            assign_flag_ids(package.package_name, package.boolean_flags.iter().copied())?;
+        package
+            .boolean_flags
+            .iter()
+            .map(|&pf| {
+                let fid = flag_ids
+                    .get(pf.name())
+                    .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
+                // All flags are boolean values at the moment, so we use the last bit.
+                // When more flag value types are supported, the flag value type
+                // information should come from the parsed flag, and we will set the
+                // flag_type bit mask properly.
+                let flag_type = 1;
+                Ok(Self::new(package.package_id, pf.name(), flag_type, *fid, num_buckets))
+            })
+            .collect::<Result<Vec<_>>>()
+    }
 }
 
 pub fn create_flag_table(container: &str, packages: &[FlagPackage]) -> Result<FlagTable> {
@@ -73,44 +85,48 @@
     let num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum();
     let num_buckets = get_table_size(num_flags)?;
 
-    let mut table = FlagTable {
-        header: new_header(container, num_flags),
-        buckets: vec![None; num_buckets as usize],
-        nodes: packages
-            .iter()
-            .map(|pkg| create_nodes(pkg, num_buckets))
-            .collect::<Result<Vec<_>>>()?
-            .concat(),
-    };
+    let mut header = new_header(container, num_flags);
+    let mut buckets = vec![None; num_buckets as usize];
+    let mut node_wrappers = packages
+        .iter()
+        .map(|pkg| FlagTableNodeWrapper::create_nodes(pkg, num_buckets))
+        .collect::<Result<Vec<_>>>()?
+        .concat();
 
     // initialize all header fields
-    table.header.bucket_offset = table.header.as_bytes().len() as u32;
-    table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
-    table.header.file_size = table.header.node_offset
-        + table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
+    header.bucket_offset = header.as_bytes().len() as u32;
+    header.node_offset = header.bucket_offset + num_buckets * 4;
+    header.file_size = header.node_offset
+        + node_wrappers.iter().map(|x| x.node.as_bytes().len()).sum::<usize>() as u32;
 
     // sort nodes by bucket index for efficiency
-    table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
+    node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
 
     // fill all node offset
-    let mut offset = table.header.node_offset;
-    for i in 0..table.nodes.len() {
-        let node_bucket_idx = table.nodes[i].bucket_index;
-        let next_node_bucket_idx =
-            if i + 1 < table.nodes.len() { Some(table.nodes[i + 1].bucket_index) } else { None };
+    let mut offset = header.node_offset;
+    for i in 0..node_wrappers.len() {
+        let node_bucket_idx = node_wrappers[i].bucket_index;
+        let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+            Some(node_wrappers[i + 1].bucket_index)
+        } else {
+            None
+        };
 
-        if table.buckets[node_bucket_idx as usize].is_none() {
-            table.buckets[node_bucket_idx as usize] = Some(offset);
+        if buckets[node_bucket_idx as usize].is_none() {
+            buckets[node_bucket_idx as usize] = Some(offset);
         }
-        offset += table.nodes[i].as_bytes().len() as u32;
+        offset += node_wrappers[i].node.as_bytes().len() as u32;
 
         if let Some(index) = next_node_bucket_idx {
             if index == node_bucket_idx {
-                table.nodes[i].next_offset = Some(offset);
+                node_wrappers[i].node.next_offset = Some(offset);
             }
         }
     }
 
+    let table =
+        FlagTable { header, buckets, nodes: node_wrappers.into_iter().map(|nw| nw.node).collect() };
+
     Ok(table)
 }
 
@@ -126,7 +142,6 @@
         flag_type: u16,
         flag_id: u16,
         next_offset: Option<u32>,
-        bucket_index: u32,
     ) -> FlagTableNode {
         FlagTableNode {
             package_id,
@@ -134,7 +149,6 @@
             flag_type,
             flag_id,
             next_offset,
-            bucket_index,
         }
     }
 
@@ -186,13 +200,13 @@
         let nodes: &Vec<FlagTableNode> = &flag_table.as_ref().unwrap().nodes;
         assert_eq!(nodes.len(), 8);
 
-        assert_eq!(nodes[0], new_expected_node(0, "enabled_ro", 1, 1, None, 0));
-        assert_eq!(nodes[1], new_expected_node(0, "enabled_rw", 1, 2, Some(150), 1));
-        assert_eq!(nodes[2], new_expected_node(1, "disabled_ro", 1, 0, None, 1));
-        assert_eq!(nodes[3], new_expected_node(2, "enabled_ro", 1, 1, None, 5));
-        assert_eq!(nodes[4], new_expected_node(1, "enabled_fixed_ro", 1, 1, Some(235), 7));
-        assert_eq!(nodes[5], new_expected_node(1, "enabled_ro", 1, 2, None, 7));
-        assert_eq!(nodes[6], new_expected_node(2, "enabled_fixed_ro", 1, 0, None, 9));
-        assert_eq!(nodes[7], new_expected_node(0, "disabled_rw", 1, 0, None, 15));
+        assert_eq!(nodes[0], new_expected_node(0, "enabled_ro", 1, 1, None));
+        assert_eq!(nodes[1], new_expected_node(0, "enabled_rw", 1, 2, Some(150)));
+        assert_eq!(nodes[2], new_expected_node(1, "disabled_ro", 1, 0, None));
+        assert_eq!(nodes[3], new_expected_node(2, "enabled_ro", 1, 1, None));
+        assert_eq!(nodes[4], new_expected_node(1, "enabled_fixed_ro", 1, 1, Some(235)));
+        assert_eq!(nodes[5], new_expected_node(1, "enabled_ro", 1, 2, None));
+        assert_eq!(nodes[6], new_expected_node(2, "enabled_fixed_ro", 1, 0, None));
+        assert_eq!(nodes[7], new_expected_node(0, "disabled_rw", 1, 0, None));
     }
 }
diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs
index f82e932..4c08129 100644
--- a/tools/aconfig/aconfig/src/storage/package_table.rs
+++ b/tools/aconfig/aconfig/src/storage/package_table.rs
@@ -17,8 +17,7 @@
 use anyhow::Result;
 
 use aconfig_storage_file::{
-    get_bucket_index, get_table_size, PackageTable, PackageTableHeader, PackageTableNode,
-    FILE_VERSION,
+    get_table_size, PackageTable, PackageTableHeader, PackageTableNode, FILE_VERSION,
 };
 
 use crate::storage::FlagPackage;
@@ -34,14 +33,24 @@
     }
 }
 
-fn new_node(package: &FlagPackage, num_buckets: u32) -> PackageTableNode {
-    let bucket_index = get_bucket_index(&package.package_name.to_string(), num_buckets);
-    PackageTableNode {
-        package_name: String::from(package.package_name),
-        package_id: package.package_id,
-        boolean_offset: package.boolean_offset,
-        next_offset: None,
-        bucket_index,
+// A wrapper that pairs a PackageTableNode with auxiliary data (its bucket index)
+// needed during package table creation.
+#[derive(PartialEq, Debug)]
+struct PackageTableNodeWrapper {
+    pub node: PackageTableNode,
+    pub bucket_index: u32,
+}
+
+impl PackageTableNodeWrapper {
+    fn new(package: &FlagPackage, num_buckets: u32) -> Self {
+        let node = PackageTableNode {
+            package_name: String::from(package.package_name),
+            package_id: package.package_id,
+            boolean_offset: package.boolean_offset,
+            next_offset: None,
+        };
+        let bucket_index = PackageTableNode::find_bucket_index(package.package_name, num_buckets);
+        Self { node, bucket_index }
     }
 }
 
@@ -49,40 +58,47 @@
     // create table
     let num_packages = packages.len() as u32;
     let num_buckets = get_table_size(num_packages)?;
-    let mut table = PackageTable {
-        header: new_header(container, num_packages),
-        buckets: vec![None; num_buckets as usize],
-        nodes: packages.iter().map(|pkg| new_node(pkg, num_buckets)).collect(),
-    };
+    let mut header = new_header(container, num_packages);
+    let mut buckets = vec![None; num_buckets as usize];
+    let mut node_wrappers: Vec<_> =
+        packages.iter().map(|pkg| PackageTableNodeWrapper::new(pkg, num_buckets)).collect();
 
     // initialize all header fields
-    table.header.bucket_offset = table.header.as_bytes().len() as u32;
-    table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
-    table.header.file_size = table.header.node_offset
-        + table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
+    header.bucket_offset = header.as_bytes().len() as u32;
+    header.node_offset = header.bucket_offset + num_buckets * 4;
+    header.file_size = header.node_offset
+        + node_wrappers.iter().map(|x| x.node.as_bytes().len()).sum::<usize>() as u32;
 
-    // sort nodes by bucket index for efficiency
-    table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
+    // sort node_wrappers by bucket index for efficiency
+    node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
 
     // fill all node offset
-    let mut offset = table.header.node_offset;
-    for i in 0..table.nodes.len() {
-        let node_bucket_idx = table.nodes[i].bucket_index;
-        let next_node_bucket_idx =
-            if i + 1 < table.nodes.len() { Some(table.nodes[i + 1].bucket_index) } else { None };
+    let mut offset = header.node_offset;
+    for i in 0..node_wrappers.len() {
+        let node_bucket_idx = node_wrappers[i].bucket_index;
+        let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+            Some(node_wrappers[i + 1].bucket_index)
+        } else {
+            None
+        };
 
-        if table.buckets[node_bucket_idx as usize].is_none() {
-            table.buckets[node_bucket_idx as usize] = Some(offset);
+        if buckets[node_bucket_idx as usize].is_none() {
+            buckets[node_bucket_idx as usize] = Some(offset);
         }
-        offset += table.nodes[i].as_bytes().len() as u32;
+        offset += node_wrappers[i].node.as_bytes().len() as u32;
 
         if let Some(index) = next_node_bucket_idx {
             if index == node_bucket_idx {
-                table.nodes[i].next_offset = Some(offset);
+                node_wrappers[i].node.next_offset = Some(offset);
             }
         }
     }
 
+    let table = PackageTable {
+        header,
+        buckets,
+        nodes: node_wrappers.into_iter().map(|nw| nw.node).collect(),
+    };
     Ok(table)
 }
 
@@ -125,7 +141,6 @@
             package_id: 1,
             boolean_offset: 3,
             next_offset: None,
-            bucket_index: 0,
         };
         assert_eq!(nodes[0], first_node_expected);
         let second_node_expected = PackageTableNode {
@@ -133,7 +148,6 @@
             package_id: 0,
             boolean_offset: 0,
             next_offset: Some(158),
-            bucket_index: 3,
         };
         assert_eq!(nodes[1], second_node_expected);
         let third_node_expected = PackageTableNode {
@@ -141,7 +155,6 @@
             package_id: 2,
             boolean_offset: 6,
             next_offset: None,
-            bucket_index: 3,
         };
         assert_eq!(nodes[2], third_node_expected);
     }
diff --git a/tools/aconfig/aconfig_protos/src/lib.rs b/tools/aconfig/aconfig_protos/src/lib.rs
index ef16e06..8f5667f 100644
--- a/tools/aconfig/aconfig_protos/src/lib.rs
+++ b/tools/aconfig/aconfig_protos/src/lib.rs
@@ -150,10 +150,7 @@
         ensure_required_fields!("flag declarations", pdf, "package");
         // TODO(b/312769710): Make the container field required.
 
-        ensure!(
-            is_valid_package_ident(pdf.package()),
-            "bad flag declarations: bad package"
-        );
+        ensure!(is_valid_package_ident(pdf.package()), "bad flag declarations: bad package");
         ensure!(
             !pdf.has_container() || is_valid_container_ident(pdf.container()),
             "bad flag declarations: bad container"
@@ -898,10 +895,7 @@
 "#;
         let parsed_flags = try_from_binary_proto_from_text_proto(text_proto).unwrap();
         let parsed_flag = &parsed_flags.parsed_flag[0];
-        assert_eq!(
-            crate::parsed_flag::path_to_declaration(parsed_flag),
-            "flags.declarations"
-        );
+        assert_eq!(crate::parsed_flag::path_to_declaration(parsed_flag), "flags.declarations");
     }
 
     #[test]
diff --git a/tools/aconfig/aconfig_storage_file/Android.bp b/tools/aconfig/aconfig_storage_file/Android.bp
index a2650d8..53b693f 100644
--- a/tools/aconfig/aconfig_storage_file/Android.bp
+++ b/tools/aconfig/aconfig_storage_file/Android.bp
@@ -12,6 +12,7 @@
         "libaconfig_storage_protos",
         "libonce_cell",
         "libprotobuf",
+        "libtempfile",
     ],
 }
 
diff --git a/tools/aconfig/aconfig_storage_file/Cargo.toml b/tools/aconfig/aconfig_storage_file/Cargo.toml
index e65b1bf..54ba6c7 100644
--- a/tools/aconfig/aconfig_storage_file/Cargo.toml
+++ b/tools/aconfig/aconfig_storage_file/Cargo.toml
@@ -9,7 +9,10 @@
 
 [dependencies]
 anyhow = "1.0.69"
+memmap2 = "0.8.0"
 protobuf = "3.2.0"
+once_cell = "1.19.0"
+tempfile = "3.9.0"
 
 [build-dependencies]
 protobuf-codegen = "3.2.0"
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index 421f847..dfbd9de 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -17,7 +17,7 @@
 //! flag table module defines the flag table file format and methods for serialization
 //! and deserialization
 
-use crate::{read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes, get_bucket_index};
+use crate::{get_bucket_index, read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes};
 use anyhow::{anyhow, Result};
 
 /// Flag table header struct
@@ -68,7 +68,6 @@
     pub flag_type: u16,
     pub flag_id: u16,
     pub next_offset: Option<u32>,
-    pub bucket_index: u32,
 }
 
 impl FlagTableNode {
@@ -97,7 +96,6 @@
                 0 => None,
                 val => Some(val),
             },
-            bucket_index: 0,
         };
         Ok(node)
     }
@@ -142,13 +140,11 @@
             .collect();
         let nodes = (0..num_flags)
             .map(|_| {
-                let mut node = FlagTableNode::from_bytes(&bytes[head..]).unwrap();
+                let node = FlagTableNode::from_bytes(&bytes[head..])?;
                 head += node.as_bytes().len();
-                node.bucket_index = FlagTableNode::find_bucket_index(
-                    node.package_id, &node.flag_name, num_buckets);
-                node
+                Ok(node)
             })
-            .collect();
+            .collect::<Result<Vec<_>>>()?;
 
         let table = Self { header, buckets, nodes };
         Ok(table)
@@ -179,8 +175,7 @@
 
     loop {
         let interpreted_node = FlagTableNode::from_bytes(&buf[flag_node_offset..])?;
-        if interpreted_node.package_id == package_id &&
-            interpreted_node.flag_name == flag {
+        if interpreted_node.package_id == package_id && interpreted_node.flag_name == flag {
             return Ok(Some(interpreted_node.flag_id));
         }
         match interpreted_node.next_offset {
@@ -188,7 +183,6 @@
             None => return Ok(None),
         }
     }
-
 }
 
 #[cfg(test)]
@@ -203,16 +197,8 @@
             flag_type: u16,
             flag_id: u16,
             next_offset: Option<u32>,
-            bucket_index: u32,
         ) -> Self {
-            Self {
-                package_id,
-                flag_name: flag_name.to_string(),
-                flag_type,
-                flag_id,
-                next_offset,
-                bucket_index,
-            }
+            Self { package_id, flag_name: flag_name.to_string(), flag_type, flag_id, next_offset }
         }
     }
 
@@ -245,14 +231,14 @@
             None,
         ];
         let nodes = vec![
-            FlagTableNode::new_expected(0, "enabled_ro", 1, 1, None, 0),
-            FlagTableNode::new_expected(0, "enabled_rw", 1, 2, Some(150), 1),
-            FlagTableNode::new_expected(1, "disabled_ro", 1, 0, None, 1),
-            FlagTableNode::new_expected(2, "enabled_ro", 1, 1, None, 5),
-            FlagTableNode::new_expected(1, "enabled_fixed_ro", 1, 1, Some(235), 7),
-            FlagTableNode::new_expected(1, "enabled_ro", 1, 2, None, 7),
-            FlagTableNode::new_expected(2, "enabled_fixed_ro", 1, 0, None, 9),
-            FlagTableNode::new_expected(0, "disabled_rw", 1, 0, None, 15),
+            FlagTableNode::new_expected(0, "enabled_ro", 1, 1, None),
+            FlagTableNode::new_expected(0, "enabled_rw", 1, 2, Some(150)),
+            FlagTableNode::new_expected(1, "disabled_ro", 1, 0, None),
+            FlagTableNode::new_expected(2, "enabled_ro", 1, 1, None),
+            FlagTableNode::new_expected(1, "enabled_fixed_ro", 1, 1, Some(235)),
+            FlagTableNode::new_expected(1, "enabled_ro", 1, 2, None),
+            FlagTableNode::new_expected(2, "enabled_fixed_ro", 1, 0, None),
+            FlagTableNode::new_expected(0, "disabled_rw", 1, 0, None),
         ];
         Ok(FlagTable { header, buckets, nodes })
     }
@@ -268,14 +254,8 @@
         assert_eq!(header, &reinterpreted_header.unwrap());
 
         let nodes: &Vec<FlagTableNode> = &flag_table.nodes;
-        let num_buckets = crate::get_table_size(header.num_flags).unwrap();
         for node in nodes.iter() {
-            let mut reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes()).unwrap();
-            reinterpreted_node.bucket_index = FlagTableNode::find_bucket_index(
-                reinterpreted_node.package_id,
-                &reinterpreted_node.flag_name,
-                num_buckets
-            );
+            let reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes()).unwrap();
             assert_eq!(node, &reinterpreted_node);
         }
 
@@ -300,9 +280,7 @@
         ];
         for (package_id, flag_name, expected_offset) in baseline.into_iter() {
             let flag_offset =
-                find_flag_offset(&flag_table[..], package_id, flag_name)
-                .unwrap()
-                .unwrap();
+                find_flag_offset(&flag_table[..], package_id, flag_name).unwrap().unwrap();
             assert_eq!(flag_offset, expected_offset);
         }
     }
@@ -311,11 +289,9 @@
     // this test point locks down table query of a non exist flag
     fn test_not_existed_flag_query() {
         let flag_table = create_test_flag_table().unwrap().as_bytes();
-        let flag_offset =
-            find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
+        let flag_offset = find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
         assert_eq!(flag_offset, None);
-        let flag_offset =
-            find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
+        let flag_offset = find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
         assert_eq!(flag_offset, None);
     }
 
@@ -325,8 +301,7 @@
         let mut table = create_test_flag_table().unwrap();
         table.header.version = crate::FILE_VERSION + 1;
         let flag_table = table.as_bytes();
-        let error = find_flag_offset(&flag_table[..], 0, "enabled_ro")
-            .unwrap_err();
+        let error = find_flag_offset(&flag_table[..], 0, "enabled_ro").unwrap_err();
         assert_eq!(
             format!("{:?}", error),
             format!(
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_value.rs b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
index c8b52a9..bb8892d 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_value.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
@@ -146,9 +146,7 @@
         let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
         let baseline: Vec<bool> = vec![false, true, false, false, true, true, false, true];
         for (offset, expected_value) in baseline.into_iter().enumerate() {
-            let flag_value =
-                get_boolean_flag_value(&flag_value_list[..], offset as u32)
-                .unwrap();
+            let flag_value = get_boolean_flag_value(&flag_value_list[..], offset as u32).unwrap();
             assert_eq!(flag_value, expected_value);
         }
     }
@@ -157,12 +155,8 @@
     // this test point locks down query beyond the end of boolean section
     fn test_boolean_out_of_range() {
         let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
-        let error = get_boolean_flag_value(&flag_value_list[..], 8)
-            .unwrap_err();
-        assert_eq!(
-            format!("{:?}", error),
-            "Flag value offset goes beyond the end of the file."
-        );
+        let error = get_boolean_flag_value(&flag_value_list[..], 8).unwrap_err();
+        assert_eq!(format!("{:?}", error), "Flag value offset goes beyond the end of the file.");
     }
 
     #[test]
@@ -171,8 +165,7 @@
         let mut value_list = create_test_flag_value_list().unwrap();
         value_list.header.version = crate::FILE_VERSION + 1;
         let flag_value = value_list.as_bytes();
-        let error = get_boolean_flag_value(&flag_value[..], 4)
-            .unwrap_err();
+        let error = get_boolean_flag_value(&flag_value[..], 4).unwrap_err();
         assert_eq!(
             format!("{:?}", error),
             format!(
diff --git a/tools/aconfig/aconfig_storage_file/src/lib.rs b/tools/aconfig/aconfig_storage_file/src/lib.rs
index f5aecff..a9f5e21 100644
--- a/tools/aconfig/aconfig_storage_file/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_file/src/lib.rs
@@ -21,6 +21,9 @@
 pub mod flag_value;
 pub mod package_table;
 
+#[cfg(feature = "cargo")]
+pub mod mapped_file;
+
 mod protos;
 #[cfg(test)]
 mod test_utils;
diff --git a/tools/aconfig/aconfig_storage_file/src/mapped_file.rs b/tools/aconfig/aconfig_storage_file/src/mapped_file.rs
new file mode 100644
index 0000000..4f65df0
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/src/mapped_file.rs
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::{BufReader, Read};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, ensure, Result};
+use memmap2::Mmap;
+use once_cell::sync::Lazy;
+
+use crate::protos::{
+    storage_files::try_from_binary_proto, ProtoStorageFileInfo, ProtoStorageFiles,
+};
+use crate::StorageFileSelection;
+
+/// Cache for already mapped files
+static ALL_MAPPED_FILES: Lazy<Mutex<HashMap<String, MappedStorageFileSet>>> = Lazy::new(|| {
+    let mapped_files = HashMap::new();
+    Mutex::new(mapped_files)
+});
+
+/// Mapped storage files for a particular container
+#[derive(Debug)]
+struct MappedStorageFileSet {
+    package_map: Arc<Mmap>,
+    flag_map: Arc<Mmap>,
+    flag_val: Arc<Mmap>,
+}
+
+/// Find where storage files are stored for a particular container
+fn find_container_storage_location(
+    location_pb_file: &str,
+    container: &str,
+) -> Result<ProtoStorageFileInfo> {
+    let file = File::open(location_pb_file)?;
+    let mut reader = BufReader::new(file);
+    let mut bytes = Vec::new();
+    reader.read_to_end(&mut bytes)?;
+
+    let storage_locations: ProtoStorageFiles = try_from_binary_proto(&bytes)?;
+    for location_info in storage_locations.files.iter() {
+        if location_info.container() == container {
+            return Ok(location_info.clone());
+        }
+    }
+    bail!("Storage file does not exist for {}", container)
+}
+
+/// Verify the file is read only and then map it
+fn verify_read_only_and_map(file_path: &str) -> Result<Mmap> {
+    let file = File::open(file_path)?;
+    let metadata = file.metadata()?;
+    ensure!(
+        metadata.permissions().readonly(),
+        "Cannot mmap file {} as it is not read only",
+        file_path
+    );
+    // SAFETY:
+    //
+    // Mmap constructors are unsafe as it would have undefined behaviors if the file
+    // is modified after mapped (https://docs.rs/memmap2/latest/memmap2/struct.Mmap.html).
+    //
+    // We either have to make this api unsafe or ensure that the file will not be modified
+    // which means it is read only. Here in the code, we check explicitly that the file
+    // being mapped must only have read permission, otherwise, error out, thus making sure
+    // it is safe.
+    //
+    // We should remove this restriction if we need to support mmap non read only file in
+    // the future (by making this api unsafe). But for now, all flags are boot stable, so
+    // the boot flag file copy should be readonly.
+    unsafe { Ok(Mmap::map(&file)?) }
+}
+
+/// Map all storage files for a particular container
+fn map_container_storage_files(
+    location_pb_file: &str,
+    container: &str,
+) -> Result<MappedStorageFileSet> {
+    let files_location = find_container_storage_location(location_pb_file, container)?;
+    let package_map = Arc::new(verify_read_only_and_map(files_location.package_map())?);
+    let flag_map = Arc::new(verify_read_only_and_map(files_location.flag_map())?);
+    let flag_val = Arc::new(verify_read_only_and_map(files_location.flag_val())?);
+    Ok(MappedStorageFileSet { package_map, flag_map, flag_val })
+}
+
+/// Get a mapped storage file given the container and file type
+pub fn get_mapped_file(
+    location_pb_file: &str,
+    container: &str,
+    file_selection: StorageFileSelection,
+) -> Result<Arc<Mmap>> {
+    let mut all_mapped_files = ALL_MAPPED_FILES.lock().unwrap();
+    match all_mapped_files.get(container) {
+        Some(mapped_files) => Ok(match file_selection {
+            StorageFileSelection::PackageMap => Arc::clone(&mapped_files.package_map),
+            StorageFileSelection::FlagMap => Arc::clone(&mapped_files.flag_map),
+            StorageFileSelection::FlagVal => Arc::clone(&mapped_files.flag_val),
+        }),
+        None => {
+            let mapped_files = map_container_storage_files(location_pb_file, container)?;
+            let file_ptr = match file_selection {
+                StorageFileSelection::PackageMap => Arc::clone(&mapped_files.package_map),
+                StorageFileSelection::FlagMap => Arc::clone(&mapped_files.flag_map),
+                StorageFileSelection::FlagVal => Arc::clone(&mapped_files.flag_val),
+            };
+            all_mapped_files.insert(container.to_string(), mapped_files);
+            Ok(file_ptr)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{get_binary_storage_proto_bytes, write_bytes_to_temp_file};
+
+    #[test]
+    fn test_find_storage_file_location() {
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "/system/etc/package.map"
+    flag_map: "/system/etc/flag.map"
+    flag_val: "/metadata/aconfig/system.val"
+    timestamp: 12345
+}
+files {
+    version: 1
+    container: "product"
+    package_map: "/product/etc/package.map"
+    flag_map: "/product/etc/flag.map"
+    flag_val: "/metadata/aconfig/product.val"
+    timestamp: 54321
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let file_info = find_container_storage_location(&file_full_path, "system").unwrap();
+        assert_eq!(file_info.version(), 0);
+        assert_eq!(file_info.container(), "system");
+        assert_eq!(file_info.package_map(), "/system/etc/package.map");
+        assert_eq!(file_info.flag_map(), "/system/etc/flag.map");
+        assert_eq!(file_info.flag_val(), "/metadata/aconfig/system.val");
+        assert_eq!(file_info.timestamp(), 12345);
+
+        let file_info = find_container_storage_location(&file_full_path, "product").unwrap();
+        assert_eq!(file_info.version(), 1);
+        assert_eq!(file_info.container(), "product");
+        assert_eq!(file_info.package_map(), "/product/etc/package.map");
+        assert_eq!(file_info.flag_map(), "/product/etc/flag.map");
+        assert_eq!(file_info.flag_val(), "/metadata/aconfig/product.val");
+        assert_eq!(file_info.timestamp(), 54321);
+
+        let err = find_container_storage_location(&file_full_path, "vendor").unwrap_err();
+        assert_eq!(format!("{:?}", err), "Storage file does not exist for vendor");
+    }
+
+    fn map_and_verify(
+        location_pb_file: &str,
+        file_selection: StorageFileSelection,
+        actual_file: &str,
+    ) {
+        let mut opened_file = File::open(actual_file).unwrap();
+        let mut content = Vec::new();
+        opened_file.read_to_end(&mut content).unwrap();
+
+        let mmaped_file = get_mapped_file(location_pb_file, "system", file_selection).unwrap();
+        assert_eq!(mmaped_file[..], content[..]);
+    }
+
+    #[test]
+    fn test_mapped_file_contents() {
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/package.map"
+    flag_map: "./tests/flag.map"
+    flag_val: "./tests/flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        map_and_verify(&file_full_path, StorageFileSelection::PackageMap, "./tests/package.map");
+
+        map_and_verify(&file_full_path, StorageFileSelection::FlagMap, "./tests/flag.map");
+
+        map_and_verify(&file_full_path, StorageFileSelection::FlagVal, "./tests/flag.val");
+    }
+
+    #[test]
+    fn test_map_non_read_only_file() {
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/rw.package.map"
+    flag_map: "./tests/rw.flag.map"
+    flag_val: "./tests/rw.flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "Cannot mmap file ./tests/rw.package.map as it is not read only"
+        );
+
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/package.map"
+    flag_map: "./tests/rw.flag.map"
+    flag_val: "./tests/rw.flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "Cannot mmap file ./tests/rw.flag.map as it is not read only"
+        );
+
+        let text_proto = r#"
+files {
+    version: 0
+    container: "system"
+    package_map: "./tests/package.map"
+    flag_map: "./tests/flag.map"
+    flag_val: "./tests/rw.flag.val"
+    timestamp: 12345
+}
+"#;
+        let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+        let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+        let file_full_path = file.path().display().to_string();
+
+        let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "Cannot mmap file ./tests/rw.flag.val as it is not read only"
+        );
+    }
+}
diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs
index 8fd57e6..a3ad6ec 100644
--- a/tools/aconfig/aconfig_storage_file/src/package_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs
@@ -69,7 +69,6 @@
     // boolean flag value array in the flag value file
     pub boolean_offset: u32,
     pub next_offset: Option<u32>,
-    pub bucket_index: u32,
 }
 
 impl PackageTableNode {
@@ -96,10 +95,16 @@
                 0 => None,
                 val => Some(val),
             },
-            bucket_index: 0,
         };
         Ok(node)
     }
+
+    /// Get the bucket index for a package table node, defined it here so the
+    /// construction side (aconfig binary) and consumption side (flag read lib)
+    /// use the same method of hashing
+    pub fn find_bucket_index(package: &str, num_buckets: u32) -> u32 {
+        get_bucket_index(&package, num_buckets)
+    }
 }
 
 /// Package table struct
@@ -135,12 +140,11 @@
             .collect();
         let nodes = (0..num_packages)
             .map(|_| {
-                let mut node = PackageTableNode::from_bytes(&bytes[head..]).unwrap();
+                let node = PackageTableNode::from_bytes(&bytes[head..])?;
                 head += node.as_bytes().len();
-                node.bucket_index = get_bucket_index(&node.package_name, num_buckets);
-                node
+                Ok(node)
             })
-            .collect();
+            .collect::<Result<Vec<_>>>()?;
 
         let table = Self { header, buckets, nodes };
         Ok(table)
@@ -166,7 +170,7 @@
     }
 
     let num_buckets = (interpreted_header.node_offset - interpreted_header.bucket_offset) / 4;
-    let bucket_index = get_bucket_index(&package, num_buckets);
+    let bucket_index = PackageTableNode::find_bucket_index(&package, num_buckets);
 
     let mut pos = (interpreted_header.bucket_offset + 4 * bucket_index) as usize;
     let mut package_node_offset = read_u32_from_bytes(buf, &mut pos)? as usize;
@@ -210,21 +214,18 @@
             package_id: 1,
             boolean_offset: 3,
             next_offset: None,
-            bucket_index: 0,
         };
         let second_node = PackageTableNode {
             package_name: String::from("com.android.aconfig.storage.test_1"),
             package_id: 0,
             boolean_offset: 0,
             next_offset: Some(158),
-            bucket_index: 3,
         };
         let third_node = PackageTableNode {
             package_name: String::from("com.android.aconfig.storage.test_4"),
             package_id: 2,
             boolean_offset: 6,
             next_offset: None,
-            bucket_index: 3,
         };
         let nodes = vec![first_node, second_node, third_node];
         Ok(PackageTable { header, buckets, nodes })
@@ -241,11 +242,8 @@
         assert_eq!(header, &reinterpreted_header.unwrap());
 
         let nodes: &Vec<PackageTableNode> = &package_table.nodes;
-        let num_buckets = crate::get_table_size(header.num_packages).unwrap();
         for node in nodes.iter() {
-            let mut reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes()).unwrap();
-            reinterpreted_node.bucket_index =
-                get_bucket_index(&reinterpreted_node.package_name, num_buckets);
+            let reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes()).unwrap();
             assert_eq!(node, &reinterpreted_node);
         }
 
diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
index e1fb6c7..c468683 100644
--- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs
+++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
@@ -14,9 +14,11 @@
  * limitations under the License.
  */
 
+use crate::protos::ProtoStorageFiles;
 use anyhow::Result;
 use protobuf::Message;
-use crate::protos::ProtoStorageFiles;
+use std::io::Write;
+use tempfile::NamedTempFile;
 
 pub fn get_binary_storage_proto_bytes(text_proto: &str) -> Result<Vec<u8>> {
     let storage_files: ProtoStorageFiles = protobuf::text_format::parse_from_str(text_proto)?;
@@ -24,3 +26,9 @@
     storage_files.write_to_vec(&mut binary_proto)?;
     Ok(binary_proto)
 }
+
+pub fn write_bytes_to_temp_file(bytes: &[u8]) -> Result<NamedTempFile> {
+    let mut file = NamedTempFile::new()?;
+    let _ = file.write_all(&bytes);
+    Ok(file)
+}
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.map b/tools/aconfig/aconfig_storage_file/tests/flag.map
new file mode 100644
index 0000000..43b6f9a
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.val b/tools/aconfig/aconfig_storage_file/tests/flag.val
new file mode 100644
index 0000000..f39f8d3
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/package.map b/tools/aconfig/aconfig_storage_file/tests/package.map
new file mode 100644
index 0000000..8ed4767
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/package.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.flag.map b/tools/aconfig/aconfig_storage_file/tests/rw.flag.map
new file mode 100644
index 0000000..43b6f9a
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.flag.val b/tools/aconfig/aconfig_storage_file/tests/rw.flag.val
new file mode 100644
index 0000000..f39f8d3
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.package.map b/tools/aconfig/aconfig_storage_file/tests/rw.package.map
new file mode 100644
index 0000000..8ed4767
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.package.map
Binary files differ
diff --git a/tools/aconfig/printflags/src/main.rs b/tools/aconfig/printflags/src/main.rs
index 7fcde61..a0c9ee8 100644
--- a/tools/aconfig/printflags/src/main.rs
+++ b/tools/aconfig/printflags/src/main.rs
@@ -17,7 +17,7 @@
 //! `printflags` is a device binary to print feature flags.
 
 use aconfig_protos::ProtoFlagState as State;
-use aconfig_protos::ProtoParsedFlags as ProtoParsedFlags;
+use aconfig_protos::ProtoParsedFlags;
 use anyhow::{bail, Context, Result};
 use regex::Regex;
 use std::collections::BTreeMap;
diff --git a/tools/finalization/environment.sh b/tools/finalization/environment.sh
index d9c42c8..b2d75bb 100755
--- a/tools/finalization/environment.sh
+++ b/tools/finalization/environment.sh
@@ -23,4 +23,7 @@
 # 'rel' - branch is finalized, switched to REL
 export FINAL_STATE='unfinalized'
 
-export BUILD_FROM_SOURCE_STUB=true
\ No newline at end of file
+export BUILD_FROM_SOURCE_STUB=true
+
+# FINAL versions for VINTF
+export FINAL_BOARD_API_LEVEL=$(TARGET_RELEASE=fina_1 $top/build/soong/soong_ui.bash --dumpvar-mode RELEASE_BOARD_API_LEVEL)
diff --git a/tools/finalization/finalize-sdk-rel.sh b/tools/finalization/finalize-sdk-rel.sh
index 11fb90c..59fe28c 100755
--- a/tools/finalization/finalize-sdk-rel.sh
+++ b/tools/finalization/finalize-sdk-rel.sh
@@ -40,9 +40,6 @@
     fi
     git -C "$top/cts" mv hostsidetests/theme/assets/${FINAL_PLATFORM_CODENAME} hostsidetests/theme/assets/${FINAL_PLATFORM_SDK_VERSION}
 
-    # system/sepolicy
-    system/sepolicy/tools/finalize-sdk-rel.sh "$top" "$FINAL_PLATFORM_SDK_VERSION"
-
     # prebuilts/abi-dumps/platform
     mkdir -p "$top/prebuilts/abi-dumps/platform/$FINAL_PLATFORM_SDK_VERSION"
     cp -r "$top/prebuilts/abi-dumps/platform/current/64/" "$top/prebuilts/abi-dumps/platform/$FINAL_PLATFORM_SDK_VERSION/"
diff --git a/tools/finalization/finalize-vintf-resources.sh b/tools/finalization/finalize-vintf-resources.sh
index 88645b3..362d9f7 100755
--- a/tools/finalization/finalize-vintf-resources.sh
+++ b/tools/finalization/finalize-vintf-resources.sh
@@ -7,16 +7,15 @@
     source $top/build/make/tools/finalization/environment.sh
 
     # TODO(b/314010764): finalize LL_NDK
-    # TODO(b/314010177): finalize SELinux
+
+    # system/sepolicy
+    system/sepolicy/tools/finalize-vintf-resources.sh "$top" "$FINAL_BOARD_API_LEVEL"
 
     create_new_compat_matrix
 
     # pre-finalization build target (trunk)
     local aidl_m="$top/build/soong/soong_ui.bash --make-mode TARGET_PRODUCT=aosp_arm64 TARGET_RELEASE=trunk TARGET_BUILD_VARIANT=userdebug DIST_DIR=out/dist"
     AIDL_TRANSITIVE_FREEZE=true $aidl_m aidl-freeze-api
-
-    # build/make
-    sed -i -e "s/sepolicy_major_vers := .*/sepolicy_major_vers := ${FINAL_PLATFORM_SDK_VERSION}/g" "$top/build/make/core/config.mk"
 }
 
 function create_new_compat_matrix() {
diff --git a/tools/perf/benchmarks b/tools/perf/benchmarks
index acc53bb..ad34586 100755
--- a/tools/perf/benchmarks
+++ b/tools/perf/benchmarks
@@ -29,6 +29,7 @@
 import subprocess
 import time
 import uuid
+from typing import Optional
 
 import pretty
 import utils
@@ -80,6 +81,33 @@
     undo: callable
     "Function to revert the source tree to its previous condition in the most minimal way possible."
 
+_DUMPVARS_VARS=[
+    "COMMON_LUNCH_CHOICES",
+    "HOST_PREBUILT_TAG",
+    "print",
+    "PRODUCT_OUT",
+    "report_config",
+    "TARGET_ARCH",
+    "TARGET_BUILD_VARIANT",
+    "TARGET_DEVICE",
+    "TARGET_PRODUCT",
+]
+
+_DUMPVARS_ABS_VARS =[
+    "ANDROID_CLANG_PREBUILTS",
+    "ANDROID_JAVA_HOME",
+    "ANDROID_JAVA_TOOLCHAIN",
+    "ANDROID_PREBUILTS",
+    "HOST_OUT",
+    "HOST_OUT_EXECUTABLES",
+    "HOST_OUT_TESTCASES",
+    "OUT_DIR",
+    "print",
+    "PRODUCT_OUT",
+    "SOONG_HOST_OUT",
+    "SOONG_HOST_OUT_EXECUTABLES",
+    "TARGET_OUT_TESTCASES",
+]
 
 @dataclasses.dataclass(frozen=True)
 class Benchmark:
@@ -94,15 +122,47 @@
     change: Change
     "Source tree modification for the benchmark that will be measured"
 
-    modules: list[str]
+    dumpvars: Optional[bool] = False
+    "If specified, soong will run in dumpvars mode rather than build-mode."
+
+    modules: Optional[list[str]] = None
     "Build modules to build on soong command line"
 
-    preroll: int
+    preroll: Optional[int] = 0
     "Number of times to run the build command to stabilize"
 
-    postroll: int
+    postroll: Optional[int] = 3
     "Number of times to run the build command after reverting the action to stabilize"
 
+    def build_description(self):
+      "Short description of the benchmark's Soong invocation."
+      if self.dumpvars:
+        return "dumpvars"
+      elif self.modules:
+        return " ".join(self.modules)
+      return ""
+
+
+    def soong_command(self, root):
+      "Command line args to soong_ui for this benchmark."
+      if self.dumpvars:
+          return [
+              "--dumpvars-mode",
+              f"--vars=\"{' '.join(_DUMPVARS_VARS)}\"",
+              f"--abs-vars=\"{' '.join(_DUMPVARS_ABS_VARS)}\"",
+              "--var-prefix=var_cache_",
+              "--abs-var-prefix=abs_var_cache_",
+          ]
+      elif self.modules:
+          return [
+              "--build-mode",
+              "--all-modules",
+              f"--dir={root}",
+              "--skip-metrics-upload",
+          ] + self.modules
+      else:
+          raise Exception("Benchmark must specify dumpvars or modules")
+
 
 @dataclasses.dataclass(frozen=True)
 class FileSnapshot:
@@ -131,8 +191,14 @@
     """Remove the out directory."""
     def remove_out():
         out_dir = utils.get_out_dir()
+        #only remove actual contents, in case out is a symlink (as is the case for cog)
         if os.path.exists(out_dir):
-            shutil.rmtree(out_dir)
+          for filename in os.listdir(out_dir):
+              p = os.path.join(out_dir, filename)
+              if os.path.isfile(p) or os.path.islink(p):
+                  os.remove(p)
+              elif os.path.isdir(p):
+                  shutil.rmtree(p)
     return Change(label="Remove out", change=remove_out, undo=lambda: None)
 
 
@@ -236,6 +302,7 @@
             "id": self.benchmark.id,
             "title": self.benchmark.title,
             "modules": self.benchmark.modules,
+            "dumpvars": self.benchmark.dumpvars,
             "change": self.benchmark.change.label,
             "iteration": self.iteration,
             "log_dir": self.log_dir,
@@ -284,7 +351,7 @@
 
         # Preroll builds
         for i in range(benchmark.preroll):
-            ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark.modules)
+            ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark)
             report.preroll_duration_ns.append(ns)
 
         sys.stderr.write(f"PERFORMING CHANGE: {benchmark.change.label}\n")
@@ -293,18 +360,19 @@
         try:
 
             # Measured build
-            ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark.modules)
+            ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark)
             report.duration_ns = ns
 
             dist_one = self._options.DistOne()
             if dist_one:
                 # If we're disting just one benchmark, save the logs and we can stop here.
-                self._dist(utils.get_dist_dir())
+                self._dist(utils.get_dist_dir(), benchmark.dumpvars)
             else:
+                self._dist(benchmark_log_dir, benchmark.dumpvars, store_metrics_only=True)
                 # Postroll builds
-                for i in range(benchmark.preroll):
+                for i in range(benchmark.postroll):
                     ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"post_{i}"),
-                                         benchmark.modules)
+                                         benchmark)
                     report.postroll_duration_ns.append(ns)
 
         finally:
@@ -323,21 +391,17 @@
         path += ("/%0" + str(len(str(self._options.Iterations()))) + "d") % iteration
         return path
 
-    def _run_build(self, lunch, build_log_dir, modules):
+    def _run_build(self, lunch, build_log_dir, benchmark):
         """Builds the modules.  Saves interesting log files to log_dir.  Raises FatalError
         if the build fails.
         """
-        sys.stderr.write(f"STARTING BUILD {modules}\n")
+        sys.stderr.write(f"STARTING BUILD {benchmark.build_description()}\n")
 
         before_ns = time.perf_counter_ns()
         if not self._options.DryRun():
             cmd = [
                 "build/soong/soong_ui.bash",
-                "--build-mode",
-                "--all-modules",
-                f"--dir={self._options.root}",
-                "--skip-metrics-upload",
-            ] + modules
+            ] + benchmark.soong_command(self._options.root)
             env = dict(os.environ)
             env["TARGET_PRODUCT"] = lunch.target_product
             env["TARGET_RELEASE"] = lunch.target_release
@@ -351,20 +415,25 @@
 
         # TODO: Copy some log files.
 
-        sys.stderr.write(f"FINISHED BUILD {modules}\n")
+        sys.stderr.write(f"FINISHED BUILD {benchmark.build_description()}\n")
 
         return after_ns - before_ns
 
-    def _dist(self, dist_dir):
+    def _dist(self, dist_dir, dumpvars, store_metrics_only=False):
         out_dir = utils.get_out_dir()
         dest_dir = dist_dir.joinpath("logs")
         os.makedirs(dest_dir, exist_ok=True)
         basenames = [
-            "build.trace.gz",
-            "soong.log",
             "soong_build_metrics.pb",
             "soong_metrics",
         ]
+        if not store_metrics_only:
+            basenames.extend([
+                "build.trace.gz",
+                "soong.log",
+            ])
+        if dumpvars:
+            basenames = ['dumpvars-'+b for b in basenames]
         for base in basenames:
             src = out_dir.joinpath(base)
             if src.exists():
@@ -387,7 +456,7 @@
 
 def benchmark_table(benchmarks):
     rows = [("ID", "DESCRIPTION", "REBUILD"),]
-    rows += [(benchmark.id, benchmark.title, " ".join(benchmark.modules)) for benchmark in
+    rows += [(benchmark.id, benchmark.title, benchmark.build_description()) for benchmark in
              benchmarks]
     return rows
 
@@ -571,6 +640,22 @@
         """Initialize the list of benchmarks."""
         # Assumes that we've already chdired to the root of the tree.
         self._benchmarks = [
+            Benchmark(
+                      id="full_lunch",
+                      title="Lunch from clean out",
+                      change=Clean(),
+                      dumpvars=True,
+                      preroll=0,
+                      postroll=0,
+            ),
+            Benchmark(
+                      id="noop_lunch",
+                      title="Lunch with no change",
+                      change=NoChange(),
+                      dumpvars=True,
+                      preroll=1,
+                      postroll=0,
+            ),
             Benchmark(id="full",
                       title="Full build",
                       change=Clean(),
diff --git a/tools/perf/utils.py b/tools/perf/utils.py
index 934130d..0e66d4c 100644
--- a/tools/perf/utils.py
+++ b/tools/perf/utils.py
@@ -19,9 +19,11 @@
 
 def get_root():
     top_dir = os.environ.get("ANDROID_BUILD_TOP")
-    if top_dir:
-        return pathlib.Path(top_dir).resolve()
     d = pathlib.Path.cwd()
+    # with cog, someone may have a new workspace and new source tree top, but
+    # not run lunch yet, resulting in a misleading ANDROID_BUILD_TOP value
+    if top_dir and d.is_relative_to(top_dir):
+        return pathlib.Path(top_dir).resolve()
     while True:
         if d.joinpath("build", "soong", "soong_ui.bash").exists():
             return d.resolve().absolute()
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 32829c1..6f429fa 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -17,6 +17,8 @@
 import argparse
 import sys
 
+from uffd_gc_utils import should_enable_uffd_gc
+
 # Usage: post_process_props.py file.prop [disallowed_key, ...]
 # Disallowed keys are removed from the property file, if present
 
@@ -27,7 +29,7 @@
 
 # Put the modifications that you need to make into the */build.prop into this
 # function.
-def mangle_build_prop(prop_list):
+def mangle_build_prop(prop_list, kernel_version_file_for_uffd_gc):
   # If ro.debuggable is 1, then enable adb on USB by default
   # (this is for userdebug builds)
   if prop_list.get_value("ro.debuggable") == "1":
@@ -38,6 +40,11 @@
       else:
         val = val + ",adb"
       prop_list.put("persist.sys.usb.config", val)
+  if prop_list.get_value("ro.dalvik.vm.enable_uffd_gc") == "default":
+    assert kernel_version_file_for_uffd_gc != ""
+    enable_uffd_gc = should_enable_uffd_gc(kernel_version_file_for_uffd_gc)
+    prop_list.put("ro.dalvik.vm.enable_uffd_gc",
+                  "true" if enable_uffd_gc else "false")
 
 def validate_grf_props(prop_list):
   """Validate GRF properties if exist.
@@ -233,6 +240,7 @@
   parser.add_argument("filename")
   parser.add_argument("disallowed_keys", metavar="KEY", type=str, nargs="*")
   parser.add_argument("--sdk-version", type=int, required=True)
+  parser.add_argument("--kernel-version-file-for-uffd-gc", required=True)
   args = parser.parse_args()
 
   if not args.filename.endswith("/build.prop"):
@@ -240,7 +248,7 @@
     sys.exit(1)
 
   props = PropList(args.filename)
-  mangle_build_prop(props)
+  mangle_build_prop(props, args.kernel_version_file_for_uffd_gc)
   if not override_optional_props(props, args.allow_dup):
     sys.exit(1)
   if not validate_grf_props(props):