Merge "Revert "Revert "Add system staging dir stamp file for bazel sand..."" into main
diff --git a/core/Makefile b/core/Makefile
index d23ab19..07d705e 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -4211,6 +4211,13 @@
 
 ifneq ($(strip $(BOARD_CUSTOMIMAGES_PARTITION_LIST)),)
 INTERNAL_AVB_CUSTOMIMAGES_SIGNING_ARGS :=
+BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST :=
+# If BOARD_AVB_$(call to-upper,$(partition))_KEY_PATH is set, the partition is added to
+# BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST; otherwise its images won't be AVB signed.
+$(foreach partition,$(BOARD_CUSTOMIMAGES_PARTITION_LIST), \
+	$(if $(BOARD_AVB_$(call to-upper,$(partition))_KEY_PATH), \
+	$(eval BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST += $(partition)) \
+	$(eval BOARD_$(call to-upper,$(partition))_IMAGE_LIST := $(BOARD_AVB_$(call to-upper,$(partition))_IMAGE_LIST))))
 
 # Sign custom image.
 # $(1): the prebuilt custom image.
@@ -4235,9 +4242,26 @@
 INSTALLED_CUSTOMIMAGES_TARGET += $(3)
 endef
 
-$(foreach partition,$(BOARD_CUSTOMIMAGES_PARTITION_LIST), \
+# Copy an unsigned custom image.
+# $(1): the prebuilt custom image.
+# $(2): the copied custom image target.
+define copy_custom_image
+$(2): $(1) $(INTERNAL_USERIMAGES_DEPS)
+	@echo Target custom image: $(2)
+	mkdir -p $(dir $(2))
+	cp $(1) $(2)
+INSTALLED_CUSTOMIMAGES_TARGET += $(2)
+endef
+
+# Add AVB-signed custom images to the droid target
+$(foreach partition,$(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), \
   $(foreach image,$(BOARD_AVB_$(call to-upper,$(partition))_IMAGE_LIST), \
      $(eval $(call sign_custom_image,$(image),$(partition),$(PRODUCT_OUT)/$(notdir $(image))))))
+
+# Add unsigned custom images to the droid target
+$(foreach partition,$(filter-out $(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), $(BOARD_CUSTOMIMAGES_PARTITION_LIST)), \
+  $(foreach image,$(BOARD_$(call to-upper,$(partition))_IMAGE_LIST), \
+     $(eval $(call copy_custom_image,$(image),$(PRODUCT_OUT)/$(notdir $(image))))))
 endif
 
 # -----------------------------------------------------------------
@@ -4514,7 +4538,9 @@
 $(eval part := $(1))
 $(eval PART=$(call to-upper,$(part)))
 $(eval _rollback_index_location := BOARD_AVB_$(PART)_ROLLBACK_INDEX_LOCATION)
+$(eval _key_path := BOARD_AVB_$(PART)_KEY_PATH)
 $(if $($(_rollback_index_location)),,$(error $(_rollback_index_location) is not defined))
+$(if $($(_key_path)),,$(error $(_key_path) is not defined))
 
 INTERNAL_AVB_MAKE_VBMETA_IMAGE_ARGS += \
     --chain_partition $(part):$($(_rollback_index_location)):$(AVB_CHAIN_KEY_DIR)/$(part).avbpubkey
@@ -4594,8 +4620,8 @@
 $(foreach partition,$(BOARD_AVB_VBMETA_CUSTOM_PARTITIONS),$(eval BOARD_AVB_MAKE_VBMETA_$(call to-upper,$(partition))_IMAGE_ARGS += --padding_size 4096))
 endif
 
-ifneq ($(strip $(BOARD_CUSTOMIMAGES_PARTITION_LIST)),)
-$(foreach partition,$(BOARD_CUSTOMIMAGES_PARTITION_LIST), \
+ifneq ($(strip $(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST)),)
+$(foreach partition,$(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), \
     $(eval $(call check-and-set-custom-avb-chain-args,$(partition))))
 endif
 
@@ -4682,8 +4708,8 @@
   $(if $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH),\
     $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_VBMETA_VENDOR_KEY_PATH) \
         --output $(1)/vbmeta_vendor.avbpubkey)
-  $(if $(BOARD_CUSTOMIMAGES_PARTITION_LIST),\
-    $(hide) $(foreach partition,$(BOARD_CUSTOMIMAGES_PARTITION_LIST), \
+  $(if $(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST),\
+    $(hide) $(foreach partition,$(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), \
         $(AVBTOOL) extract_public_key --key $(BOARD_AVB_$(call to-upper,$(partition))_KEY_PATH) \
             --output $(1)/$(partition).avbpubkey;)) \
   $(if $(BOARD_AVB_VBMETA_CUSTOM_PARTITIONS),\
@@ -5603,15 +5629,20 @@
 	$(hide) echo "avb_recovery_rollback_index_location=$(BOARD_AVB_RECOVERY_ROLLBACK_INDEX_LOCATION)" >> $@
 endif # BOARD_AVB_RECOVERY_KEY_PATH
 ifneq (,$(strip $(BOARD_CUSTOMIMAGES_PARTITION_LIST)))
-	$(hide) echo "avb_custom_images_partition_list=$(BOARD_CUSTOMIMAGES_PARTITION_LIST)" >> $@
-	$(hide) $(foreach partition,$(BOARD_CUSTOMIMAGES_PARTITION_LIST), \
+	$(hide) echo "custom_images_partition_list=$(filter-out $(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), $(BOARD_CUSTOMIMAGES_PARTITION_LIST))" >> $@
+	$(hide) $(foreach partition,$(filter-out $(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), $(BOARD_CUSTOMIMAGES_PARTITION_LIST)), \
+	    echo "$(partition)_image_list=$(foreach image,$(BOARD_$(call to-upper,$(partition))_IMAGE_LIST),$(notdir $(image)))" >> $@;)
+endif # BOARD_CUSTOMIMAGES_PARTITION_LIST
+ifneq (,$(strip $(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST)))
+	$(hide) echo "avb_custom_images_partition_list=$(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST)" >> $@
+	$(hide) $(foreach partition,$(BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST), \
 	    echo "avb_$(partition)_key_path=$(BOARD_AVB_$(call to-upper,$(partition))_KEY_PATH)"  >> $@; \
 	    echo "avb_$(partition)_algorithm=$(BOARD_AVB_$(call to-upper,$(partition))_ALGORITHM)"  >> $@; \
 	    echo "avb_$(partition)_add_hashtree_footer_args=$(BOARD_AVB_$(call to-upper,$(partition))_ADD_HASHTREE_FOOTER_ARGS)"  >> $@; \
 	    echo "avb_$(partition)_rollback_index_location=$(BOARD_AVB_$(call to-upper,$(partition))_ROLLBACK_INDEX_LOCATION)"  >> $@; \
 	    echo "avb_$(partition)_partition_size=$(BOARD_AVB_$(call to-upper,$(partition))_PARTITION_SIZE)"  >> $@; \
 	    echo "avb_$(partition)_image_list=$(foreach image,$(BOARD_AVB_$(call to-upper,$(partition))_IMAGE_LIST),$(notdir $(image)))" >> $@;)
-endif # BOARD_CUSTOMIMAGES_PARTITION_LIST
+endif # BOARD_AVB_CUSTOMIMAGES_PARTITION_LIST
 ifneq (,$(strip $(BOARD_AVB_VBMETA_SYSTEM)))
 	$(hide) echo "avb_vbmeta_system=$(BOARD_AVB_VBMETA_SYSTEM)" >> $@
 	$(hide) echo "avb_vbmeta_system_args=$(BOARD_AVB_MAKE_VBMETA_SYSTEM_IMAGE_ARGS)" >> $@
@@ -6434,7 +6465,7 @@
 ifneq ($(strip $(BOARD_CUSTOMIMAGES_PARTITION_LIST)),)
 	$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
 	$(hide) $(foreach partition,$(BOARD_CUSTOMIMAGES_PARTITION_LIST), \
-	    $(foreach image,$(BOARD_AVB_$(call to-upper,$(partition))_IMAGE_LIST),cp $(image) $(zip_root)/PREBUILT_IMAGES/;))
+	    $(foreach image,$(BOARD_$(call to-upper,$(partition))_IMAGE_LIST),cp $(image) $(zip_root)/PREBUILT_IMAGES/;))
 endif # BOARD_CUSTOMIMAGES_PARTITION_LIST
 	@# The radio images in BOARD_PACK_RADIOIMAGES will be additionally copied from RADIO/ into
 	@# IMAGES/, which then will be added into <product>-img.zip. Such images must be listed in
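For reference, the misc_info.txt keys emitted above split the custom partitions into an AVB-signed set and a plain-copy set. A minimal Python sketch of how a consumer could read that split (parse_misc_info and split_custom_partitions are illustrative helpers, not part of this patch; the actual consumer is the add_img_to_target_files.py change further down):

    def parse_misc_info(path):
        # Parse "key=value" lines from a META/misc_info.txt-style file.
        info = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#") or "=" not in line:
                    continue
                key, value = line.split("=", 1)
                info[key] = value
        return info

    def split_custom_partitions(info):
        # Partitions in avb_custom_images_partition_list get chained into
        # vbmeta and signed; partitions in custom_images_partition_list are
        # only copied into IMAGES/.
        avb = info.get("avb_custom_images_partition_list", "").split()
        plain = info.get("custom_images_partition_list", "").split()
        images = {}
        for partition in avb:
            images[partition] = info.get("avb_%s_image_list" % partition, "").split()
        for partition in plain:
            images[partition] = info.get("%s_image_list" % partition, "").split()
        return avb, plain, images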
diff --git a/core/config.mk b/core/config.mk
index 920e457..e919be3 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -436,11 +436,25 @@
   TARGET_MAX_PAGE_SIZE_SUPPORTED := 4096
   # When VSR vendor API level >= 34, binary alignment will be 65536.
   ifeq ($(call math_gt_or_eq,$(vsr_vendor_api_level),34),true)
+    ifeq ($(TARGET_ARCH),arm64)
       TARGET_MAX_PAGE_SIZE_SUPPORTED := 65536
+    endif
+    ifeq ($(TARGET_ARCH),arm)
+      TARGET_MAX_PAGE_SIZE_SUPPORTED := 65536
+    endif
   endif
 endif
 .KATI_READONLY := TARGET_MAX_PAGE_SIZE_SUPPORTED
 
+# A TARGET_MAX_PAGE_SIZE_SUPPORTED greater than 4096 is only allowed on ARM architectures.
+ifneq ($(TARGET_MAX_PAGE_SIZE_SUPPORTED),4096)
+  ifneq ($(TARGET_ARCH),arm64)
+    ifneq ($(TARGET_ARCH),arm)
+      $(error TARGET_MAX_PAGE_SIZE_SUPPORTED=$(TARGET_MAX_PAGE_SIZE_SUPPORTED) is greater than 4096. This is only supported on arm and arm64)
+    endif
+  endif
+endif
+
 # Boolean variable determining if AOSP is page size agnostic. This means
 # that AOSP can use a kernel configured with 4k/16k/64k PAGE SIZES.
 TARGET_PAGE_SIZE_AGNOSTIC := false
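The config.mk hunk above only permits a TARGET_MAX_PAGE_SIZE_SUPPORTED above 4096 on arm and arm64 and errors out on every other architecture. A minimal restatement of that guard in Python, purely for illustration (check_max_page_size is not part of the build system):

    def check_max_page_size(target_arch, max_page_size_supported):
        # Page sizes above 4096 are only allowed on ARM targets.
        if max_page_size_supported != 4096 and target_arch not in ("arm", "arm64"):
            raise ValueError(
                "TARGET_MAX_PAGE_SIZE_SUPPORTED=%d is greater than 4096; only "
                "supported on arm and arm64" % max_page_size_supported)

    check_max_page_size("arm64", 65536)    # allowed
    check_max_page_size("x86_64", 4096)    # allowed
    # check_max_page_size("x86_64", 65536) would raise ValueError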
diff --git a/core/soong_config.mk b/core/soong_config.mk
index f150660..bd6cfbb 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -40,6 +40,7 @@
 $(call add_json_str,  Platform_base_os,                  $(PLATFORM_BASE_OS))
 $(call add_json_str,  Platform_version_last_stable,      $(PLATFORM_VERSION_LAST_STABLE))
 $(call add_json_str,  Platform_version_known_codenames,  $(PLATFORM_VERSION_KNOWN_CODENAMES))
+$(call add_json_bool, Release_aidl_use_unfrozen,         $(RELEASE_AIDL_USE_UNFROZEN))
 
 $(call add_json_str,  Platform_min_supported_target_sdk_version, $(PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION))
 
diff --git a/tools/aconfig/src/commands.rs b/tools/aconfig/src/commands.rs
index 4f0a706..bd09e24 100644
--- a/tools/aconfig/src/commands.rs
+++ b/tools/aconfig/src/commands.rs
@@ -35,8 +35,15 @@
 impl Input {
     fn try_parse_flags(&mut self) -> Result<ProtoParsedFlags> {
         let mut buffer = Vec::new();
-        self.reader.read_to_end(&mut buffer)?;
+        self.reader
+            .read_to_end(&mut buffer)
+            .with_context(|| format!("failed to read {}", self.source))?;
         crate::protos::parsed_flags::try_from_binary_proto(&buffer)
+            .with_context(|| self.error_context())
+    }
+
+    fn error_context(&self) -> String {
+        format!("failed to parse {}", self.source)
     }
 }
 
@@ -53,20 +60,23 @@
 
     for mut input in declarations {
         let mut contents = String::new();
-        input.reader.read_to_string(&mut contents)?;
+        input
+            .reader
+            .read_to_string(&mut contents)
+            .with_context(|| format!("failed to read {}", input.source))?;
 
         let flag_declarations = crate::protos::flag_declarations::try_from_text_proto(&contents)
-            .with_context(|| format!("Failed to parse {}", input.source))?;
+            .with_context(|| input.error_context())?;
         ensure!(
             package == flag_declarations.package(),
-            "Failed to parse {}: expected package {}, got {}",
+            "failed to parse {}: expected package {}, got {}",
             input.source,
             package,
             flag_declarations.package()
         );
         for mut flag_declaration in flag_declarations.flag.into_iter() {
             crate::protos::flag_declaration::verify_fields(&flag_declaration)
-                .with_context(|| format!("Failed to parse {}", input.source))?;
+                .with_context(|| input.error_context())?;
 
             // create ParsedFlag using FlagDeclaration and default values
             let mut parsed_flag = ProtoParsedFlag::new();
@@ -101,12 +111,15 @@
 
     for mut input in values {
         let mut contents = String::new();
-        input.reader.read_to_string(&mut contents)?;
+        input
+            .reader
+            .read_to_string(&mut contents)
+            .with_context(|| format!("failed to read {}", input.source))?;
         let flag_values = crate::protos::flag_values::try_from_text_proto(&contents)
-            .with_context(|| format!("Failed to parse {}", input.source))?;
+            .with_context(|| input.error_context())?;
         for flag_value in flag_values.flag_value.into_iter() {
             crate::protos::flag_value::verify_fields(&flag_value)
-                .with_context(|| format!("Failed to parse {}", input.source))?;
+                .with_context(|| input.error_context())?;
 
             let Some(parsed_flag) = parsed_flags
                 .parsed_flag
diff --git a/tools/aconfig/src/main.rs b/tools/aconfig/src/main.rs
index 151cbe8..920b761 100644
--- a/tools/aconfig/src/main.rs
+++ b/tools/aconfig/src/main.rs
@@ -16,7 +16,7 @@
 
 //! `aconfig` is a build time tool to manage build time configurations, such as feature flags.
 
-use anyhow::{anyhow, bail, ensure, Result};
+use anyhow::{anyhow, bail, Context, Result};
 use clap::{builder::ArgAction, builder::EnumValueParser, Arg, ArgMatches, Command};
 use core::any::Any;
 use std::fs;
@@ -129,26 +129,27 @@
 }
 
 fn write_output_file_realtive_to_dir(root: &Path, output_file: &OutputFile) -> Result<()> {
-    ensure!(
-        root.is_dir(),
-        "output directory {} does not exist or is not a directory",
-        root.display()
-    );
     let path = root.join(output_file.path.clone());
     let parent = path
         .parent()
         .ok_or(anyhow!("unable to locate parent of output file {}", path.display()))?;
-    fs::create_dir_all(parent)?;
-    let mut file = fs::File::create(path)?;
-    file.write_all(&output_file.contents)?;
+    fs::create_dir_all(parent)
+        .with_context(|| format!("failed to create directory {}", parent.display()))?;
+    let mut file = fs::File::create(path.clone())
+        .with_context(|| format!("failed to open {}", path.display()))?;
+    file.write_all(&output_file.contents)
+        .with_context(|| format!("failed to write to {}", path.display()))?;
     Ok(())
 }
 
 fn write_output_to_file_or_stdout(path: &str, data: &[u8]) -> Result<()> {
     if path == "-" {
-        io::stdout().write_all(data)?;
+        io::stdout().write_all(data).context("failed to write to stdout")?;
     } else {
-        fs::File::create(path)?.write_all(data)?;
+        fs::File::create(path)
+            .with_context(|| format!("failed to open {}", path))?
+            .write_all(data)
+            .with_context(|| format!("failed to write to {}", path))?;
     }
     Ok(())
 }
@@ -160,14 +161,16 @@
             let package = get_required_arg::<String>(sub_matches, "package")?;
             let declarations = open_zero_or_more_files(sub_matches, "declarations")?;
             let values = open_zero_or_more_files(sub_matches, "values")?;
-            let output = commands::parse_flags(package, declarations, values)?;
+            let output = commands::parse_flags(package, declarations, values)
+                .context("failed to create cache")?;
             let path = get_required_arg::<String>(sub_matches, "cache")?;
             write_output_to_file_or_stdout(path, &output)?;
         }
         Some(("create-java-lib", sub_matches)) => {
             let cache = open_single_file(sub_matches, "cache")?;
             let mode = get_required_arg::<CodegenMode>(sub_matches, "mode")?;
-            let generated_files = commands::create_java_lib(cache, *mode)?;
+            let generated_files =
+                commands::create_java_lib(cache, *mode).context("failed to create java lib")?;
             let dir = PathBuf::from(get_required_arg::<String>(sub_matches, "out")?);
             generated_files
                 .iter()
@@ -176,7 +179,8 @@
         Some(("create-cpp-lib", sub_matches)) => {
             let cache = open_single_file(sub_matches, "cache")?;
             let mode = get_required_arg::<CodegenMode>(sub_matches, "mode")?;
-            let generated_files = commands::create_cpp_lib(cache, *mode)?;
+            let generated_files =
+                commands::create_cpp_lib(cache, *mode).context("failed to create cpp lib")?;
             let dir = PathBuf::from(get_required_arg::<String>(sub_matches, "out")?);
             generated_files
                 .iter()
@@ -185,25 +189,29 @@
         Some(("create-rust-lib", sub_matches)) => {
             let cache = open_single_file(sub_matches, "cache")?;
             let mode = get_required_arg::<CodegenMode>(sub_matches, "mode")?;
-            let generated_file = commands::create_rust_lib(cache, *mode)?;
+            let generated_file =
+                commands::create_rust_lib(cache, *mode).context("failed to create rust lib")?;
             let dir = PathBuf::from(get_required_arg::<String>(sub_matches, "out")?);
             write_output_file_realtive_to_dir(&dir, &generated_file)?;
         }
         Some(("create-device-config-defaults", sub_matches)) => {
             let cache = open_single_file(sub_matches, "cache")?;
-            let output = commands::create_device_config_defaults(cache)?;
+            let output = commands::create_device_config_defaults(cache)
+                .context("failed to create device config defaults")?;
             let path = get_required_arg::<String>(sub_matches, "out")?;
             write_output_to_file_or_stdout(path, &output)?;
         }
         Some(("create-device-config-sysprops", sub_matches)) => {
             let cache = open_single_file(sub_matches, "cache")?;
-            let output = commands::create_device_config_sysprops(cache)?;
+            let output = commands::create_device_config_sysprops(cache)
+                .context("failed to create device config sysprops")?;
             let path = get_required_arg::<String>(sub_matches, "out")?;
             write_output_to_file_or_stdout(path, &output)?;
         }
         Some(("dump", sub_matches)) => {
             let input = open_zero_or_more_files(sub_matches, "cache")?;
-            let format = get_required_arg::<DumpFormat>(sub_matches, "format")?;
+            let format = get_required_arg::<DumpFormat>(sub_matches, "format")
+                .context("failed to dump previously parsed flags")?;
             let output = commands::dump_parsed_flags(input, *format)?;
             let path = get_required_arg::<String>(sub_matches, "out")?;
             write_output_to_file_or_stdout(path, &output)?;
diff --git a/tools/aconfig/src/protos.rs b/tools/aconfig/src/protos.rs
index 4ddada7..2ab6e05 100644
--- a/tools/aconfig/src/protos.rs
+++ b/tools/aconfig/src/protos.rs
@@ -92,8 +92,7 @@
         ensure!(codegen::is_valid_name_ident(pdf.name()), "bad flag declaration: bad name");
         ensure!(codegen::is_valid_name_ident(pdf.namespace()), "bad flag declaration: bad name");
         ensure!(!pdf.description().is_empty(), "bad flag declaration: empty description");
-
-        // ProtoFlagDeclaration.bug: Vec<String>: may be empty, no checks needed
+        ensure!(pdf.bug.len() == 1, "bad flag declaration: exactly one bug required");
 
         Ok(())
     }
@@ -195,8 +194,7 @@
         for tp in pf.trace.iter() {
             super::tracepoint::verify_fields(tp)?;
         }
-
-        // ProtoParsedFlag.bug: Vec<String>: may be empty, no checks needed
+        ensure!(pf.bug.len() == 1, "bad flag declaration: exactly one bug required");
 
         Ok(())
     }
@@ -279,12 +277,12 @@
     namespace: "first_ns"
     description: "This is the description of the first flag."
     bug: "123"
-    bug: "abc"
 }
 flag {
     name: "second"
     namespace: "second_ns"
     description: "This is the description of the second flag."
+    bug: "abc"
 }
 "#,
         )
@@ -294,14 +292,12 @@
         assert_eq!(first.name(), "first");
         assert_eq!(first.namespace(), "first_ns");
         assert_eq!(first.description(), "This is the description of the first flag.");
-        assert_eq!(first.bug.len(), 2);
-        assert_eq!(first.bug[0], "123");
-        assert_eq!(first.bug[1], "abc");
+        assert_eq!(first.bug, vec!["123"]);
         let second = flag_declarations.flag.iter().find(|pf| pf.name() == "second").unwrap();
         assert_eq!(second.name(), "second");
         assert_eq!(second.namespace(), "second_ns");
         assert_eq!(second.description(), "This is the description of the second flag.");
-        assert_eq!(second.bug.len(), 0);
+        assert_eq!(second.bug, vec!["abc"]);
 
         // bad input: missing package in flag declarations
         let error = flag_declarations::try_from_text_proto(
@@ -376,6 +372,36 @@
         )
         .unwrap_err();
         assert!(format!("{:?}", error).contains("bad flag declaration: bad name"));
+
+        // bad input: no bug entries in flag declaration
+        let error = flag_declarations::try_from_text_proto(
+            r#"
+package: "com.foo.bar"
+flag {
+    name: "first"
+    namespace: "first_ns"
+    description: "This is the description of the first flag."
+}
+"#,
+        )
+        .unwrap_err();
+        assert!(format!("{:?}", error).contains("bad flag declaration: exactly one bug required"));
+
+        // bad input: multiple bug entries in flag declaration
+        let error = flag_declarations::try_from_text_proto(
+            r#"
+package: "com.foo.bar"
+flag {
+    name: "first"
+    namespace: "first_ns"
+    description: "This is the description of the first flag."
+    bug: "123"
+    bug: "abc"
+}
+"#,
+        )
+        .unwrap_err();
+        assert!(format!("{:?}", error).contains("bad flag declaration: exactly one bug required"));
     }
 
     #[test]
@@ -482,6 +508,7 @@
     name: "first"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: "SOME_BUG"
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -495,6 +522,7 @@
     name: "second"
     namespace: "second_ns"
     description: "This is the description of the second flag."
+    bug: "SOME_BUG"
     state: ENABLED
     permission: READ_WRITE
     trace {
@@ -516,6 +544,7 @@
         assert_eq!(second.name(), "second");
         assert_eq!(second.namespace(), "second_ns");
         assert_eq!(second.description(), "This is the description of the second flag.");
+        assert_eq!(second.bug, vec!["SOME_BUG"]);
         assert_eq!(second.state(), ProtoFlagState::ENABLED);
         assert_eq!(second.permission(), ProtoFlagPermission::READ_WRITE);
         assert_eq!(2, second.trace.len());
@@ -569,6 +598,7 @@
     name: "first"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: ""
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -582,6 +612,7 @@
     name: "second"
     namespace: "second_ns"
     description: "This is the description of the second flag."
+    bug: ""
     state: ENABLED
     permission: READ_WRITE
     trace {
@@ -604,6 +635,7 @@
     name: "bbb"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: ""
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -617,6 +649,7 @@
     name: "aaa"
     namespace: "second_ns"
     description: "This is the description of the second flag."
+    bug: ""
     state: ENABLED
     permission: READ_WRITE
     trace {
@@ -639,6 +672,7 @@
     name: "bar"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: ""
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -652,6 +686,7 @@
     name: "bar"
     namespace: "second_ns"
     description: "This is the description of the second flag."
+    bug: ""
     state: ENABLED
     permission: READ_WRITE
     trace {
@@ -673,6 +708,7 @@
     name: "bar"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: "b/12345678"
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -703,6 +739,7 @@
     name: "first"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: "a"
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -716,6 +753,7 @@
     name: "second"
     namespace: "second_ns"
     description: "This is the description of the second flag."
+    bug: "b"
     state: ENABLED
     permission: READ_WRITE
     trace {
@@ -733,6 +771,7 @@
     name: "first"
     namespace: "first_ns"
     description: "This is the description of the first flag."
+    bug: "a"
     state: DISABLED
     permission: READ_ONLY
     trace {
@@ -749,6 +788,7 @@
     package: "com.second"
     name: "second"
     namespace: "second_ns"
+    bug: "b"
     description: "This is the description of the second flag."
     state: ENABLED
     permission: READ_WRITE
diff --git a/tools/aconfig/src/test.rs b/tools/aconfig/src/test.rs
index 04bbe28..14beb93 100644
--- a/tools/aconfig/src/test.rs
+++ b/tools/aconfig/src/test.rs
@@ -61,7 +61,6 @@
   name: "enabled_ro"
   namespace: "aconfig_test"
   description: "This flag is ENABLED + READ_ONLY"
-  bug: "789"
   bug: "abc"
   state: ENABLED
   permission: READ_ONLY
@@ -86,6 +85,7 @@
   name: "enabled_rw"
   namespace: "aconfig_test"
   description: "This flag is ENABLED + READ_WRITE"
+  bug: ""
   state: ENABLED
   permission: READ_WRITE
   trace {
diff --git a/tools/aconfig/tests/test.aconfig b/tools/aconfig/tests/test.aconfig
index d7ac919..46cf1e9 100644
--- a/tools/aconfig/tests/test.aconfig
+++ b/tools/aconfig/tests/test.aconfig
@@ -8,7 +8,6 @@
     name: "enabled_ro"
     namespace: "aconfig_test"
     description: "This flag is ENABLED + READ_ONLY"
-    bug: "789"
     bug: "abc"
 }
 
@@ -19,7 +18,8 @@
     name: "enabled_rw"
     namespace: "aconfig_test"
     description: "This flag is ENABLED + READ_WRITE"
-    # no bug field: bug is not mandatory
+    # for bug fields, the empty string is a discouraged but valid value
+    bug: ""
 }
 
 # This flag's final value is calculated from:
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index f29d801..31f8736 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -517,12 +517,14 @@
   return img.name
 
 
-def AddCustomImages(output_zip, partition_name):
-  """Adds and signs custom images in IMAGES/.
+def AddCustomImages(output_zip, partition_name, image_list):
+  """Adds and signs avb custom images as needed in IMAGES/.
 
   Args:
     output_zip: The output zip file (needs to be already open), or None to
         write images to OPTIONS.input_tmp/.
+    partition_name: The custom image partition name.
+    image_list: The list of images for the custom image partition.
 
   Uses the image under IMAGES/ if it already exists. Otherwise looks for the
   image under PREBUILT_IMAGES/, signs it as needed, and returns the image name.
@@ -531,19 +533,20 @@
     AssertionError: If image can't be found.
   """
 
+  builder = None
   key_path = OPTIONS.info_dict.get("avb_{}_key_path".format(partition_name))
-  algorithm = OPTIONS.info_dict.get("avb_{}_algorithm".format(partition_name))
-  extra_args = OPTIONS.info_dict.get(
-      "avb_{}_add_hashtree_footer_args".format(partition_name))
-  partition_size = OPTIONS.info_dict.get(
-      "avb_{}_partition_size".format(partition_name))
+  if key_path is not None:
+    algorithm = OPTIONS.info_dict.get("avb_{}_algorithm".format(partition_name))
+    extra_args = OPTIONS.info_dict.get(
+        "avb_{}_add_hashtree_footer_args".format(partition_name))
+    partition_size = OPTIONS.info_dict.get(
+        "avb_{}_partition_size".format(partition_name))
 
-  builder = verity_utils.CreateCustomImageBuilder(
-      OPTIONS.info_dict, partition_name, partition_size,
-      key_path, algorithm, extra_args)
+    builder = verity_utils.CreateCustomImageBuilder(
+        OPTIONS.info_dict, partition_name, partition_size,
+        key_path, algorithm, extra_args)
 
-  for img_name in OPTIONS.info_dict.get(
-          "avb_{}_image_list".format(partition_name)).split():
+  for img_name in image_list:
     custom_image = OutputFile(
         output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
     if os.path.exists(custom_image.name):
@@ -1066,18 +1069,29 @@
 
   # Custom images.
   custom_partitions = OPTIONS.info_dict.get(
-      "avb_custom_images_partition_list", "").strip().split()
+      "custom_images_partition_list", "").strip().split()
   for partition_name in custom_partitions:
     partition_name = partition_name.strip()
     banner("custom images for " + partition_name)
-    partitions[partition_name] = AddCustomImages(output_zip, partition_name)
+    image_list = OPTIONS.info_dict.get(
+          "{}_image_list".format(partition_name)).split()
+    partitions[partition_name] = AddCustomImages(output_zip, partition_name, image_list)
+
+  avb_custom_partitions = OPTIONS.info_dict.get(
+      "avb_custom_images_partition_list", "").strip().split()
+  for partition_name in avb_custom_partitions:
+    partition_name = partition_name.strip()
+    banner("avb custom images for " + partition_name)
+    image_list = OPTIONS.info_dict.get(
+          "avb_{}_image_list".format(partition_name)).split()
+    partitions[partition_name] = AddCustomImages(output_zip, partition_name, image_list)
 
   if OPTIONS.info_dict.get("avb_enable") == "true":
     # vbmeta_partitions includes the partitions that should be included into
     # top-level vbmeta.img, which are the ones that are not included in any
     # chained VBMeta image plus the chained VBMeta images themselves.
-    # Currently custom_partitions are all chained to VBMeta image.
-    vbmeta_partitions = common.AVB_PARTITIONS[:] + tuple(custom_partitions)
+    # Currently avb_custom_partitions are all chained to VBMeta image.
+    vbmeta_partitions = common.AVB_PARTITIONS[:] + tuple(avb_custom_partitions)
 
     vbmeta_system = OPTIONS.info_dict.get("avb_vbmeta_system", "").strip()
     if vbmeta_system:
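In the updated AddCustomImages, the verity builder is only created when avb_<partition>_key_path is present in the info dict, so partitions without a key fall through to a plain copy. A minimal sketch of that copy-versus-sign flow (add_custom_images and its builder.Build call are illustrative stand-ins for the patched function and the verity_utils builder, not the real API):

    import os
    import shutil

    def add_custom_images(images_dir, prebuilt_dir, image_list, builder=None):
        # builder is None for unsigned custom partitions (no AVB key path);
        # otherwise each copied image is signed before landing in IMAGES/.
        for img_name in image_list:
            dst = os.path.join(images_dir, img_name)
            if os.path.exists(dst):
                continue  # an image already under IMAGES/ is reused as-is
            shutil.copy(os.path.join(prebuilt_dir, img_name), dst)
            if builder is not None:
                builder.Build(dst)  # placeholder for the signing step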
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 1f021e0..64970d9 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -96,7 +96,6 @@
     self.cache_size = None
     self.stash_threshold = 0.8
     self.logfile = None
-    self.host_tools = {}
     self.sepolicy_name = 'sepolicy.apex'
 
 
@@ -225,23 +224,15 @@
   logging.config.dictConfig(config)
 
 
-def SetHostToolLocation(tool_name, location):
-  OPTIONS.host_tools[tool_name] = location
-
-
 def FindHostToolPath(tool_name):
   """Finds the path to the host tool.
 
   Args:
     tool_name: name of the tool to find
   Returns:
-    path to the tool if found under either one of the host_tools map or under
-    the same directory as this binary is located at. If not found, tool_name
-    is returned.
+    path to the tool if found in the same directory as this binary. If not
+    found, tool_name is returned.
   """
-  if tool_name in OPTIONS.host_tools:
-    return OPTIONS.host_tools[tool_name]
-
   my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
   tool_path = os.path.join(my_dir, tool_name)
   if os.path.exists(tool_path):
@@ -1429,7 +1420,7 @@
       if os.path.exists(new_path):
         return new_path
     raise ExternalError(
-        "Failed to find {}".format(new_path))
+        "Failed to find {}".format(path))
 
   if not split_args:
     return split_args
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index f3e6f1e..c73dd97 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -147,7 +147,7 @@
 A/B OTA specific options
 
   --disable_fec_computation
-      Disable the on device FEC data computation for incremental updates.
+      Disable the on-device FEC data computation for incremental updates. The OTA will be larger, but installation will be faster.
 
   --include_secondary
       Additionally include the payload for secondary slot images (default:
@@ -224,7 +224,7 @@
       wait time in recovery.
 
   --enable_vabc_xor
-      Enable the VABC xor feature. Will reduce space requirements for OTA
+      Enable the VABC xor feature. Will reduce space requirements for OTA, but OTA installation will be slower.
 
   --force_minor_version
       Override the update_engine minor version for delta generation.
@@ -233,7 +233,10 @@
       A colon ':' separated list of compressors. Allowed values are bz2 and brotli.
 
   --enable_zucchini
-      Whether to enable to zucchini feature. Will generate smaller OTA but uses more memory.
+      Whether to enable the zucchini feature. Will generate a smaller OTA, but uses more memory and OTA generation will take longer.
+
+  --enable_puffdiff
+      Whether to enable the puffdiff feature. Will generate a smaller OTA, but uses more memory and OTA generation will take longer.
 
   --enable_lz4diff
       Whether to enable lz4diff feature. Will generate smaller OTA for EROFS but
@@ -320,6 +323,7 @@
 OPTIONS.force_minor_version = None
 OPTIONS.compressor_types = None
 OPTIONS.enable_zucchini = True
+OPTIONS.enable_puffdiff = None
 OPTIONS.enable_lz4diff = False
 OPTIONS.vabc_compression_param = None
 OPTIONS.security_patch_level = None
@@ -456,48 +460,51 @@
   target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
   target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
 
-  with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
-    infolist = input_zip.infolist()
+  fileslist = []
+  for (root, dirs, files) in os.walk(input_file):
+    root = root[len(input_file):].lstrip("/")
+    fileslist.extend([os.path.join(root, d) for d in dirs])
+    fileslist.extend([os.path.join(root, d) for d in files])
 
-  input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
-  for info in infolist:
-    unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
-    if info.filename == 'IMAGES/system_other.img':
+  input_tmp = input_file
+  for filename in fileslist:
+    unzipped_file = os.path.join(input_tmp, *filename.split('/'))
+    if filename == 'IMAGES/system_other.img':
       common.ZipWrite(target_zip, unzipped_file, arcname='IMAGES/system.img')
 
     # Primary images and friends need to be skipped explicitly.
-    elif info.filename in ('IMAGES/system.img',
-                           'IMAGES/system.map'):
+    elif filename in ('IMAGES/system.img',
+                      'IMAGES/system.map'):
       pass
 
     # Copy images that are not in SECONDARY_PAYLOAD_SKIPPED_IMAGES.
-    elif info.filename.startswith(('IMAGES/', 'RADIO/')):
-      image_name = os.path.basename(info.filename)
+    elif filename.startswith(('IMAGES/', 'RADIO/')):
+      image_name = os.path.basename(filename)
       if image_name not in ['{}.img'.format(partition) for partition in
                             SECONDARY_PAYLOAD_SKIPPED_IMAGES]:
-        common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+        common.ZipWrite(target_zip, unzipped_file, arcname=filename)
 
     # Skip copying the postinstall config if requested.
-    elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
+    elif skip_postinstall and filename == POSTINSTALL_CONFIG:
       pass
 
-    elif info.filename.startswith('META/'):
+    elif filename.startswith('META/'):
       # Remove the unnecessary partitions for secondary images from the
       # ab_partitions file.
-      if info.filename == AB_PARTITIONS:
+      if filename == AB_PARTITIONS:
         with open(unzipped_file) as f:
           partition_list = f.read().splitlines()
         partition_list = [partition for partition in partition_list if partition
                           and partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
-        common.ZipWriteStr(target_zip, info.filename,
+        common.ZipWriteStr(target_zip, filename,
                            '\n'.join(partition_list))
       # Remove the unnecessary partitions from the dynamic partitions list.
-      elif (info.filename == 'META/misc_info.txt' or
-            info.filename == DYNAMIC_PARTITION_INFO):
+      elif (filename == 'META/misc_info.txt' or
+            filename == DYNAMIC_PARTITION_INFO):
         modified_info = GetInfoForSecondaryImages(unzipped_file)
-        common.ZipWriteStr(target_zip, info.filename, modified_info)
+        common.ZipWriteStr(target_zip, filename, modified_info)
       else:
-        common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+        common.ZipWrite(target_zip, unzipped_file, arcname=filename)
 
   common.ZipClose(target_zip)
 
@@ -994,6 +1001,9 @@
 
   additional_args += ["--enable_zucchini=" +
                       str(OPTIONS.enable_zucchini).lower()]
+  if OPTIONS.enable_puffdiff is not None:
+    additional_args += ["--enable_puffdiff=" +
+                        str(OPTIONS.enable_puffdiff).lower()]
 
   if not ota_utils.IsLz4diffCompatible(source_file, target_file):
     logger.warning(
@@ -1193,6 +1203,9 @@
     elif o == "--enable_zucchini":
       assert a.lower() in ["true", "false"]
       OPTIONS.enable_zucchini = a.lower() != "false"
+    elif o == "--enable_puffdiff":
+      assert a.lower() in ["true", "false"]
+      OPTIONS.enable_puffdiff = a.lower() != "false"
     elif o == "--enable_lz4diff":
       assert a.lower() in ["true", "false"]
       OPTIONS.enable_lz4diff = a.lower() != "false"
@@ -1254,6 +1267,7 @@
                                  "force_minor_version=",
                                  "compressor_types=",
                                  "enable_zucchini=",
+                                 "enable_puffdiff=",
                                  "enable_lz4diff=",
                                  "vabc_compression_param=",
                                  "security_patch_level=",
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 2b65e47..2b45825 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -1236,8 +1236,9 @@
 
   vendor_misc_info["has_dtbo"] = "false"  # dtbo
   vendor_misc_info["has_pvmfw"] = "false"  # pvmfw
-  vendor_misc_info["avb_custom_images_partition_list"] = ""  # custom images
+  vendor_misc_info["avb_custom_images_partition_list"] = ""  # avb custom images
   vendor_misc_info["avb_building_vbmeta_image"] = "false" # skip building vbmeta
+  vendor_misc_info["custom_images_partition_list"] = ""  # custom images
   vendor_misc_info["use_dynamic_partitions"] = "false"  # super_empty
   vendor_misc_info["build_super_partition"] = "false"  # super split
   vendor_misc_info["avb_vbmeta_system"] = ""  # skip building vbmeta_system