Merge "Rename the Default* constants in java/config."
diff --git a/Changes.md b/Changes.md
index 453ea6c..61e6bb6 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,92 @@
 # Build System Changes for Android.mk Writers
 
+## Changes in system properties settings
+
+### Product variables
+
+System properties for each partition are supposed to be set via the following
+product config variables.
+
+For system partition,
+
+* `PRODUCT_SYSTEM_PROPERTIES`
+* `PRODUCT_SYSTEM_DEFAULT_PROPERTIES` is highly discouraged and will be deprecated.
+
+For vendor partition,
+
+* `PRODUCT_VENDOR_PROPERTIES`
+* `PRODUCT_PROPERTY_OVERRIDES` is highly discouraged and will be deprecated.
+* `PRODUCT_DEFAULT_PROPERTY_OVERRIDES` is also discouraged and will be deprecated.
+
+For odm partition,
+
+* `PRODUCT_ODM_PROPERTIES`
+
+For system_ext partition,
+
+* `PRODUCT_SYSTEM_EXT_PROPERTIES`
+
+For product partition,
+
+* `PRODUCT_PRODUCT_PROPERTIES`
+
+### Duplication is not allowed within a partition
+
+For each partition, having multiple sysprop assignments for the same name is
+prohibited. For example, the following will now trigger an error:
+
+`PRODUCT_VENDOR_PROPERTIES += foo=true foo=false`
+
+Duplication across partitions is still allowed, so the following is not an
+error:
+
+`PRODUCT_VENDOR_PROPERTIES += foo=true`
+`PRODUCT_SYSTEM_PROPERTIES += foo=false`
+
+In that case, the final value is determined at runtime. The precedence, from
+highest to lowest, is
+
+* product
+* odm
+* vendor
+* system_ext
+* system
+
+So, `foo` becomes `true` because vendor has higher priority than system.
+
+To temporarily turn the build-time restriction off, use
+
+`BUILD_BROKEN_DUP_SYSPROP := true`
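+
+With the restriction off, the legacy build-time behavior applies: the first
+assignment wins, so in the duplicated example above `foo` would resolve to
+`true`.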
+
+### Optional assignments
+
+System properties can now be set as optional using the new syntax:
+
+`name ?= value`
+
+Then the system property named `name` gets the value `value` only when there
+is no other non-optional assignment having the same name. For example, the
+following is allowed, and `foo` gets `true`:
+
+`PRODUCT_VENDOR_PROPERTIES += foo=true foo?=false`
+
+Note that the order between the optional and the non-optional assignments
+doesn't matter. The following gives the same result as above.
+
+`PRODUCT_VENDOR_PROPERTIES += foo?=false foo=true`
+
+Optional assignments can be duplicated, and in that case their order matters:
+the last one eclipses the others.
+
+`PRODUCT_VENDOR_PROPERTIES += foo?=apple foo?=banana foo?=mango`
+
+With the above, `foo` becomes `mango` since it's the last one.
+
+Note that this behavior is different from the previous behavior of preferring
+the first one. To go back to the original behavior for compatibility reasons,
+use:
+
+`BUILD_BROKEN_DUP_SYSPROP := true`
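+
+As a combined illustration of the rules above, with hypothetical values:
+
+`PRODUCT_VENDOR_PROPERTIES += foo?=apple`
+`PRODUCT_SYSTEM_PROPERTIES += foo=banana`
+
+Here the vendor copy of `foo` resolves to `apple` at build time (there is no
+non-optional vendor assignment), and at runtime the vendor value takes
+precedence over the system one, so `foo` reads as `apple`.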
+
 ## ELF prebuilts in PRODUCT_COPY_FILES
 
 ELF prebuilts in PRODUCT_COPY_FILES that are installed into these paths are an
diff --git a/core/Makefile b/core/Makefile
index dd5c2cc..8ac4e0d 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -206,7 +206,7 @@
 
 define copy-and-strip-kernel-module
 $(2): $(1)
-	$($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP) -o $(2) --strip-debug $(1)
+	$(LLVM_STRIP) -o $(2) --strip-debug $(1)
 endef
 
 # $(1): modules list
@@ -340,6 +340,15 @@
     $(call copy-many-files,$(call module-load-list-copy-paths,$(call intermediates-dir-for,PACKAGING,vendor_ramdisk_recovery_module_list$(_sep)$(_kver)),$(BOARD_VENDOR_RAMDISK_KERNEL_MODULES$(_sep)$(_kver)),$(BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD$(_sep)$(_kver)),modules.load.recovery,$(TARGET_VENDOR_RAMDISK_OUT))))
 endef
 
+# $(1): kernel module directory name (top is an out-of-band value for no directory)
+define build-vendor-charger-load
+$(if $(filter top,$(1)),\
+  $(eval _kver :=)$(eval _sep :=),\
+  $(eval _kver := $(1))$(eval _sep :=_))\
+  $(if $(BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD$(_sep)$(_kver)),\
+    $(call copy-many-files,$(call module-load-list-copy-paths,$(call intermediates-dir-for,PACKAGING,vendor_charger_module_list$(_sep)$(_kver)),$(BOARD_VENDOR_CHARGER_KERNEL_MODULES$(_sep)$(_kver)),$(BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD$(_sep)$(_kver)),modules.load.charger,$(TARGET_OUT_VENDOR))))
+endef
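+# For example, `$(call build-vendor-charger-load,top)` emits the copy rules
+# for modules.load.charger (no kernel-version suffix) when
+# BOARD_VENDOR_CHARGER_KERNEL_MODULES_LOAD is set.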
+
 ifneq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
   # If there is no vendor boot partition, store vendor ramdisk kernel modules in the
   # boot ramdisk.
@@ -375,6 +384,7 @@
   $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(TARGET_VENDOR_RAMDISK_OUT),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
   $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(dir))) \
   $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(TARGET_OUT_VENDOR),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
+  $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(dir))) \
   $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,ODM,$(TARGET_OUT_ODM),odm,modules.load,,$(dir))) \
   $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
     $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-recovery-as-boot-load,$(dir))),\
@@ -3435,8 +3445,8 @@
 $(BUILT_KERNEL_CONFIGS_FILE): .KATI_IMPLICIT_OUTPUTS := $(BUILT_KERNEL_VERSION_FILE)
 $(BUILT_KERNEL_CONFIGS_FILE): PRIVATE_DECOMPRESS_TOOLS := $(my_decompress_tools)
 $(BUILT_KERNEL_CONFIGS_FILE): $(foreach pair,$(my_decompress_tools),$(call word-colon,2,$(pair)))
-$(BUILT_KERNEL_CONFIGS_FILE): $(EXTRACT_KERNEL) $(INSTALLED_KERNEL_TARGET)
-	$< --tools $(PRIVATE_DECOMPRESS_TOOLS) --input $(INSTALLED_KERNEL_TARGET) \
+$(BUILT_KERNEL_CONFIGS_FILE): $(EXTRACT_KERNEL) $(firstword $(INSTALLED_KERNEL_TARGET))
+	$< --tools $(PRIVATE_DECOMPRESS_TOOLS) --input $(firstword $(INSTALLED_KERNEL_TARGET)) \
 	  --output-configs $@ \
 	  --output-version $(BUILT_KERNEL_VERSION_FILE)
 
@@ -3641,6 +3651,7 @@
   libconscrypt_openjdk_jni \
   lpmake \
   lpunpack \
+  lz4 \
   make_f2fs \
   merge_target_files \
   minigzip \
@@ -4040,13 +4051,13 @@
   $(foreach device,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES), \
     echo "super_$(device)_device_size=$(BOARD_SUPER_PARTITION_$(call to-upper,$(device))_DEVICE_SIZE)" >> $(1);)
   $(if $(BOARD_SUPER_PARTITION_PARTITION_LIST), \
-    echo "dynamic_partition_list=$(call filter-out-missing-vendor, $(BOARD_SUPER_PARTITION_PARTITION_LIST))" >> $(1))
+    echo "dynamic_partition_list=$(call filter-out-missing-vendor,$(BOARD_SUPER_PARTITION_PARTITION_LIST))" >> $(1))
   $(if $(BOARD_SUPER_PARTITION_GROUPS),
     echo "super_partition_groups=$(BOARD_SUPER_PARTITION_GROUPS)" >> $(1))
   $(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
     echo "super_$(group)_group_size=$(BOARD_$(call to-upper,$(group))_SIZE)" >> $(1); \
     $(if $(BOARD_$(call to-upper,$(group))_PARTITION_LIST), \
-      echo "super_$(group)_partition_list=$(call filter-out-missing-vendor, $(BOARD_$(call to-upper,$(group))_PARTITION_LIST))" >> $(1);))
+      echo "super_$(group)_partition_list=$(call filter-out-missing-vendor,$(BOARD_$(call to-upper,$(group))_PARTITION_LIST))" >> $(1);))
   $(if $(filter true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED)), \
     echo "build_non_sparse_super_partition=true" >> $(1))
   $(if $(filter true,$(TARGET_USERIMAGES_SPARSE_F2FS_DISABLED)), \
diff --git a/core/OWNERS b/core/OWNERS
index 750f1fa..459683e 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -1,3 +1,2 @@
 per-file dex_preopt*.mk = ngeoffray@google.com,calin@google.com,mathewi@google.com,dbrazdil@google.com
-per-file construct_context.sh = ngeoffray@google.com,calin@google.com,mathieuc@google.com
 per-file verify_uses_libraries.sh = ngeoffray@google.com,calin@google.com,mathieuc@google.com
diff --git a/core/board_config.mk b/core/board_config.mk
index ae1614f..a6e586d 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -93,6 +93,7 @@
   BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW \
   BUILD_BROKEN_USES_NETWORK \
   BUILD_BROKEN_VINTF_PRODUCT_COPY_FILES \
+  BUILD_BROKEN_DUP_SYSPROP \
 
 _build_broken_var_list += \
   $(foreach m,$(AVAILABLE_BUILD_MODULE_TYPES) \
@@ -593,6 +594,9 @@
 endef
 
 ifdef BOARD_VNDK_VERSION
+  ifeq ($(BOARD_VNDK_VERSION),$(PLATFORM_VNDK_VERSION))
+    $(error BOARD_VNDK_VERSION is equal to PLATFORM_VNDK_VERSION; use BOARD_VNDK_VERSION := current)
+  endif
   ifneq ($(BOARD_VNDK_VERSION),current)
     $(call check_vndk_version,$(BOARD_VNDK_VERSION))
   endif
diff --git a/core/config.mk b/core/config.mk
index 2ec0cfd..a5b8ef7 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -150,6 +150,7 @@
   ,Project include variables have been removed)
 $(KATI_obsolete_var TARGET_PREFER_32_BIT TARGET_PREFER_32_BIT_APPS TARGET_PREFER_32_BIT_EXECUTABLES)
 $(KATI_obsolete_var PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_WHITELIST,Use PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_ALLOW_LIST.)
+$(KATI_obsolete_var PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST,Use PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST.)
 
 # Used to force goals to build.  Only use for conditionally defined goals.
 .PHONY: FORCE
@@ -977,8 +978,7 @@
     BOARD_SUPER_PARTITION_GROUPS and BOARD_*_PARTITION_LIST)
 endif
 BOARD_SUPER_PARTITION_PARTITION_LIST := \
-    $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
-        $(BOARD_$(group)_PARTITION_LIST))
+    $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)),$(BOARD_$(group)_PARTITION_LIST))
 .KATI_READONLY := BOARD_SUPER_PARTITION_PARTITION_LIST
 
 ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
diff --git a/core/construct_context.sh b/core/construct_context.sh
deleted file mode 100755
index d620d08..0000000
--- a/core/construct_context.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-# target_sdk_version: parsed from manifest
-#
-# outputs
-# class_loader_context_arg: final class loader conext arg
-# stored_class_loader_context_arg: final stored class loader context arg
-
-if [ -z "${target_sdk_version}" ]; then
-    echo "ERROR: target_sdk_version not set"
-    exit 2
-fi
-
-# The hidl.manager shared library has a dependency on hidl.base. We'll manually
-# add that information to the class loader context if we see those libraries.
-hidl_manager="android.hidl.manager-V1.0-java"
-hidl_base="android.hidl.base-V1.0-java"
-
-function add_to_contexts {
-  for i in $1; do
-    if [[ -z "${class_loader_context}" ]]; then
-      export class_loader_context="PCL[$i]"
-    else
-      export class_loader_context+="#PCL[$i]"
-    fi
-    if [[ $i == *"$hidl_manager"* ]]; then
-      export class_loader_context+="{PCL[${i/$hidl_manager/$hidl_base}]}"
-    fi
-  done
-
-  for i in $2; do
-    if [[ -z "${stored_class_loader_context}" ]]; then
-      export stored_class_loader_context="PCL[$i]"
-    else
-      export stored_class_loader_context+="#PCL[$i]"
-    fi
-    if [[ $i == *"$hidl_manager"* ]]; then
-      export stored_class_loader_context+="{PCL[${i/$hidl_manager/$hidl_base}]}"
-    fi
-  done
-}
-
-# The order below must match what the package manager also computes for
-# class loader context.
-
-if [[ "${target_sdk_version}" -lt "28" ]]; then
-  add_to_contexts "${conditional_host_libs_28}" "${conditional_target_libs_28}"
-fi
-
-if [[ "${target_sdk_version}" -lt "29" ]]; then
-  add_to_contexts "${conditional_host_libs_29}" "${conditional_target_libs_29}"
-fi
-
-if [[ "${target_sdk_version}" -lt "30" ]]; then
-  add_to_contexts "${conditional_host_libs_30}" "${conditional_target_libs_30}"
-fi
-
-add_to_contexts "${dex_preopt_host_libraries}" "${dex_preopt_target_libraries}"
-
-# Generate the actual context string.
-export class_loader_context_arg="--class-loader-context=PCL[]{${class_loader_context}}"
-export stored_class_loader_context_arg="--stored-class-loader-context=PCL[]{${stored_class_loader_context}}"
diff --git a/core/line_coverage.mk b/core/line_coverage.mk
index b920e28..babcb30 100644
--- a/core/line_coverage.mk
+++ b/core/line_coverage.mk
@@ -8,8 +8,7 @@
 # packs them into another zip file called `line_coverage_profiles.zip`.
 #
 # To run the make target set the coverage related envvars first:
-# 	NATIVE_LINE_COVERAGE=true NATIVE_COVERAGE=true \
-#	NATIVE_COVERAGE_PATHS=* make haiku-line-coverage
+# 	NATIVE_COVERAGE=true NATIVE_COVERAGE_PATHS=* make haiku-line-coverage
 # -----------------------------------------------------------------
 
 # TODO(b/148306195): Due this issue some fuzz targets cannot be built with
@@ -47,7 +46,6 @@
 	libinputflinger \
 	libopus \
 	libstagefright \
-	libunwind \
 	libvixl:com.android.art.debug
 
 # Use the intermediates directory to avoid installing libraries to the device.
@@ -68,7 +66,7 @@
 fuzz_target_inputs := $(foreach fuzz,$(fuzz_targets), \
 	$(call intermediates-dir-for,EXECUTABLES,$(fuzz))/$(fuzz))
 
-# When line coverage is enabled (NATIVE_LINE_COVERAGE is set), make creates
+# When coverage is enabled (NATIVE_COVERAGE is set), make creates
 # a "coverage" directory and stores all profile (*.gcno) files in inside.
 # We need everything that is stored inside this directory.
 $(line_coverage_profiles): $(fuzz_target_inputs)
diff --git a/core/main.mk b/core/main.mk
index 7793889..cc7cf72 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -102,6 +102,15 @@
 EMMA_INSTRUMENT := true
 endif
 
+# TODO(b/158212027): Turn this into an error when all users have been moved to
+# `NATIVE_COVERAGE_PATHS` and `NATIVE_COVERAGE_EXCLUDE_PATHS`.
+ifneq ($(COVERAGE_PATHS),)
+  $(warning Variable COVERAGE_PATHS is deprecated. Please use NATIVE_COVERAGE_PATHS instead.)
+endif
+ifneq ($(COVERAGE_EXCLUDE_PATHS),)
+  $(warning Variable COVERAGE_EXCLUDE_PATHS is deprecated. Please use NATIVE_COVERAGE_EXCLUDE_PATHS instead.)
+endif
+
 ifeq (true,$(EMMA_INSTRUMENT))
 # Adding the jacoco library can cause the inclusion of
 # some typically banned classes
@@ -633,9 +642,11 @@
 endef
 
 # TODO(b/7456955): error if a required module doesn't exist.
-# Resolve the required module names in ALL_MODULES.*.REQUIRED_FROM_TARGET,
-# ALL_MODULES.*.REQUIRED_FROM_HOST and ALL_MODULES.*.REQUIRED_FROM_HOST_CROSS
-# to 32-bit or 64-bit variant.
+# Resolve the required module names to 32-bit or 64-bit variant for:
+#   ALL_MODULES.<*>.REQUIRED_FROM_TARGET
+#   ALL_MODULES.<*>.REQUIRED_FROM_HOST
+#   ALL_MODULES.<*>.REQUIRED_FROM_HOST_CROSS
+#
 # If a module is for cross host OS, the required modules are also for that OS.
 # Required modules explicitly suffixed with :32 or :64 resolve to that bitness.
 # Otherwise if the requiring module is native and the required module is shared
@@ -651,27 +662,53 @@
       $(eval r := $(addprefix host_cross_,$(r)))) \
     $(eval module_is_native := \
       $(filter EXECUTABLES SHARED_LIBRARIES NATIVE_TESTS,$(ALL_MODULES.$(m).CLASS))) \
-    $(eval r_r := $(foreach r_i,$(r), \
-      $(if $(filter %:32 %:64,$(r_i)), \
-        $(eval r_m := $(call resolve-bitness-for-modules,$(1),$(r_i))), \
-        $(eval r_m := \
-          $(eval r_i_2nd := $(call get-modules-for-2nd-arch,$(1),$(r_i))) \
-          $(eval required_is_shared_library_or_native_test := \
-            $(filter SHARED_LIBRARIES NATIVE_TESTS, \
-              $(ALL_MODULES.$(r_i).CLASS) $(ALL_MODULES.$(r_i_2nd).CLASS))) \
-          $(if $(and $(module_is_native),$(required_is_shared_library_or_native_test)), \
-            $(if $(ALL_MODULES.$(m).FOR_2ND_ARCH),$(r_i_2nd),$(r_i)), \
-            $(r_i) $(r_i_2nd)))) \
-      $(eval ### TODO(b/7456955): error if r_m is empty / does not exist) \
-      $(r_m))) \
+    $(eval r_r := \
+      $(foreach r_i,$(r), \
+        $(if $(filter %:32 %:64,$(r_i)), \
+          $(eval r_m := $(call resolve-bitness-for-modules,$(1),$(r_i))), \
+          $(eval r_m := \
+            $(eval r_i_2nd := $(call get-modules-for-2nd-arch,$(1),$(r_i))) \
+            $(eval required_is_shared_library_or_native_test := \
+              $(filter SHARED_LIBRARIES NATIVE_TESTS, \
+                $(ALL_MODULES.$(r_i).CLASS) $(ALL_MODULES.$(r_i_2nd).CLASS))) \
+            $(if $(and $(module_is_native),$(required_is_shared_library_or_native_test)), \
+              $(if $(ALL_MODULES.$(m).FOR_2ND_ARCH),$(r_i_2nd),$(r_i)), \
+              $(r_i) $(r_i_2nd)))) \
+        $(eval ### TODO(b/7456955): error if r_m is empty / does not exist) \
+        $(r_m))) \
     $(eval ALL_MODULES.$(m).REQUIRED_FROM_$(1) := $(sort $(r_r))) \
   ) \
 )
 endef
 
+# Resolve the required module names to 32-bit or 64-bit variant for:
+#   ALL_MODULES.<*>.TARGET_REQUIRED_FROM_HOST
+#   ALL_MODULES.<*>.HOST_REQUIRED_FROM_TARGET
+#
+# This is like select-bitness-of-required-modules, but it doesn't have
+# complicated logic for various module types.
+# Calls resolve-bitness-for-modules to resolve module names.
+# $(1): TARGET or HOST
+# $(2): HOST or TARGET
+define select-bitness-of-target-host-required-modules
+$(foreach m,$(ALL_MODULES), \
+  $(eval r := $(ALL_MODULES.$(m).$(1)_REQUIRED_FROM_$(2))) \
+  $(if $(r), \
+    $(eval r_r := \
+      $(foreach r_i,$(r), \
+        $(eval r_m := $(call resolve-bitness-for-modules,$(1),$(r_i))) \
+        $(eval ### TODO(b/7456955): error if r_m is empty / does not exist) \
+        $(r_m))) \
+    $(eval ALL_MODULES.$(m).$(1)_REQUIRED_FROM_$(2) := $(sort $(r_r))) \
+  ) \
+)
+endef
+
 $(call select-bitness-of-required-modules,TARGET)
 $(call select-bitness-of-required-modules,HOST)
 $(call select-bitness-of-required-modules,HOST_CROSS)
+$(call select-bitness-of-target-host-required-modules,TARGET,HOST)
+$(call select-bitness-of-target-host-required-modules,HOST,TARGET)
 
 define add-required-deps
 $(1): | $(2)
@@ -1288,8 +1325,7 @@
     $(eval extra_files := $(filter-out $(files) $(HOST_OUT)/%,$(product_target_FILES))) \
     $(eval files_in_requirement := $(filter $(path_patterns),$(extra_files))) \
     $(eval all_offending_files += $(files_in_requirement)) \
-    $(eval allowed := $(strip $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST) \
-      $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST))) \
+    $(eval allowed := $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST)) \
     $(eval allowed_patterns := $(call resolve-product-relative-paths,$(allowed))) \
     $(eval offending_files := $(filter-out $(allowed_patterns),$(files_in_requirement))) \
     $(eval enforcement := $(PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
diff --git a/core/product.mk b/core/product.mk
index 0eee2ab..f531319 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -328,7 +328,6 @@
 _product_single_value_vars += PRODUCT_ENFORCE_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT
 _product_list_vars += PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_ALLOW_LIST
 _product_list_vars += PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT
-_product_list_vars += PRODUCT_ARTIFACT_PATH_REQUIREMENT_WHITELIST
 _product_list_vars += PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST
 
 # List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
diff --git a/core/soong_android_app_set.mk b/core/soong_android_app_set.mk
index 5ed9b2c..eb8e5e7 100644
--- a/core/soong_android_app_set.mk
+++ b/core/soong_android_app_set.mk
@@ -31,4 +31,17 @@
 $(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := $(LOCAL_POST_INSTALL_CMD)
 PACKAGES.$(LOCAL_MODULE).OVERRIDES := $(strip $(LOCAL_OVERRIDES_PACKAGES))
 
+# android_app_set modules are always presigned
+PACKAGES.$(LOCAL_MODULE).CERTIFICATE := PRESIGNED
+PACKAGES := $(PACKAGES) $(LOCAL_MODULE)
+
+ifneq ($(LOCAL_MODULE_STEM),)
+  PACKAGES.$(LOCAL_MODULE).STEM := $(LOCAL_MODULE_STEM)
+else
+  PACKAGES.$(LOCAL_MODULE).STEM := $(LOCAL_MODULE)
+endif
+
+# Set the actual_partition_tag (calculated in base_rules.mk) for the package.
+PACKAGES.$(LOCAL_MODULE).PARTITION := $(actual_partition_tag)
+
 SOONG_ALREADY_CONV += $(LOCAL_MODULE)
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 84bfd1e..b9e7a27 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -114,8 +114,7 @@
 $(call add_json_list, JavaCoveragePaths,                 $(JAVA_COVERAGE_PATHS))
 $(call add_json_list, JavaCoverageExcludePaths,          $(JAVA_COVERAGE_EXCLUDE_PATHS))
 
-$(call add_json_bool, NativeLineCoverage,                $(filter true,$(NATIVE_LINE_COVERAGE)))
-$(call add_json_bool, Native_coverage,                   $(filter true,$(NATIVE_COVERAGE)))
+$(call add_json_bool, GcovCoverage,                      $(filter true,$(NATIVE_COVERAGE)))
 $(call add_json_bool, ClangCoverage,                     $(filter true,$(CLANG_COVERAGE)))
 # TODO(b/158212027): Remove `$(COVERAGE_PATHS)` from this list when all users have been moved to
 # `NATIVE_COVERAGE_PATHS`.
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 0cee81c..f1311ed 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -71,10 +71,22 @@
 define build-properties
 ALL_DEFAULT_INSTALLED_MODULES += $(2)
 
-# TODO(b/117892318): eliminate the call to uniq-pairs-by-first-component when
-# it is guaranteed that there is no dup.
+$(eval # Properties can be assigned using `prop ?= value` or `prop = value` syntax.)
+$(eval # Eliminate spaces around the ?= and = separators.)
 $(foreach name,$(strip $(4)),\
-    $(eval _resolved_$(name) := $$(call collapse-pairs, $$(call uniq-pairs-by-first-component,$$($(name)),=)))\
+    $(eval _temp := $$(call collapse-pairs,$$($(name)),?=))\
+    $(eval _resolved_$(name) := $$(call collapse-pairs,$$(_temp),=))\
+)
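+$(eval # For example, an assignment written as "foo ?= true" is collapsed)
+$(eval # into the single word "foo?=true" before further processing.)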
+
+$(eval # Implement the legacy behavior when BUILD_BROKEN_DUP_SYSPROP is on.)
+$(eval # Optional assignments are all converted to normal assignments, and)
+$(eval # when there are duplicates, the first one wins.)
+$(if $(filter true,$(BUILD_BROKEN_DUP_SYSPROP)),\
+    $(foreach name,$(strip $(4)),\
+        $(eval _temp := $$(subst ?=,=,$$(_resolved_$(name))))\
+        $(eval _resolved_$(name) := $$(call uniq-pairs-by-first-component,$$(_temp),=))\
+    )\
+    $(eval _option := --allow-dup)\
 )
 
 $(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) $(3)
@@ -99,7 +111,7 @@
 	        echo "$$(line)" >> $$@;\
 	    )\
 	)
-	$(hide) $(POST_PROCESS_PROPS) $$@ $(5)
+	$(hide) $(POST_PROCESS_PROPS) $$(_option) $$@ $(5)
 	$(hide) echo "# end of file" >> $$@
 endef
 
@@ -361,7 +373,7 @@
     $(empty)))
 
 # ----------------------------------------------------------------
-# odm/build.prop
+# odm/etc/build.prop
 #
 _prop_files_ := $(if $(TARGET_ODM_PROP),\
     $(TARGET_ODM_PROP),\
@@ -373,7 +385,9 @@
     ADDITIONAL_ODM_PROPERTIES \
     PRODUCT_ODM_PROPERTIES
 
-INSTALLED_ODM_BUILD_PROP_TARGET := $(TARGET_OUT_ODM)/build.prop
+# Note the 'etc' subdirectory. For the rationale, see
+# I0733c277baa67c549bb45599abb70aba13fbdbcf
+INSTALLED_ODM_BUILD_PROP_TARGET := $(TARGET_OUT_ODM)/etc/build.prop
 $(eval $(call build-properties,\
     odm,\
     $(INSTALLED_ODM_BUILD_PROP_TARGET),\
diff --git a/core/tasks/general-tests.mk b/core/tasks/general-tests.mk
index cb84a5c..a820a28 100644
--- a/core/tasks/general-tests.mk
+++ b/core/tasks/general-tests.mk
@@ -14,12 +14,10 @@
 
 .PHONY: general-tests
 
-# TODO(b/149249068): Remove vts10-tradefed.jar after all VTS tests are converted
 general_tests_tools := \
     $(HOST_OUT_JAVA_LIBRARIES)/cts-tradefed.jar \
     $(HOST_OUT_JAVA_LIBRARIES)/compatibility-host-util.jar \
-    $(HOST_OUT_JAVA_LIBRARIES)/vts-tradefed.jar \
-    $(HOST_OUT_JAVA_LIBRARIES)/vts10-tradefed.jar
+    $(HOST_OUT_JAVA_LIBRARIES)/vts-tradefed.jar
 
 intermediates_dir := $(call intermediates-dir-for,PACKAGING,general-tests)
 general_tests_zip := $(PRODUCT_OUT)/general-tests.zip
diff --git a/target/board/emulator_arm64/device.mk b/target/board/emulator_arm64/device.mk
index 57675d0..73dc2f4 100644
--- a/target/board/emulator_arm64/device.mk
+++ b/target/board/emulator_arm64/device.mk
@@ -26,7 +26,3 @@
 
 PRODUCT_COPY_FILES += \
     $(LOCAL_KERNEL):kernel
-
-# Adjust the Dalvik heap to be appropriate for a tablet.
-$(call inherit-product-if-exists, frameworks/base/build/tablet-dalvik-heap.mk)
-$(call inherit-product-if-exists, frameworks/native/build/tablet-dalvik-heap.mk)
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 3b7cd44..b34004f 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -18,7 +18,3 @@
     device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4:kernel-5.4 \
     device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4-gz:kernel-5.4-gz \
     device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4-lz4:kernel-5.4-lz4
-
-# Adjust the Dalvik heap to be appropriate for a tablet.
-$(call inherit-product-if-exists, frameworks/base/build/tablet-dalvik-heap.mk)
-$(call inherit-product-if-exists, frameworks/native/build/tablet-dalvik-heap.mk)
diff --git a/target/product/aosp_product.mk b/target/product/aosp_product.mk
index f22c3a3..a3da1c9 100644
--- a/target/product/aosp_product.mk
+++ b/target/product/aosp_product.mk
@@ -23,9 +23,9 @@
 
 # Additional settings used in all AOSP builds
 PRODUCT_PRODUCT_PROPERTIES += \
-    ro.config.ringtone=Ring_Synth_04.ogg \
-    ro.config.notification_sound=pixiedust.ogg \
-    ro.com.android.dataroaming=true \
+    ro.config.ringtone?=Ring_Synth_04.ogg \
+    ro.config.notification_sound?=pixiedust.ogg \
+    ro.com.android.dataroaming?=true \
 
 # More AOSP packages
 PRODUCT_PACKAGES += \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index bec550b..f6770fb 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -348,7 +348,7 @@
 endif
 
 PRODUCT_COPY_FILES += system/core/rootdir/init.zygote32.rc:system/etc/init/hw/init.zygote32.rc
-PRODUCT_SYSTEM_PROPERTIES += ro.zygote=zygote32
+PRODUCT_SYSTEM_PROPERTIES += ro.zygote?=zygote32
 
 PRODUCT_SYSTEM_PROPERTIES += debug.atrace.tags.enableflags=0
 
diff --git a/target/product/full_base.mk b/target/product/full_base.mk
index dfb2204..64f61ff 100644
--- a/target/product/full_base.mk
+++ b/target/product/full_base.mk
@@ -43,8 +43,8 @@
 
 # Additional settings used in all AOSP builds
 PRODUCT_VENDOR_PROPERTIES := \
-    ro.config.ringtone=Ring_Synth_04.ogg \
-    ro.config.notification_sound=pixiedust.ogg
+    ro.config.ringtone?=Ring_Synth_04.ogg \
+    ro.config.notification_sound?=pixiedust.ogg
 
 # Put en_US first in the list, so make it default.
 PRODUCT_LOCALES := en_US
diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk
index 5e18c05..d8a54cd 100644
--- a/target/product/full_base_telephony.mk
+++ b/target/product/full_base_telephony.mk
@@ -20,8 +20,8 @@
 # entirely appropriate to inherit from for on-device configurations.
 
 PRODUCT_VENDOR_PROPERTIES := \
-    keyguard.no_require_sim=true \
-    ro.com.android.dataroaming=true
+    keyguard.no_require_sim?=true \
+    ro.com.android.dataroaming?=true
 
 PRODUCT_COPY_FILES := \
     device/sample/etc/apns-full-conf.xml:system/etc/apns-conf.xml \
diff --git a/target/product/handheld_system.mk b/target/product/handheld_system.mk
index 22c817e..e2c91b6 100644
--- a/target/product/handheld_system.mk
+++ b/target/product/handheld_system.mk
@@ -84,6 +84,6 @@
     frameworks/av/media/libeffects/data/audio_effects.conf:system/etc/audio_effects.conf
 
 PRODUCT_VENDOR_PROPERTIES += \
-    ro.carrier=unknown \
-    ro.config.notification_sound=OnTheHunt.ogg \
-    ro.config.alarm_alert=Alarm_Classic.ogg
+    ro.carrier?=unknown \
+    ro.config.notification_sound?=OnTheHunt.ogg \
+    ro.config.alarm_alert?=Alarm_Classic.ogg
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index a3fafb3..7a2dd73 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -74,7 +74,7 @@
 # On userdebug builds, collect more tombstones by default.
 ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
 PRODUCT_VENDOR_PROPERTIES += \
-    tombstoned.max_tombstone_count=50
+    tombstoned.max_tombstone_count?=50
 endif
 
 PRODUCT_VENDOR_PROPERTIES += \
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 78a4af0..b96601d 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -57,23 +57,23 @@
 # On eng builds, make "boot" reasons only extract for faster turnaround.
 ifeq (eng,$(TARGET_BUILD_VARIANT))
     PRODUCT_SYSTEM_PROPERTIES += \
-        pm.dexopt.first-boot=extract \
-        pm.dexopt.boot=extract
+        pm.dexopt.first-boot?=extract \
+        pm.dexopt.boot?=extract
 else
     PRODUCT_SYSTEM_PROPERTIES += \
-        pm.dexopt.first-boot=quicken \
-        pm.dexopt.boot=verify
+        pm.dexopt.first-boot?=quicken \
+        pm.dexopt.boot?=verify
 endif
 
 # The install filter is speed-profile in order to enable the use of
 # profiles from the dex metadata files. Note that if a profile is not provided
 # or if it is empty speed-profile is equivalent to (quicken + empty app image).
 PRODUCT_SYSTEM_PROPERTIES += \
-    pm.dexopt.install=speed-profile \
-    pm.dexopt.bg-dexopt=speed-profile \
-    pm.dexopt.ab-ota=speed-profile \
-    pm.dexopt.inactive=verify \
-    pm.dexopt.shared=speed
+    pm.dexopt.install?=speed-profile \
+    pm.dexopt.bg-dexopt?=speed-profile \
+    pm.dexopt.ab-ota?=speed-profile \
+    pm.dexopt.inactive?=verify \
+    pm.dexopt.shared?=speed
 
 # Pass file with the list of updatable boot class path packages to dex2oat.
 PRODUCT_SYSTEM_PROPERTIES += \
diff --git a/tools/Android.bp b/tools/Android.bp
index 159890c..149d06d 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -37,3 +37,22 @@
     },
   },
 }
+
+python_test_host {
+  name: "post_process_props_unittest",
+  main: "test_post_process_props.py",
+  srcs: [
+    "post_process_props.py",
+    "test_post_process_props.py",
+  ],
+  version: {
+    py2: {
+      enabled: false,
+    },
+    py3: {
+      enabled: true,
+    },
+  },
+  test_config: "post_process_props_unittest.xml",
+  test_suites: ["general-tests"],
+}
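+
+// Note: as part of general-tests, this module can typically be run locally
+// with `atest post_process_props_unittest`.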
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 4fa15bc..d8c9cb1 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -14,10 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
 import sys
 
-# Usage: post_process_props.py file.prop [blacklist_key, ...]
-# Blacklisted keys are removed from the property file, if present
+# Usage: post_process_props.py file.prop [disallowed_key, ...]
+# Disallowed keys are removed from the property file, if present
 
 # See PROP_VALUE_MAX in system_properties.h.
 # The constant in system_properties.h includes the terminating NUL,
@@ -29,8 +30,8 @@
 def mangle_build_prop(prop_list):
   # If ro.debuggable is 1, then enable adb on USB by default
   # (this is for userdebug builds)
-  if prop_list.get("ro.debuggable") == "1":
-    val = prop_list.get("persist.sys.usb.config")
+  if prop_list.get_value("ro.debuggable") == "1":
+    val = prop_list.get_value("persist.sys.usb.config")
     if "adb" not in val:
       if val == "":
         val = "adb"
@@ -40,52 +41,132 @@
   # UsbDeviceManager expects a value here.  If it doesn't get it, it will
   # default to "adb". That might not the right policy there, but it's better
   # to be explicit.
-  if not prop_list.get("persist.sys.usb.config"):
+  if not prop_list.get_value("persist.sys.usb.config"):
     prop_list.put("persist.sys.usb.config", "none");
 
 def validate(prop_list):
   """Validate the properties.
 
+  If the value of a sysprop exceeds the max limit (91 bytes), it's an error,
+  unless the sysprop is a read-only one (name starting with "ro.").
+
+  Also checks that there are no unresolved optional prop assignments.
+
   Returns:
     True if nothing is wrong.
   """
   check_pass = True
-  for p in prop_list.get_all():
+  for p in prop_list.get_all_props():
     if len(p.value) > PROP_VALUE_MAX and not p.name.startswith("ro."):
       check_pass = False
       sys.stderr.write("error: %s cannot exceed %d bytes: " %
                        (p.name, PROP_VALUE_MAX))
       sys.stderr.write("%s (%d)\n" % (p.value, len(p.value)))
+
+    if p.is_optional():
+      check_pass = False
+      sys.stderr.write("error: found unresolved optional prop assignment:\n")
+      sys.stderr.write(str(p) + "\n")
+
   return check_pass
 
+def override_optional_props(prop_list, allow_dup=False):
+  """Override a?=b with a=c, if the latter exists
+
+  Overriding is done by deleting a?=b
+  When there are a?=b and a?=c, then only the last one survives
+  When there are a=b and a=c, then it's an error.
+
+  Returns:
+    True if the override was successful
+  """
+  success = True
+  for name in prop_list.get_all_names():
+    props = prop_list.get_props(name)
+    optional_props = [p for p in props if p.is_optional()]
+    overriding_props = [p for p in props if not p.is_optional()]
+    if len(overriding_props) > 1:
+      # duplicated props are allowed when they all have the same value
+      if all(overriding_props[0].value == p.value for p in overriding_props):
+        for p in optional_props:
+          p.delete("overridden by %s" % str(overriding_props[0]))
+        continue
+      # or if dup is explicitly allowed for compat reason
+      if allow_dup:
+        # this could leave one or more optional props unresolved.
+        # Convert them into non-optional ones because init doesn't understand
+        # the ?= syntax
+        for p in optional_props:
+          p.optional = False
+        continue
+
+      success = False
+      sys.stderr.write("error: found duplicate sysprop assignments:\n")
+      for p in overriding_props:
+        sys.stderr.write("%s\n" % str(p))
+    elif len(overriding_props) == 1:
+      for p in optional_props:
+        p.delete("overridden by %s" % str(overriding_props[0]))
+    else:
+      if len(optional_props) > 1:
+        for p in optional_props[:-1]:
+          p.delete("overridden by %s" % str(optional_props[-1]))
+      # Make the last optional one non-optional
+      optional_props[-1].optional = False
+
+  return success
+
 class Prop:
 
-  def __init__(self, name, value, comment=None):
+  def __init__(self, name, value, optional=False, comment=None):
     self.name = name.strip()
     self.value = value.strip()
-    self.comment = comment
+    if comment != None:
+      self.comments = [comment]
+    else:
+      self.comments = []
+    self.optional = optional
 
   @staticmethod
   def from_line(line):
     line = line.rstrip('\n')
     if line.startswith("#"):
-      return Prop("", "", line)
+      return Prop("", "", comment=line)
+    elif "?=" in line:
+      name, value = line.split("?=", 1)
+      return Prop(name, value, optional=True)
     elif "=" in line:
       name, value = line.split("=", 1)
-      return Prop(name, value)
+      return Prop(name, value, optional=False)
     else:
       # don't fail on invalid line
       # TODO(jiyong) make this a hard error
-      return Prop("", "", line)
+      return Prop("", "", comment=line)
 
   def is_comment(self):
-    return self.comment != None
+    return bool(self.comments and not self.name)
+
+  def is_optional(self):
+    return (not self.is_comment()) and self.optional
+
+  def make_as_comment(self):
+    # Prepend "#" to the last line which is the prop assignment
+    if not self.is_comment():
+      assignment = str(self).rsplit("\n", 1)[-1]
+      self.comments.append("#" + assignment)
+      self.name = ""
+      self.value = ""
+
+  def delete(self, reason):
+    self.comments.append("# Removed by post_process_props.py because " + reason)
+    self.make_as_comment()
 
   def __str__(self):
-    if self.is_comment():
-      return self.comment
-    else:
-      return self.name + "=" + self.value
+    assignment = []
+    if not self.is_comment():
+      operator = "?=" if self.is_optional() else "="
+      assignment.append(self.name + operator + self.value)
+    return "\n".join(self.comments + assignment)
 
 class PropList:
 
@@ -94,47 +175,65 @@
       self.props = [Prop.from_line(l)
                     for l in f.readlines() if l.strip() != ""]
 
-  def get_all(self):
+  def get_all_props(self):
     return [p for p in self.props if not p.is_comment()]
 
-  def get(self, name):
+  def get_all_names(self):
+    return set([p.name for p in self.get_all_props()])
+
+  def get_props(self, name):
+    return [p for p in self.get_all_props() if p.name == name]
+
+  def get_value(self, name):
+    # Caution: only the value of the first sysprop having the name is returned.
     return next((p.value for p in self.props if p.name == name), "")
 
   def put(self, name, value):
-    index = next((i for i,p in enumerate(self.props) if p.name == name), -1)
+    # Note: when there is an optional prop for the name, its value isn't changed.
+    # Instead a new non-optional prop is appended, which will override the
+    # optional prop. Otherwise, the new value might be overridden by an existing
+    # non-optional prop of the same name.
+    index = next((i for i,p in enumerate(self.props)
+                  if p.name == name and not p.is_optional()), -1)
     if index == -1:
-      self.props.append(Prop(name, value))
+      self.props.append(Prop(name, value,
+                             comment="# Auto-added by post_process_props.py"))
     else:
+      self.props[index].comments.append(
+          "# Value overridden by post_process_props.py. Original value: %s" %
+          self.props[index].value)
       self.props[index].value = value
 
-  def delete(self, name):
-    index = next((i for i,p in enumerate(self.props) if p.name == name), -1)
-    if index != -1:
-      new_comment = "# removed by post_process_props.py\n#" + str(self.props[index])
-      self.props[index] = Prop.from_line(new_comment)
-
   def write(self, filename):
     with open(filename, 'w+') as f:
       for p in self.props:
         f.write(str(p) + "\n")
 
 def main(argv):
-  filename = argv[1]
+  parser = argparse.ArgumentParser(description="Post-process build.prop file")
+  parser.add_argument("--allow-dup", dest="allow_dup", action="store_true",
+                      default=False)
+  parser.add_argument("filename")
+  parser.add_argument("disallowed_keys", metavar="KEY", type=str, nargs="*")
+  args = parser.parse_args()
 
-  if not filename.endswith("/build.prop"):
+  if not args.filename.endswith("/build.prop"):
     sys.stderr.write("bad command line: " + str(argv) + "\n")
     sys.exit(1)
 
-  props = PropList(filename)
+  props = PropList(args.filename)
   mangle_build_prop(props)
+  if not override_optional_props(props, args.allow_dup):
+    sys.exit(1)
   if not validate(props):
     sys.exit(1)
 
-  # Drop any blacklisted keys
-  for key in argv[2:]:
-    props.delete(key)
+  # Drop any disallowed keys
+  for key in args.disallowed_keys:
+    for p in props.get_props(key):
+      p.delete("%s is a disallowed key" % key)
 
-  props.write(filename)
+  props.write(args.filename)
 
 if __name__ == "__main__":
   main(sys.argv)
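+
+# Example invocation (illustrative path and key):
+#   post_process_props.py --allow-dup $OUT/system/build.prop some.disallowed.key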
diff --git a/tools/post_process_props_unittest.xml b/tools/post_process_props_unittest.xml
new file mode 100644
index 0000000..4a6ecc2
--- /dev/null
+++ b/tools/post_process_props_unittest.xml
@@ -0,0 +1,6 @@
+<configuration description="Config to run post_process_props_unittest">
+    <test class="com.android.tradefed.testtype.python.PythonBinaryHostTest" >
+        <option name="par-file-name" value="post_process_props_unittest" />
+        <option name="test-timeout" value="1m" />
+    </test>
+</configuration>
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 8a59a39..8bbc35e 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -17,6 +17,7 @@
 import base64
 import collections
 import copy
+import datetime
 import errno
 import fnmatch
 import getopt
@@ -53,16 +54,17 @@
     # running this function, user-supplied search path (`--path`) hasn't been
     # available. So the value set here is the default, which might be overridden
     # by commandline flag later.
-    exec_path = sys.argv[0]
+    exec_path = os.path.realpath(sys.argv[0])
     if exec_path.endswith('.py'):
       script_name = os.path.basename(exec_path)
       # logger hasn't been initialized yet at this point. Use print to output
       # warnings.
       print(
           'Warning: releasetools script should be invoked as hermetic Python '
-          'executable -- build and run `{}` directly.'.format(script_name[:-3]),
+          'executable -- build and run `{}` directly.'.format(
+              script_name[:-3]),
           file=sys.stderr)
-    self.search_path = os.path.realpath(os.path.join(os.path.dirname(exec_path), '..'))
+    self.search_path = os.path.dirname(os.path.dirname(exec_path))
 
     self.signapk_path = "framework/signapk.jar"  # Relative to search_path
     self.signapk_shared_library_path = "lib64"   # Relative to search_path
@@ -191,11 +193,11 @@
     if OPTIONS.logfile:
       config = copy.deepcopy(config)
       config['handlers']['logfile'] = {
-        'class': 'logging.FileHandler',
-        'formatter': 'standard',
-        'level': 'INFO',
-        'mode': 'w',
-        'filename': OPTIONS.logfile,
+          'class': 'logging.FileHandler',
+          'formatter': 'standard',
+          'level': 'INFO',
+          'mode': 'w',
+          'filename': OPTIONS.logfile,
       }
       config['loggers']['']['handlers'].append('logfile')
 
@@ -224,7 +226,7 @@
   if 'universal_newlines' not in kwargs:
     kwargs['universal_newlines'] = True
   # Don't log any if caller explicitly says so.
-  if verbose != False:
+  if verbose:
     logger.info("  Running: \"%s\"", " ".join(args))
   return subprocess.Popen(args, **kwargs)
 
@@ -274,7 +276,7 @@
   if output is None:
     output = ""
   # Don't log any if caller explicitly says so.
-  if verbose != False:
+  if verbose:
     logger.info("%s", output.rstrip())
   if proc.returncode != 0:
     raise ExternalError(
@@ -375,7 +377,6 @@
             'Invalid build fingerprint: "{}". See the requirement in Android CDD '
             "3.2.2. Build Parameters.".format(fingerprint))
 
-
     self._partition_fingerprints = {}
     for partition in PARTITIONS_WITH_CARE_MAP:
       try:
@@ -522,7 +523,8 @@
           self.GetPartitionBuildProp("ro.product.device", partition),
           self.GetPartitionBuildProp("ro.build.version.release", partition),
           self.GetPartitionBuildProp("ro.build.id", partition),
-          self.GetPartitionBuildProp("ro.build.version.incremental", partition),
+          self.GetPartitionBuildProp(
+              "ro.build.version.incremental", partition),
           self.GetPartitionBuildProp("ro.build.type", partition),
           self.GetPartitionBuildProp("ro.build.tags", partition))
 
@@ -683,7 +685,7 @@
   if "boot_images" in d:
     boot_images = d["boot_images"]
   for b in boot_images.split():
-    makeint(b.replace(".img","_size"))
+    makeint(b.replace(".img", "_size"))
 
   # Load recovery fstab if applicable.
   d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
@@ -703,7 +705,7 @@
     for partition in PARTITIONS_WITH_CARE_MAP:
       fingerprint = build_info.GetPartitionFingerprint(partition)
       if fingerprint:
-        d["avb_{}_salt".format(partition)] = sha256(fingerprint).hexdigest()
+        d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
 
   return d
 
@@ -749,6 +751,7 @@
         placeholders in the build.prop file. We expect exactly one value for
         each of the variables.
   """
+
   def __init__(self, input_file, name, placeholder_values=None):
     self.input_file = input_file
     self.partition = name
@@ -808,7 +811,7 @@
     """Parses the build prop in a given import statement."""
 
     tokens = line.split()
-    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3) :
+    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
       raise ValueError('Unrecognized import statement {}'.format(line))
 
     if len(tokens) == 3:
@@ -998,9 +1001,9 @@
 
   # Pick virtual ab related flags from vendor dict, if defined.
   if "virtual_ab" in vendor_dict.keys():
-     merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
+    merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
   if "virtual_ab_retrofit" in vendor_dict.keys():
-     merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
+    merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
   return merged_dict
 
 
@@ -1234,7 +1237,7 @@
     kernel = "kernel"
   else:
     kernel = image_name.replace("boot", "kernel")
-    kernel = kernel.replace(".img","")
+    kernel = kernel.replace(".img", "")
   if not os.access(os.path.join(sourcedir, kernel), os.F_OK):
     return None
 
@@ -1358,7 +1361,7 @@
     if partition_name == "recovery":
       part_size = info_dict["recovery_size"]
     else:
-      part_size = info_dict[image_name.replace(".img","_size")]
+      part_size = info_dict[image_name.replace(".img", "_size")]
     cmd = [avbtool, "add_hash_footer", "--image", img.name,
            "--partition_size", str(part_size), "--partition_name",
            partition_name]
@@ -1511,7 +1514,8 @@
   if info_dict is None:
     info_dict = OPTIONS.info_dict
 
-  data = _BuildVendorBootImage(os.path.join(unpack_dir, tree_subdir), info_dict)
+  data = _BuildVendorBootImage(
+      os.path.join(unpack_dir, tree_subdir), info_dict)
   if data:
     return File(name, data)
   return None
@@ -1520,7 +1524,7 @@
 def Gunzip(in_filename, out_filename):
   """Gunzips the given gzip compressed file to a given output file."""
   with gzip.open(in_filename, "rb") as in_file, \
-       open(out_filename, "wb") as out_file:
+          open(out_filename, "wb") as out_file:
     shutil.copyfileobj(in_file, out_file)
 
 
@@ -1622,8 +1626,7 @@
     if reset_file_map:
       img.ResetFileMap()
     return img
-  else:
-    return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
+  return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
 
 
 def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
@@ -1822,10 +1825,9 @@
     # Not a decimal number. Codename?
     if version in codename_to_api_level_map:
       return codename_to_api_level_map[version]
-    else:
-      raise ExternalError(
-          "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
-              version, codename_to_api_level_map))
+    raise ExternalError(
+        "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
+            version, codename_to_api_level_map))
 
 
 def SignFile(input_name, output_name, key, password, min_api_level=None,
@@ -1930,7 +1932,8 @@
     msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
     if pct >= 99.0:
       raise ExternalError(msg)
-    elif pct >= 95.0:
+
+    if pct >= 95.0:
       logger.warning("\n  WARNING: %s\n", msg)
     else:
       logger.info("  %s", msg)
@@ -2040,6 +2043,7 @@
       Put verbose logs to specified file (regardless of --verbose option.)
 """
 
+
 def Usage(docstring):
   print(docstring.rstrip("\n"))
   print(COMMON_DOCSTRING)
@@ -2202,7 +2206,7 @@
 
       current = self.UpdateAndReadFile(current)
 
-  def PromptResult(self, current): # pylint: disable=no-self-use
+  def PromptResult(self, current):  # pylint: disable=no-self-use
     """Prompt the user to enter a value (password) for each key in
     'current' whose value is fales.  Returns a new dict with all the
     values.
@@ -2265,7 +2269,6 @@
 
 def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
              compress_type=None):
-  import datetime
 
   # http://b/18015246
   # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
@@ -2391,6 +2394,7 @@
 
 class DeviceSpecificParams(object):
   module = None
+
   def __init__(self, **kwargs):
     """Keyword arguments to the constructor become attributes of this
     object, which is passed to all functions in the device-specific
@@ -2519,12 +2523,12 @@
 
 
 DIFF_PROGRAM_BY_EXT = {
-    ".gz" : "imgdiff",
-    ".zip" : ["imgdiff", "-z"],
-    ".jar" : ["imgdiff", "-z"],
-    ".apk" : ["imgdiff", "-z"],
-    ".img" : "imgdiff",
-    }
+    ".gz": "imgdiff",
+    ".zip": ["imgdiff", "-z"],
+    ".jar": ["imgdiff", "-z"],
+    ".apk": ["imgdiff", "-z"],
+    ".img": "imgdiff",
+}
 
 
 class Difference(object):
@@ -2563,6 +2567,7 @@
       cmd.append(ptemp.name)
       p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
       err = []
+
       def run():
         _, e = p.communicate()
         if e:
@@ -2591,7 +2596,6 @@
     self.patch = diff
     return self.tf, self.sf, self.patch
 
-
   def GetPatch(self):
     """Returns a tuple of (target_file, source_file, patch_data).
 
@@ -2902,7 +2906,7 @@
                 new_data_name=new_data_name, code=code))
     script.AppendExtra(script.WordWrap(call))
 
-  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
+  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
     data = source.ReadRangeSet(ranges)
     ctx = sha1()
 
@@ -2911,7 +2915,7 @@
 
     return ctx.hexdigest()
 
-  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
+  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
     """Return the hash value for all zero blocks."""
     zero_block = '\x00' * 4096
     ctx = sha1()
@@ -2934,6 +2938,7 @@
     "squashfs": "EMMC"
 }
 
+
 def GetTypeAndDevice(mount_point, info, check_no_slot=True):
   """
   Use GetTypeAndDeviceExpr whenever possible. This function is kept for
@@ -2944,11 +2949,10 @@
   if fstab:
     if check_no_slot:
       assert not fstab[mount_point].slotselect, \
-             "Use GetTypeAndDeviceExpr instead"
+          "Use GetTypeAndDeviceExpr instead"
     return (PARTITION_TYPES[fstab[mount_point].fs_type],
             fstab[mount_point].device)
-  else:
-    raise KeyError
+  raise KeyError
 
 
 def GetTypeAndDeviceExpr(mount_point, info):
@@ -2963,8 +2967,7 @@
     if p.slotselect:
       device_expr = 'add_slot_suffix(%s)' % device_expr
     return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
-  else:
-    raise KeyError
+  raise KeyError
 
 
 def GetEntryForDevice(fstab, device):
@@ -2979,6 +2982,7 @@
       return fstab[mount_point]
   return None
 
+
 def ParseCertificate(data):
   """Parses and converts a PEM-encoded certificate into DER-encoded.
 
@@ -3305,7 +3309,7 @@
       for p, u in self._partition_updates.items():
         if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
           u.block_difference.WritePostInstallVerifyScript(script)
-          script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
+          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
 
     for p, u in self._partition_updates.items():
       if u.tgt_size and u.src_size <= u.tgt_size:
@@ -3313,7 +3317,7 @@
         u.block_difference.WriteScript(script, output_zip, progress=u.progress,
                                        write_verify_script=write_verify_script)
         if write_verify_script:
-          script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
+          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
 
     script.Comment('--- End patching dynamic partitions ---')
 
@@ -3370,7 +3374,8 @@
 
     for p, u in self._partition_updates.items():
       if u.tgt_size and u.src_size < u.tgt_size:
-        comment('Grow partition %s from %d to %d' % (p, u.src_size, u.tgt_size))
+        comment('Grow partition %s from %d to %d' %
+                (p, u.src_size, u.tgt_size))
         append('resize %s %d' % (p, u.tgt_size))
 
     for p, u in self._partition_updates.items():
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 99e21f1..b9c9b19 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -374,12 +374,12 @@
 
   def _CheckSecondTokenNotSlotSuffixed(self, s, fn):
     lst = s.split(':')
-    assert(len(s) == 4), "{} does not contain 4 tokens".format(s)
+    assert(len(lst) == 4), "{} does not contain 4 tokens".format(s)
     if self.fstab:
-      entry = common.GetEntryForDevice(s[1])
+      entry = common.GetEntryForDevice(self.fstab, lst[1])
       if entry is not None:
         assert not entry.slotselect, \
-          "Use %s because %s is slot suffixed" % (fn, s[1])
+          "Use %s because %s is slot suffixed" % (fn, lst[1])
 
   def WriteRawImage(self, mount_point, fn, mapfn=None):
     """Write the given package file into the partition for the given
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 3b68439..7fb0a77 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -221,8 +221,10 @@
 import check_target_files_vintf
 import common
 import edify_generator
+import target_files_diff
 import verity_utils
 
+
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
@@ -545,10 +547,10 @@
     target_files_dir = "SYSTEM/vendor"
 
   patch = "%s/recovery-from-boot.p" % target_files_dir
-  img = "%s/etc/recovery.img" %target_files_dir
+  img = "%s/etc/recovery.img" % target_files_dir
 
-  namelist = [name for name in target_files_zip.namelist()]
-  return (patch in namelist or img in namelist)
+  namelist = target_files_zip.namelist()
+  return patch in namelist or img in namelist
 
 
 def HasPartition(target_files_zip, partition):
@@ -626,7 +628,8 @@
 
   def GetIncrementalBlockDifferenceForPartition(name):
     if not HasPartition(source_zip, name):
-      raise RuntimeError("can't generate incremental that adds {}".format(name))
+      raise RuntimeError(
+          "can't generate incremental that adds {}".format(name))
 
     partition_src = common.GetUserImage(name, OPTIONS.source_tmp, source_zip,
                                         info_dict=source_info,
@@ -637,8 +640,7 @@
     partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
                                         info_dict=target_info,
                                         allow_shared_blocks=allow_shared_blocks,
-                                        hashtree_info_generator=
-                                        hashtree_info_generator)
+                                        hashtree_info_generator=hashtree_info_generator)
 
     # Check the first block of the source system partition for remount R/W only
     # if the filesystem is ext4.
@@ -1450,7 +1452,7 @@
     fs = source_info["fstab"]["/misc"]
     assert fs.fs_type.upper() == "EMMC", \
         "two-step packages only supported on devices with EMMC /misc partitions"
-    bcb_dev = {"bcb_dev" : fs.device}
+    bcb_dev = {"bcb_dev": fs.device}
     common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
     script.AppendExtra("""
 if get_stage("%(bcb_dev)s") == "2/3" then
@@ -1668,7 +1670,7 @@
         partitions = [partition for partition in partitions if partition
                       not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
         output_list.append('{}={}'.format(key, ' '.join(partitions)))
-      elif key == 'virtual_ab' or key == "virtual_ab_retrofit":
+      elif key in ['virtual_ab', "virtual_ab_retrofit"]:
         # Remove virtual_ab flag from secondary payload so that OTA client
         # don't use snapshots for secondary update
         pass
@@ -1712,7 +1714,8 @@
           partition_list = f.read().splitlines()
         partition_list = [partition for partition in partition_list if partition
                           and partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
-        common.ZipWriteStr(target_zip, info.filename, '\n'.join(partition_list))
+        common.ZipWriteStr(target_zip, info.filename,
+                           '\n'.join(partition_list))
       # Remove the unnecessary partitions from the dynamic partitions list.
       elif (info.filename == 'META/misc_info.txt' or
             info.filename == DYNAMIC_PARTITION_INFO):
@@ -1795,7 +1798,8 @@
       "{} is in super_block_devices but not in {}".format(
           super_device_not_updated, AB_PARTITIONS)
   # ab_partitions -= (dynamic_partition_list - super_block_devices)
-  new_ab_partitions = common.MakeTempFile(prefix="ab_partitions", suffix=".txt")
+  new_ab_partitions = common.MakeTempFile(
+      prefix="ab_partitions", suffix=".txt")
   with open(new_ab_partitions, 'w') as f:
     for partition in ab_partitions:
       if (partition in dynamic_partition_list and
@@ -1985,7 +1989,7 @@
     OPTIONS.source_tmp = common.UnzipTemp(
         OPTIONS.incremental_source, UNZIP_PATTERN)
     with zipfile.ZipFile(target_file) as input_zip, \
-        zipfile.ZipFile(source_file) as source_zip:
+            zipfile.ZipFile(source_file) as source_zip:
       WriteBlockIncrementalOTAPackage(
           input_zip,
           source_zip,
@@ -2245,7 +2249,6 @@
         OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
 
     with open(OPTIONS.log_diff, 'w') as out_file:
-      import target_files_diff
       target_files_diff.recursiveDiff(
           '', source_dir, target_dir, out_file)
 
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index c2bfd32..5d10c40 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -608,22 +608,22 @@
     elif (OPTIONS.remove_avb_public_keys and
           (filename.startswith("BOOT/RAMDISK/avb/") or
            filename.startswith("BOOT/RAMDISK/first_stage_ramdisk/avb/"))):
-        matched_removal = False
-        for key_to_remove in OPTIONS.remove_avb_public_keys:
-          if filename.endswith(key_to_remove):
-            matched_removal = True
-            print("Removing AVB public key from ramdisk: %s" % filename)
-            break
-        if not matched_removal:
-          # Copy it verbatim if we don't want to remove it.
-          common.ZipWriteStr(output_tf_zip, out_info, data)
+      matched_removal = False
+      for key_to_remove in OPTIONS.remove_avb_public_keys:
+        if filename.endswith(key_to_remove):
+          matched_removal = True
+          print("Removing AVB public key from ramdisk: %s" % filename)
+          break
+      if not matched_removal:
+        # Copy it verbatim if we don't want to remove it.
+        common.ZipWriteStr(output_tf_zip, out_info, data)
 
     # Skip verity keyid (for system_root_image use) if we will replace it.
     elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
       pass
 
     # Skip the care_map as we will regenerate the system/vendor images.
-    elif filename == "META/care_map.pb" or filename == "META/care_map.txt":
+    elif filename in ["META/care_map.pb", "META/care_map.txt"]:
       pass
 
     # Updates system_other.avbpubkey in /product/etc/.
@@ -967,11 +967,10 @@
     if extra_args:
       print('Setting extra AVB signing args for %s to "%s"' % (
           partition, extra_args))
-      if partition in AVB_FOOTER_ARGS_BY_PARTITION:
-        args_key = AVB_FOOTER_ARGS_BY_PARTITION[partition]
-      else:
-        # custom partition
-        args_key = "avb_{}_add_hashtree_footer_args".format(partition)
+      args_key = AVB_FOOTER_ARGS_BY_PARTITION.get(
+          partition,
+          # custom partition
+          "avb_{}_add_hashtree_footer_args".format(partition))
       misc_info[args_key] = (misc_info.get(args_key, '') + ' ' + extra_args)
 
   for partition in AVB_FOOTER_ARGS_BY_PARTITION:
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 69be511..ac469eb 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -371,6 +371,17 @@
             partition, info_dict, key_file)
         cmd.extend(['--expected_chain_partition', chained_partition_arg])
 
+    # Handle a boot image with a non-default name, e.g. boot-5.4.img.
+    boot_images = info_dict.get("boot_images")
+    if boot_images:
+      # We use the first boot image to generate the vbmeta. Rename it to
+      # boot.img so that avbtool can find it.
+      first_image_name = boot_images.split()[0]
+      first_image_path = os.path.join(input_tmp, 'IMAGES', first_image_name)
+      assert os.path.isfile(first_image_path)
+      renamed_boot_image_path = os.path.join(input_tmp, 'IMAGES', 'boot.img')
+      os.rename(first_image_path, renamed_boot_image_path)
+
     proc = common.Run(cmd)
     stdoutdata, _ = proc.communicate()
     assert proc.returncode == 0, \
diff --git a/tools/test_post_process_props.py b/tools/test_post_process_props.py
new file mode 100644
index 0000000..12d52e5
--- /dev/null
+++ b/tools/test_post_process_props.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
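+"""Unit tests for post_process_props.py."""
+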
+import contextlib
+import io
+import unittest
+
+from unittest.mock import mock_open, patch
+from post_process_props import Prop, PropList, override_optional_props
+
+class PropTestCase(unittest.TestCase):
+  def test_createFromLine(self):
+    p = Prop.from_line("# this is comment")
+    self.assertTrue(p.is_comment())
+    self.assertEqual("", p.name)
+    self.assertEqual("", p.value)
+    self.assertFalse(p.is_optional())
+    self.assertEqual("# this is comment", str(p))
+
+    for line in ["a=b", "a = b", "a= b", "a =b", "  a=b   "]:
+      p = Prop.from_line(line)
+      self.assertFalse(p.is_comment())
+      self.assertEqual("a", p.name)
+      self.assertEqual("b", p.value)
+      self.assertFalse(p.is_optional())
+      self.assertEqual("a=b", str(p))
+
+    for line in ["a?=b", "a ?= b", "a?= b", "a ?=b", "  a?=b   "]:
+      p = Prop.from_line(line)
+      self.assertFalse(p.is_comment())
+      self.assertEqual("a", p.name)
+      self.assertEqual("b", p.value)
+      self.assertTrue(p.is_optional())
+      self.assertEqual("a?=b", str(p))
+
+  def test_makeAsComment(self):
+    p = Prop.from_line("a=b")
+    p.comments.append("# a comment")
+    self.assertFalse(p.is_comment())
+
+    p.make_as_comment()
+    self.assertTrue(p.is_comment())
+    self.assertTrue("# a comment\n#a=b", str(p))
+
+class PropListTestCase(unittest.TestCase):
+  def setUp(self):
+    content = """
+    # comment
+    foo=true
+    bar=false
+    qux?=1
+    # another comment
+    foo?=false
+    """
+    self.patcher = patch("post_process_props.open",
+                         mock_open(read_data=content))
+    self.mock_open = self.patcher.start()
+    self.props = PropList("file")
+
+  def tearDown(self):
+    self.patcher.stop()
+    self.props = None
+
+  def test_readFromFile(self):
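+    # the two comment lines in the fixture are not returned as props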
+    self.assertEqual(4, len(self.props.get_all_props()))
+    expected = [
+        ("foo", "true", False),
+        ("bar", "false", False),
+        ("qux", "1", True),
+        ("foo", "false", True)
+    ]
+    for i, p in enumerate(self.props.get_all_props()):
+      self.assertEqual(expected[i][0], p.name)
+      self.assertEqual(expected[i][1], p.value)
+      self.assertEqual(expected[i][2], p.is_optional())
+      self.assertFalse(p.is_comment())
+
+    self.assertEqual(set(["foo", "bar", "qux"]), self.props.get_all_names())
+
+    self.assertEqual("true", self.props.get_value("foo"))
+    self.assertEqual("false", self.props.get_value("bar"))
+    self.assertEqual("1", self.props.get_value("qux"))
+
+    # there are two assignments for 'foo'
+    self.assertEqual(2, len(self.props.get_props("foo")))
+
+  def test_putNewProp(self):
+    self.props.put("new", "30")
+
+    self.assertEqual(5, len(self.props.get_all_props()))
+    last_prop = self.props.get_all_props()[-1]
+    self.assertEqual("new", last_prop.name)
+    self.assertEqual("30", last_prop.value)
+    self.assertFalse(last_prop.is_optional())
+
+  def test_putExistingNonOptionalProp(self):
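+    # put() overrides the value of an existing non-optional prop in place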
+    self.props.put("foo", "NewValue")
+
+    self.assertEqual(4, len(self.props.get_all_props()))
+    foo_prop = self.props.get_props("foo")[0]
+    self.assertEqual("foo", foo_prop.name)
+    self.assertEqual("NewValue", foo_prop.value)
+    self.assertFalse(foo_prop.is_optional())
+    self.assertEqual("# Value overridden by post_process_props.py. " +
+                     "Original value: true\nfoo=NewValue", str(foo_prop))
+
+  def test_putExistingOptionalProp(self):
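+    # put() does not replace the optional qux?=1; a new non-optional
+    # assignment is appended at the end instead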
+    self.props.put("qux", "2")
+
+    self.assertEqual(5, len(self.props.get_all_props()))
+    last_prop = self.props.get_all_props()[-1]
+    self.assertEqual("qux", last_prop.name)
+    self.assertEqual("2", last_prop.value)
+    self.assertFalse(last_prop.is_optional())
+    self.assertEqual("# Auto-added by post_process_props.py\nqux=2",
+                     str(last_prop))
+
+  def test_deleteNonOptionalProp(self):
+    props_to_delete = self.props.get_props("foo")[0]
+    props_to_delete.delete(reason="testing")
+
+    self.assertEqual(3, len(self.props.get_all_props()))
+    self.assertEqual("# Removed by post_process_props.py because testing\n" +
+                     "#foo=true", str(props_to_delete))
+
+  def test_deleteOptionalProp(self):
+    props_to_delete = self.props.get_props("qux")[0]
+    props_to_delete.delete(reason="testing")
+
+    self.assertEqual(3, len(self.props.get_all_props()))
+    self.assertEqual("# Removed by post_process_props.py because testing\n" +
+                     "#qux?=1", str(props_to_delete))
+
+  def test_overridingNonOptional(self):
+    props_to_be_overridden = self.props.get_props("foo")[1]
+    self.assertTrue("true", props_to_be_overridden.value)
+
+    self.assertTrue(override_optional_props(self.props))
+
+    # size reduced to 3 because foo?=false was overridden by foo=true
+    self.assertEqual(3, len(self.props.get_all_props()))
+
+    self.assertEqual(1, len(self.props.get_props("foo")))
+    self.assertEqual("true", self.props.get_props("foo")[0].value)
+
+    self.assertEqual("# Removed by post_process_props.py because " +
+                     "overridden by foo=true\n#foo?=false",
+                     str(props_to_be_overridden))
+
+  def test_overridingOptional(self):
+    content = """
+    # comment
+    qux?=2
+    foo=true
+    bar=false
+    qux?=1
+    # another comment
+    foo?=false
+    """
+    with patch("post_process_props.open", mock_open(read_data=content)):
+      props = PropList("hello")
+
+      props_to_be_overridden = props.get_props("qux")[0]
+      self.assertEqual("2", props_to_be_overridden.value)
+
+      self.assertTrue(override_optional_props(props))
+
+      self.assertEqual(1, len(props.get_props("qux")))
+      self.assertEqual("1", props.get_props("qux")[0].value)
+      # the only remaining optional assignment becomes non-optional
+      self.assertFalse(props.get_props("qux")[0].is_optional())
+
+      self.assertEqual("# Removed by post_process_props.py because " +
+                       "overridden by qux?=1\n#qux?=2",
+                       str(props_to_be_overridden))
+
+  def test_overridingDuplicated(self):
+    content = """
+    # comment
+    foo=true
+    bar=false
+    qux?=1
+    foo=false
+    # another comment
+    foo?=false
+    """
+    with patch("post_process_props.open", mock_open(read_data=content)) as m:
+      stderr_redirect = io.StringIO()
+      with contextlib.redirect_stderr(stderr_redirect):
+        props = PropList("hello")
+
+        # fails due to duplicated foo=true and foo=false
+        self.assertFalse(override_optional_props(props))
+
+        self.assertEqual("error: found duplicate sysprop assignments:\n" +
+                         "foo=true\nfoo=false\n", stderr_redirect.getvalue())
+
+  def test_overridingDuplicatedWithSameValue(self):
+    content = """
+    # comment
+    foo=true
+    bar=false
+    qux?=1
+    foo=true
+    # another comment
+    foo?=false
+    """
+    with patch("post_process_props.open", mock_open(read_data=content)) as m:
+      stderr_redirect = io.StringIO()
+      with contextlib.redirect_stderr(stderr_redirect):
+        props = PropList("hello")
+        optional_prop = props.get_props("foo")[2]  # the last one, foo?=false
+
+        # we have duplicated foo=true and foo=true, but that's allowed
+        # since they have the same value
+        self.assertTrue(override_optional_props(props))
+
+        # foo?=false should be commented out
+        self.assertEqual("# Removed by post_process_props.py because " +
+                         "overridden by foo=true\n#foo?=false",
+                         str(optional_prop))
+
+  def test_allowDuplicates(self):
+    content = """
+    # comment
+    foo=true
+    bar=false
+    qux?=1
+    foo=false
+    # another comment
+    foo?=false
+    """
+    with patch("post_process_props.open", mock_open(read_data=content)) as m:
+      stderr_redirect = io.StringIO()
+      with contextlib.redirect_stderr(stderr_redirect):
+        props = PropList("hello")
+
+        # the conflicting foo=true and foo=false assignments are accepted
+        # because allow_dup=True explicitly allows duplicates
+        self.assertTrue(override_optional_props(props, allow_dup=True))
+
+if __name__ == "__main__":
+  unittest.main(verbosity=2)