Merge "Delete the installation rules of host ART boot image from make" into main
diff --git a/cogsetup.sh b/cogsetup.sh
deleted file mode 100644
index 5c64a06..0000000
--- a/cogsetup.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright (C) 2023 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This file is executed by build/envsetup.sh, and can use anything
-# defined in envsetup.sh.
-function _create_out_symlink_for_cog() {
- if [[ "${OUT_DIR}" == "" ]]; then
- OUT_DIR="out"
- fi
-
- # getoutdir ensures paths are absolute. envsetup could be called from a
- # directory other than the root of the source tree
- local outdir=$(getoutdir)
- if [[ -L "${outdir}" ]]; then
- return
- fi
- if [ -d "${outdir}" ]; then
- echo -e "\tOutput directory ${outdir} cannot be present in a Cog workspace."
- echo -e "\tDelete \"${outdir}\" or create a symlink from \"${outdir}\" to a directory outside your workspace."
- return 1
- fi
-
- DEFAULT_OUTPUT_DIR="${HOME}/.cog/android-build-out"
- mkdir -p ${DEFAULT_OUTPUT_DIR}
- ln -s ${DEFAULT_OUTPUT_DIR} ${outdir}
-}
-
-# This function sets up the build environment to be appropriate for Cog.
-function _setup_cog_env() {
- _create_out_symlink_for_cog
- if [ "$?" -eq "1" ]; then
- echo -e "\e[0;33mWARNING:\e[00m Cog environment setup failed!"
- return 1
- fi
-
- export ANDROID_BUILD_ENVIRONMENT_CONFIG="googler-cog"
-
- # Running repo command within Cog workspaces is not supported, so override
- # it with this function. If the user is running repo within a Cog workspace,
- # we'll fail with an error, otherwise, we run the original repo command with
- # the given args.
- if ! ORIG_REPO_PATH=`which repo`; then
- return 0
- fi
- function repo {
- if [[ "${PWD}" == /google/cog/* ]]; then
- echo -e "\e[01;31mERROR:\e[0mrepo command is disallowed within Cog workspaces."
- return 1
- fi
- ${ORIG_REPO_PATH} "$@"
- }
-}
-
-if [[ "${PWD}" != /google/cog/* ]]; then
- echo -e "\e[01;31mERROR:\e[0m This script must be run from a Cog workspace."
-fi
-
-_setup_cog_env
diff --git a/core/Makefile b/core/Makefile
index 49b1432..2cdb24f 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1964,7 +1964,7 @@
installed_system_dlkm_notice_xml_gz := $(TARGET_OUT_SYSTEM_DLKM)/etc/NOTICE.xml.gz
ALL_INSTALLED_NOTICE_FILES := \
- $(installed_notice_html_or_xml_gz) \
+ $(if $(USE_SOONG_DEFINED_SYSTEM_IMAGE),,$(installed_notice_html_or_xml_gz)) \
$(installed_vendor_notice_xml_gz) \
$(installed_product_notice_xml_gz) \
$(installed_system_ext_notice_xml_gz) \
@@ -2051,7 +2051,9 @@
endif # PRODUCT_NOTICE_SPLIT
+ifneq ($(USE_SOONG_DEFINED_SYSTEM_IMAGE),true)
ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
+endif
need_vendor_notice:=false
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
diff --git a/core/combo/arch/arm64/armv9-2a.mk b/core/combo/arch/arm64/armv9-2a.mk
new file mode 100644
index 0000000..69ffde0
--- /dev/null
+++ b/core/combo/arch/arm64/armv9-2a.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# .mk file required to support build for the ARMv9.2-A arch variant.
+# The file just needs to be present; it does not need to contain anything.
diff --git a/core/main.mk b/core/main.mk
index 80ffec4..e5f5b9d 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -687,12 +687,12 @@
# Scan all modules in general-tests, device-tests and other selected suites and
# flatten the shared library dependencies.
define update-host-shared-libs-deps-for-suites
-$(foreach suite,general-tests device-tests vts tvts art-host-tests host-unit-tests,\
+$(foreach suite,general-tests device-tests vts tvts art-host-tests host-unit-tests camera-hal-tests,\
$(foreach m,$(COMPATIBILITY.$(suite).MODULES),\
$(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\
$(foreach dep,$(my_deps),\
$(foreach f,$(ALL_MODULES.$(dep).HOST_SHARED_LIBRARY_FILES),\
- $(if $(filter $(suite),device-tests general-tests art-host-tests host-unit-tests),\
+ $(if $(filter $(suite),device-tests general-tests art-host-tests host-unit-tests camera-hal-tests),\
$(eval my_testcases := $(HOST_OUT_TESTCASES)),\
$(eval my_testcases := $$(COMPATIBILITY_TESTCASES_OUT_$(suite))))\
$(eval target := $(my_testcases)/$(lastword $(subst /, ,$(dir $(f))))/$(notdir $(f)))\
diff --git a/core/os_licensing.mk b/core/os_licensing.mk
index 1e1b7df..d15a3d0 100644
--- a/core/os_licensing.mk
+++ b/core/os_licensing.mk
@@ -17,13 +17,17 @@
$(eval $(call text-notice-rule,$(target_notice_file_txt),"System image",$(system_notice_file_message),$(SYSTEM_NOTICE_DEPS),$(SYSTEM_NOTICE_DEPS)))
+ifneq ($(USE_SOONG_DEFINED_SYSTEM_IMAGE),true)
$(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz)
$(copy-file-to-target)
endif
+endif
$(call declare-1p-target,$(target_notice_file_xml_gz))
+ifneq ($(USE_SOONG_DEFINED_SYSTEM_IMAGE),true)
$(call declare-1p-target,$(installed_notice_html_or_xml_gz))
endif
+endif
.PHONY: vendorlicense
vendorlicense: $(call corresponding-license-metadata, $(VENDOR_NOTICE_DEPS)) reportmissinglicenses
diff --git a/core/packaging/flags.mk b/core/packaging/flags.mk
index 4693bcd..ccb502c 100644
--- a/core/packaging/flags.mk
+++ b/core/packaging/flags.mk
@@ -97,42 +97,54 @@
# $(1): built aconfig flags storage package map file (out)
# $(2): built aconfig flags storage flag map file (out)
# $(3): built aconfig flags storage flag val file (out)
-# $(4): installed aconfig flags storage package map file (out)
-# $(5): installed aconfig flags storage flag map file (out)
-# $(6): installed aconfig flags storage flag value file (out)
-# $(7): input aconfig files for the partition (in)
-# $(8): partition name
+# $(4): built aconfig flags storage flag info file (out)
+# $(5): installed aconfig flags storage package map file (out)
+# $(6): installed aconfig flags storage flag map file (out)
+# $(7): installed aconfig flags storage flag value file (out)
+# $(8): installed aconfig flags storage flag info file (out)
+# $(9): input aconfig files for the partition (in)
+# $(10): partition name
define generate-partition-aconfig-storage-file
$(eval $(strip $(1)): PRIVATE_OUT := $(strip $(1)))
-$(eval $(strip $(1)): PRIVATE_IN := $(strip $(7)))
-$(strip $(1)): $(ACONFIG) $(strip $(7))
+$(eval $(strip $(1)): PRIVATE_IN := $(strip $(9)))
+$(strip $(1)): $(ACONFIG) $(strip $(9))
mkdir -p $$(dir $$(PRIVATE_OUT))
$$(if $$(PRIVATE_IN), \
- $$(ACONFIG) create-storage --container $(8) --file package_map --out $$(PRIVATE_OUT) \
+ $$(ACONFIG) create-storage --container $(10) --file package_map --out $$(PRIVATE_OUT) \
$$(addprefix --cache ,$$(PRIVATE_IN)), \
)
touch $$(PRIVATE_OUT)
$(eval $(strip $(2)): PRIVATE_OUT := $(strip $(2)))
-$(eval $(strip $(2)): PRIVATE_IN := $(strip $(7)))
-$(strip $(2)): $(ACONFIG) $(strip $(7))
+$(eval $(strip $(2)): PRIVATE_IN := $(strip $(9)))
+$(strip $(2)): $(ACONFIG) $(strip $(9))
mkdir -p $$(dir $$(PRIVATE_OUT))
$$(if $$(PRIVATE_IN), \
- $$(ACONFIG) create-storage --container $(8) --file flag_map --out $$(PRIVATE_OUT) \
+ $$(ACONFIG) create-storage --container $(10) --file flag_map --out $$(PRIVATE_OUT) \
$$(addprefix --cache ,$$(PRIVATE_IN)), \
)
touch $$(PRIVATE_OUT)
$(eval $(strip $(3)): PRIVATE_OUT := $(strip $(3)))
-$(eval $(strip $(3)): PRIVATE_IN := $(strip $(7)))
-$(strip $(3)): $(ACONFIG) $(strip $(7))
+$(eval $(strip $(3)): PRIVATE_IN := $(strip $(9)))
+$(strip $(3)): $(ACONFIG) $(strip $(9))
mkdir -p $$(dir $$(PRIVATE_OUT))
$$(if $$(PRIVATE_IN), \
- $$(ACONFIG) create-storage --container $(8) --file flag_val --out $$(PRIVATE_OUT) \
+ $$(ACONFIG) create-storage --container $(10) --file flag_val --out $$(PRIVATE_OUT) \
$$(addprefix --cache ,$$(PRIVATE_IN)), \
)
touch $$(PRIVATE_OUT)
-$(call copy-one-file, $(strip $(1)), $(4))
-$(call copy-one-file, $(strip $(2)), $(5))
-$(call copy-one-file, $(strip $(3)), $(6))
+$(eval $(strip $(4)): PRIVATE_OUT := $(strip $(4)))
+$(eval $(strip $(4)): PRIVATE_IN := $(strip $(9)))
+$(strip $(4)): $(ACONFIG) $(strip $(9))
+ mkdir -p $$(dir $$(PRIVATE_OUT))
+ $$(if $$(PRIVATE_IN), \
+ $$(ACONFIG) create-storage --container $(10) --file flag_info --out $$(PRIVATE_OUT) \
+ $$(addprefix --cache ,$$(PRIVATE_IN)), \
+ )
+ touch $$(PRIVATE_OUT)
+$(call copy-one-file, $(strip $(1)), $(5))
+$(call copy-one-file, $(strip $(2)), $(6))
+$(call copy-one-file, $(strip $(3)), $(7))
+$(call copy-one-file, $(strip $(4)), $(8))
endef
ifeq ($(RELEASE_CREATE_ACONFIG_STORAGE_FILE),true)
@@ -140,13 +152,16 @@
$(eval aconfig_storage_package_map.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/package.map) \
$(eval aconfig_storage_flag_map.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/flag.map) \
$(eval aconfig_storage_flag_val.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/flag.val) \
+ $(eval aconfig_storage_flag_info.$(partition) := $(PRODUCT_OUT)/$(partition)/etc/aconfig/flag.info) \
$(eval $(call generate-partition-aconfig-storage-file, \
$(TARGET_OUT_FLAGS)/$(partition)/package.map, \
$(TARGET_OUT_FLAGS)/$(partition)/flag.map, \
$(TARGET_OUT_FLAGS)/$(partition)/flag.val, \
+ $(TARGET_OUT_FLAGS)/$(partition)/flag.info, \
$(aconfig_storage_package_map.$(partition)), \
$(aconfig_storage_flag_map.$(partition)), \
$(aconfig_storage_flag_val.$(partition)), \
+ $(aconfig_storage_flag_info.$(partition)), \
$(aconfig_flag_summaries_protobuf.$(partition)), \
$(partition), \
)) \
@@ -162,6 +177,7 @@
$(aconfig_storage_package_map.$(partition)) \
$(aconfig_storage_flag_map.$(partition)) \
$(aconfig_storage_flag_val.$(partition)) \
+ $(aconfig_storage_flag_info.$(partition)) \
))
ALL_DEFAULT_INSTALLED_MODULES += $(required_flags_files)
@@ -181,4 +197,5 @@
$(eval aconfig_storage_package_map.$(partition):=) \
$(eval aconfig_storage_flag_map.$(partition):=) \
$(eval aconfig_storage_flag_val.$(partition):=) \
+ $(eval aconfig_storage_flag_info.$(partition):=) \
)
diff --git a/core/product.mk b/core/product.mk
index b07e6e0..93a656d 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -390,20 +390,6 @@
# If set, Java module in product partition cannot use hidden APIs.
_product_single_value_vars += PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE
-# If set, only java_sdk_library can be used at inter-partition dependency.
-# Note: Build error if BOARD_VNDK_VERSION is not set while
-# PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY is true, because
-# PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY has no meaning if
-# BOARD_VNDK_VERSION is not set.
-# Note: When PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE is not set, there are
-# no restrictions at dependency between system and product partition.
-_product_single_value_vars += PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY
-
-# Allowlist for PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY option.
-# Listed modules are allowed at inter-partition dependency even if it isn't
-# a java_sdk_library module.
-_product_list_vars += PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST
-
# Install a copy of the debug policy to the system_ext partition, and allow
# init-second-stage to load debug policy from system_ext.
# This option is only meant to be set by compliance GSI targets.
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 1e6388a..97b707f 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -265,9 +265,6 @@
$(call add_json_bool, EnforceProductPartitionInterface, $(filter true,$(PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE)))
$(call add_json_str, DeviceCurrentApiLevelForVendorModules, $(BOARD_CURRENT_API_LEVEL_FOR_VENDOR_MODULES))
-$(call add_json_bool, EnforceInterPartitionJavaSdkLibrary, $(filter true,$(PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY)))
-$(call add_json_list, InterPartitionJavaLibraryAllowList, $(PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST))
-
$(call add_json_bool, CompressedApex, $(filter true,$(PRODUCT_COMPRESSED_APEX)))
ifndef APEX_BUILD_FOR_PRE_S_DEVICES
diff --git a/core/tasks/mke2fs-dist.mk b/core/tasks/mke2fs-dist.mk
new file mode 100644
index 0000000..3540c1f
--- /dev/null
+++ b/core/tasks/mke2fs-dist.mk
@@ -0,0 +1,22 @@
+# Copyright (C) 2024 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: After Soong's recovery partition variation can be set to selectable
+# and the meta_lic file duplication issue is resolved, move it to the
+# dist section of the corresponding module's Android.bp.
+my_dist_files := $(HOST_OUT_EXECUTABLES)/mke2fs
+my_dist_files += $(HOST_OUT_EXECUTABLES)/make_f2fs
+my_dist_files += $(HOST_OUT_EXECUTABLES)/make_f2fs_casefold
+$(call dist-for-goals,dist_files sdk,$(my_dist_files))
+my_dist_files :=
diff --git a/envsetup.sh b/envsetup.sh
index 06dadd3..3fed5ae 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -442,6 +442,7 @@
function lunch()
{
local answer
+ setup_cog_env_if_needed
if [[ $# -gt 1 ]]; then
echo "usage: lunch [target]" >&2
@@ -1079,10 +1080,7 @@
done
done
- if [[ "${PWD}" == /google/cog/* ]]; then
- f="build/make/cogsetup.sh"
- echo "including $f"; . "$T/$f"
- fi
+ setup_cog_env_if_needed
}
function showcommands() {
diff --git a/shell_utils.sh b/shell_utils.sh
index 86f3f49..c4a6756 100644
--- a/shell_utils.sh
+++ b/shell_utils.sh
@@ -63,6 +63,70 @@
}
fi
+# This function sets up the build environment to be appropriate for Cog.
+function setup_cog_env_if_needed() {
+ local top=$(gettop)
+
+ # return early if not in a cog workspace
+ if [[ ! "$top" =~ ^/google/cog ]]; then
+ return 0
+ fi
+
+ setup_cog_symlink
+
+ export ANDROID_BUILD_ENVIRONMENT_CONFIG="googler-cog"
+
+ # Running repo command within Cog workspaces is not supported, so override
+ # it with this function. If the user is running repo within a Cog workspace,
+ # we'll fail with an error, otherwise, we run the original repo command with
+ # the given args.
+ if ! ORIG_REPO_PATH=`which repo`; then
+ return 0
+ fi
+ function repo {
+ if [[ "${PWD}" == /google/cog/* ]]; then
+ echo -e "\e[01;31mERROR:\e[0mrepo command is disallowed within Cog workspaces."
+ kill -INT $$ # exits the script without exiting the user's shell
+ fi
+ ${ORIG_REPO_PATH} "$@"
+ }
+}
+
+# creates a symlink for the out/ dir when inside a cog workspace.
+function setup_cog_symlink() {
+ local out_dir=$(getoutdir)
+ local top=$(gettop)
+
+ # return early if out dir is already a symlink
+ if [[ -L "$out_dir" ]]; then
+ return 0
+ fi
+
+ # return early if out dir is not in the workspace
+ if [[ ! "$out_dir" =~ ^$top/ ]]; then
+ return 0
+ fi
+
+ local link_destination="${HOME}/.cog/android-build-out"
+
+ # remove existing out/ dir if it exists
+ if [[ -d "$out_dir" ]]; then
+ echo "Detected existing out/ directory in the Cog workspace which is not supported. Repairing workspace by removing it and creating the symlink to ~/.cog/android-build-out"
+ if ! rm -rf "$out_dir"; then
+ echo "Failed to remove existing out/ directory: $out_dir" >&2
+ kill -INT $$ # exits the script without exiting the user's shell
+ fi
+ fi
+
+ # create symlink
+ echo "Creating symlink: $out_dir -> $link_destination"
+ mkdir -p ${link_destination}
+ if ! ln -s "$link_destination" "$out_dir"; then
+ echo "Failed to create cog symlink: $out_dir -> $link_destination" >&2
+ kill -INT $$ # exits the script without exiting the user's shell
+ fi
+}
+
function getoutdir
{
local top=$(gettop)
diff --git a/tools/aconfig/aconfig/Android.bp b/tools/aconfig/aconfig/Android.bp
index 5037783..68521af 100644
--- a/tools/aconfig/aconfig/Android.bp
+++ b/tools/aconfig/aconfig/Android.bp
@@ -68,14 +68,6 @@
],
}
-aconfig_values {
- name: "aconfig.test.flag.second_values",
- package: "com.android.aconfig.test",
- srcs: [
- "tests/third.values",
- ],
-}
-
aconfig_value_set {
name: "aconfig.test.flag.value_set",
values: [
diff --git a/tools/aconfig/aconfig/src/commands.rs b/tools/aconfig/aconfig/src/commands.rs
index b585416..797a893 100644
--- a/tools/aconfig/aconfig/src/commands.rs
+++ b/tools/aconfig/aconfig/src/commands.rs
@@ -17,7 +17,7 @@
use anyhow::{bail, ensure, Context, Result};
use itertools::Itertools;
use protobuf::Message;
-use std::collections::HashMap;
+use std::collections::{BTreeMap, HashMap};
use std::hash::Hasher;
use std::io::Read;
use std::path::PathBuf;
@@ -422,30 +422,23 @@
Ok(flag_ids)
}
-// Creates a fingerprint of the flag names. Sorts the vector.
-pub fn compute_flags_fingerprint(flag_names: &mut Vec<String>) -> Result<u64> {
- flag_names.sort();
-
- let mut hasher = SipHasher13::new();
- for flag in flag_names {
- hasher.write(flag.as_bytes());
- }
- Ok(hasher.finish())
-}
-
#[allow(dead_code)] // TODO: b/316357686 - Use fingerprint in codegen to
// protect hardcoded offset reads.
-fn compute_fingerprint_from_parsed_flags(flags: ProtoParsedFlags) -> Result<u64> {
- let separated_flags: Vec<ProtoParsedFlag> = flags.parsed_flag.into_iter().collect::<Vec<_>>();
+pub fn compute_flag_offsets_fingerprint(flags_map: &HashMap<String, u16>) -> Result<u64> {
+ let mut hasher = SipHasher13::new();
- // All flags must belong to the same package as the fingerprint is per-package.
- let Some(_package) = find_unique_package(&separated_flags) else {
- bail!("No parsed flags, or the parsed flags use different packages.");
- };
+ // Need to sort to ensure the data is added to the hasher in the same order
+ // each run.
+ let sorted_map: BTreeMap<&String, &u16> = flags_map.iter().collect();
- let mut flag_names =
- separated_flags.into_iter().map(|flag| flag.name.unwrap()).collect::<Vec<_>>();
- compute_flags_fingerprint(&mut flag_names)
+ for (flag, offset) in sorted_map {
+ // See https://docs.rs/siphasher/latest/siphasher/#note for use of write
+ // over write_i16. Similarly, use to_be_bytes rather than to_ne_bytes to
+ // ensure consistency.
+ hasher.write(flag.as_bytes());
+ hasher.write(&offset.to_be_bytes());
+ }
+ Ok(hasher.finish())
}
#[cfg(test)]
@@ -456,47 +449,16 @@
#[test]
fn test_offset_fingerprint() {
let parsed_flags = crate::test::parse_test_flags();
- let expected_fingerprint: u64 = 5801144784618221668;
+ let package = find_unique_package(&parsed_flags.parsed_flag).unwrap().to_string();
+ let flag_ids = assign_flag_ids(&package, parsed_flags.parsed_flag.iter()).unwrap();
+ let expected_fingerprint = 10709892481002252132u64;
- let hash_result = compute_fingerprint_from_parsed_flags(parsed_flags);
+ let hash_result = compute_flag_offsets_fingerprint(&flag_ids);
assert_eq!(hash_result.unwrap(), expected_fingerprint);
}
#[test]
- fn test_offset_fingerprint_matches_from_package() {
- let parsed_flags: ProtoParsedFlags = crate::test::parse_test_flags();
-
- // All test flags are in the same package, so fingerprint from all of them.
- let result_from_parsed_flags = compute_fingerprint_from_parsed_flags(parsed_flags.clone());
-
- let mut flag_names_vec = parsed_flags
- .parsed_flag
- .clone()
- .into_iter()
- .map(|flag| flag.name.unwrap())
- .map(String::from)
- .collect::<Vec<_>>();
- let result_from_names = compute_flags_fingerprint(&mut flag_names_vec);
-
- // Assert the same hash is generated for each case.
- assert_eq!(result_from_parsed_flags.unwrap(), result_from_names.unwrap());
- }
-
- #[test]
- fn test_offset_fingerprint_different_packages_does_not_match() {
- // Parse flags from two packages.
- let parsed_flags: ProtoParsedFlags = crate::test::parse_test_flags();
- let second_parsed_flags = crate::test::parse_second_package_flags();
-
- let result_from_parsed_flags = compute_fingerprint_from_parsed_flags(parsed_flags).unwrap();
- let second_result = compute_fingerprint_from_parsed_flags(second_parsed_flags).unwrap();
-
- // Different flags should have a different fingerprint.
- assert_ne!(result_from_parsed_flags, second_result);
- }
-
- #[test]
fn test_parse_flags() {
let parsed_flags = crate::test::parse_test_flags(); // calls parse_flags
aconfig_protos::parsed_flags::verify_fields(&parsed_flags).unwrap();
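Note on the change above: the replacement fingerprint hashes sorted (flag name, offset) pairs rather than a sorted list of names alone, so the digest changes whenever an offset assignment changes. A minimal standalone sketch of the same scheme, assuming the external siphasher crate path (the in-tree build may wire SipHasher13 up differently):

use std::collections::{BTreeMap, HashMap};
use std::hash::Hasher;
use siphasher::sip::SipHasher13; // assumed crate path for SipHasher13

// Hash (flag name, offset) pairs in sorted order so the digest is stable
// across runs regardless of HashMap iteration order.
fn offsets_fingerprint(flags_map: &HashMap<String, u16>) -> u64 {
    let sorted: BTreeMap<&String, &u16> = flags_map.iter().collect();
    let mut hasher = SipHasher13::new();
    for (flag, offset) in sorted {
        hasher.write(flag.as_bytes());
        hasher.write(&offset.to_be_bytes()); // big-endian for cross-run stability
    }
    hasher.finish()
}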
diff --git a/tools/aconfig/aconfig/src/storage/flag_info.rs b/tools/aconfig/aconfig/src/storage/flag_info.rs
new file mode 100644
index 0000000..04e2b93
--- /dev/null
+++ b/tools/aconfig/aconfig/src/storage/flag_info.rs
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::commands::assign_flag_ids;
+use crate::storage::FlagPackage;
+use aconfig_protos::ProtoFlagPermission;
+use aconfig_storage_file::{
+ FlagInfoHeader, FlagInfoList, FlagInfoNode, StorageFileType, FILE_VERSION,
+};
+use anyhow::{anyhow, Result};
+
+fn new_header(container: &str, num_flags: u32) -> FlagInfoHeader {
+ FlagInfoHeader {
+ version: FILE_VERSION,
+ container: String::from(container),
+ file_type: StorageFileType::FlagInfo as u8,
+ file_size: 0,
+ num_flags,
+ boolean_flag_offset: 0,
+ }
+}
+
+pub fn create_flag_info(container: &str, packages: &[FlagPackage]) -> Result<FlagInfoList> {
+ // create list
+ let num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum();
+
+ let mut is_flag_rw = vec![false; num_flags as usize];
+ for pkg in packages.iter() {
+ let start_index = pkg.boolean_start_index as usize;
+ let flag_ids = assign_flag_ids(pkg.package_name, pkg.boolean_flags.iter().copied())?;
+ for pf in pkg.boolean_flags.iter() {
+ let fid = flag_ids
+ .get(pf.name())
+ .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
+ is_flag_rw[start_index + (*fid as usize)] =
+ pf.permission() == ProtoFlagPermission::READ_WRITE;
+ }
+ }
+
+ let mut list = FlagInfoList {
+ header: new_header(container, num_flags),
+ nodes: is_flag_rw.iter().map(|&rw| FlagInfoNode::create(rw)).collect(),
+ };
+
+ // initialize all header fields
+ list.header.boolean_flag_offset = list.header.into_bytes().len() as u32;
+ let bytes_per_node = FlagInfoNode::create(false).into_bytes().len() as u32;
+ list.header.file_size = list.header.boolean_flag_offset + num_flags * bytes_per_node;
+
+ Ok(list)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::storage::{group_flags_by_package, tests::parse_all_test_flags};
+
+ pub fn create_test_flag_info_list_from_source() -> Result<FlagInfoList> {
+ let caches = parse_all_test_flags();
+ let packages = group_flags_by_package(caches.iter());
+ create_flag_info("mockup", &packages)
+ }
+
+ #[test]
+ // this test point locks down the flag info creation and each field
+ fn test_list_contents() {
+ let flag_info_list = create_test_flag_info_list_from_source();
+ assert!(flag_info_list.is_ok());
+ let expected_flag_info_list =
+ aconfig_storage_file::test_utils::create_test_flag_info_list();
+ assert_eq!(flag_info_list.unwrap(), expected_flag_info_list);
+ }
+}
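For context, a sketch of how this new file type is produced end to end, mirroring the FlagInfo arm added to storage/mod.rs below (the exact paths and signatures are assumptions drawn from this patch, not a verbatim API):

// Inside the aconfig crate; `caches` holds the parsed flags of one container.
use crate::storage::{flag_info::create_flag_info, group_flags_by_package};
use aconfig_protos::ProtoParsedFlags;
use anyhow::Result;

fn flag_info_bytes(container: &str, caches: &[ProtoParsedFlags]) -> Result<Vec<u8>> {
    let packages = group_flags_by_package(caches.iter());
    let list = create_flag_info(container, &packages)?;
    Ok(list.into_bytes())
}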
diff --git a/tools/aconfig/aconfig/src/storage/mod.rs b/tools/aconfig/aconfig/src/storage/mod.rs
index efce24e..1d8dcfc 100644
--- a/tools/aconfig/aconfig/src/storage/mod.rs
+++ b/tools/aconfig/aconfig/src/storage/mod.rs
@@ -14,25 +14,24 @@
* limitations under the License.
*/
+pub mod flag_info;
pub mod flag_table;
pub mod flag_value;
pub mod package_table;
-use anyhow::{anyhow, Result};
+use anyhow::Result;
use std::collections::{HashMap, HashSet};
use crate::storage::{
- flag_table::create_flag_table, flag_value::create_flag_value,
+ flag_info::create_flag_info, flag_table::create_flag_table, flag_value::create_flag_value,
package_table::create_package_table,
};
-use aconfig_protos::ProtoParsedFlag;
-use aconfig_protos::ProtoParsedFlags;
+use aconfig_protos::{ProtoParsedFlag, ProtoParsedFlags};
use aconfig_storage_file::StorageFileType;
pub struct FlagPackage<'a> {
pub package_name: &'a str,
pub package_id: u32,
- pub fingerprint: u64,
pub flag_names: HashSet<&'a str>,
pub boolean_flags: Vec<&'a ProtoParsedFlag>,
// The index of the first boolean flag in this aconfig package among all boolean
@@ -45,7 +44,6 @@
FlagPackage {
package_name,
package_id,
- fingerprint: 0,
flag_names: HashSet::new(),
boolean_flags: vec![],
boolean_start_index: 0,
@@ -81,8 +79,6 @@
for p in packages.iter_mut() {
p.boolean_start_index = boolean_start_index;
boolean_start_index += p.boolean_flags.len() as u32;
-
- // TODO: b/316357686 - Calculate fingerprint and add to package.
}
packages
@@ -111,7 +107,10 @@
let flag_value = create_flag_value(container, &packages)?;
Ok(flag_value.into_bytes())
}
- _ => Err(anyhow!("aconfig does not support the creation of this storage file type")),
+ StorageFileType::FlagInfo => {
+ let flag_info = create_flag_info(container, &packages)?;
+ Ok(flag_info.into_bytes())
+ }
}
}
@@ -120,8 +119,6 @@
use super::*;
use crate::Input;
- use aconfig_protos::ProtoParsedFlags;
-
pub fn parse_all_test_flags() -> Vec<ProtoParsedFlags> {
let aconfig_files = [
(
diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs
index 33bb077..c53602f 100644
--- a/tools/aconfig/aconfig/src/storage/package_table.rs
+++ b/tools/aconfig/aconfig/src/storage/package_table.rs
@@ -48,7 +48,6 @@
let node = PackageTableNode {
package_name: String::from(package.package_name),
package_id: package.package_id,
- fingerprint: package.fingerprint,
boolean_start_index: package.boolean_start_index,
next_offset: None,
};
diff --git a/tools/aconfig/aconfig/src/test.rs b/tools/aconfig/aconfig/src/test.rs
index a19b372..7409cda 100644
--- a/tools/aconfig/aconfig/src/test.rs
+++ b/tools/aconfig/aconfig/src/test.rs
@@ -295,24 +295,6 @@
aconfig_protos::parsed_flags::try_from_binary_proto(&bytes).unwrap()
}
- pub fn parse_second_package_flags() -> ProtoParsedFlags {
- let bytes = crate::commands::parse_flags(
- "com.android.aconfig.second_test",
- Some("system"),
- vec![Input {
- source: "tests/test_second_package.aconfig".to_string(),
- reader: Box::new(include_bytes!("../tests/test_second_package.aconfig").as_slice()),
- }],
- vec![Input {
- source: "tests/third.values".to_string(),
- reader: Box::new(include_bytes!("../tests/third.values").as_slice()),
- }],
- crate::commands::DEFAULT_FLAG_PERMISSION,
- )
- .unwrap();
- aconfig_protos::parsed_flags::try_from_binary_proto(&bytes).unwrap()
- }
-
pub fn first_significant_code_diff(a: &str, b: &str) -> Option<String> {
let a = a.lines().map(|line| line.trim_start()).filter(|line| !line.is_empty());
let b = b.lines().map(|line| line.trim_start()).filter(|line| !line.is_empty());
diff --git a/tools/aconfig/aconfig/templates/cpp_source_file.template b/tools/aconfig/aconfig/templates/cpp_source_file.template
index 623034a..852b905 100644
--- a/tools/aconfig/aconfig/templates/cpp_source_file.template
+++ b/tools/aconfig/aconfig/templates/cpp_source_file.template
@@ -2,7 +2,7 @@
{{ if allow_instrumentation }}
{{ if readwrite- }}
-#include <sys/stat.h>
+#include <unistd.h>
#include "aconfig_storage/aconfig_storage_read_api.hpp"
#include <android/log.h>
#define LOG_TAG "aconfig_cpp_codegen"
@@ -78,8 +78,7 @@
, flag_value_file_(nullptr)
, read_from_new_storage_(false) \{
- struct stat buffer;
- if (stat("/metadata/aconfig/boot/enable_only_new_storage", &buffer) == 0) \{
+ if (access("/metadata/aconfig/boot/enable_only_new_storage", F_OK) == 0) \{
read_from_new_storage_ = true;
}
diff --git a/tools/aconfig/aconfig/tests/test.aconfig b/tools/aconfig/aconfig/tests/test.aconfig
index a818b23..c11508a 100644
--- a/tools/aconfig/aconfig/tests/test.aconfig
+++ b/tools/aconfig/aconfig/tests/test.aconfig
@@ -86,4 +86,4 @@
bug: "111"
is_fixed_read_only: true
is_exported: true
-}
+}
\ No newline at end of file
diff --git a/tools/aconfig/aconfig/tests/test_second_package.aconfig b/tools/aconfig/aconfig/tests/test_second_package.aconfig
deleted file mode 100644
index a8740b8..0000000
--- a/tools/aconfig/aconfig/tests/test_second_package.aconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-package: "com.android.aconfig.second_test"
-container: "system"
-
-flag {
- name: "testing_flag"
- namespace: "another_namespace"
- description: "This is a flag for testing."
- bug: "123"
- metadata {
- purpose: PURPOSE_UNSPECIFIED
- }
-}
diff --git a/tools/aconfig/aconfig/tests/third.values b/tools/aconfig/aconfig/tests/third.values
deleted file mode 100644
index 675832a..0000000
--- a/tools/aconfig/aconfig/tests/third.values
+++ /dev/null
@@ -1,6 +0,0 @@
-flag_value {
- package: "com.android.aconfig.second_test"
- name: "testing_flag"
- state: DISABLED
- permission: READ_WRITE
-}
diff --git a/tools/aconfig/aconfig_device_paths/Android.bp b/tools/aconfig/aconfig_device_paths/Android.bp
index 932dfbf..dda7a55 100644
--- a/tools/aconfig/aconfig_device_paths/Android.bp
+++ b/tools/aconfig/aconfig_device_paths/Android.bp
@@ -56,3 +56,16 @@
"//apex_available:platform",
],
}
+
+genrule {
+ name: "libaconfig_java_host_device_paths_src",
+ srcs: ["src/HostDeviceProtosTemplate.java"],
+ out: ["HostDeviceProtos.java"],
+ tool_files: ["partition_aconfig_flags_paths.txt"],
+ cmd: "sed -e '/TEMPLATE/{r$(location partition_aconfig_flags_paths.txt)' -e 'd}' $(in) > $(out)",
+}
+
+java_library_host {
+ name: "aconfig_host_device_paths_java",
+ srcs: [":libaconfig_java_host_device_paths_src"],
+}
diff --git a/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt b/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt
index 140cd21..e997e3d 100644
--- a/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt
+++ b/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt
@@ -1,4 +1,3 @@
"/system/etc/aconfig_flags.pb",
-"/system_ext/etc/aconfig_flags.pb",
"/product/etc/aconfig_flags.pb",
"/vendor/etc/aconfig_flags.pb",
diff --git a/tools/aconfig/aconfig_device_paths/src/HostDeviceProtosTemplate.java b/tools/aconfig/aconfig_device_paths/src/HostDeviceProtosTemplate.java
new file mode 100644
index 0000000..844232b
--- /dev/null
+++ b/tools/aconfig/aconfig_device_paths/src/HostDeviceProtosTemplate.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.aconfig;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * A host lib that can read all aconfig proto file paths on a given device.
+ */
+public class HostDeviceProtos {
+ /**
+ * An interface that executes an ADB command and returns the result.
+ */
+ public static interface AdbCommandExecutor {
+ /** Executes the ADB command. */
+ String executeAdbCommand(String command);
+ }
+
+ static final String[] PATHS = {
+ TEMPLATE
+ };
+
+ private static final String APEX_DIR = "/apex";
+ private static final String RECURSIVELY_LIST_APEX_DIR_COMMAND = "shell find /apex | grep aconfig_flags";
+ private static final String APEX_ACONFIG_PATH_SUFFIX = "/etc/aconfig_flags.pb";
+
+
+ /**
+ * Returns the list of all on-device aconfig proto paths from host side.
+ */
+ public static List<String> parsedFlagsProtoPaths(AdbCommandExecutor adbCommandExecutor) {
+ ArrayList<String> paths = new ArrayList(Arrays.asList(PATHS));
+
+ String adbCommandOutput = adbCommandExecutor.executeAdbCommand(
+ RECURSIVELY_LIST_APEX_DIR_COMMAND);
+
+ if (adbCommandOutput == null) {
+ return paths;
+ }
+
+ Set<String> allFiles = new HashSet<>(Arrays.asList(adbCommandOutput.split("\n")));
+
+ Set<String> subdirs = allFiles.stream().map(file -> {
+ String[] filePaths = file.split("/");
+ // The first element is "", the second element is "apex".
+ return filePaths.length > 2 ? filePaths[2] : "";
+ }).collect(Collectors.toSet());
+
+ for (String prefix : subdirs) {
+ // For each mainline module, there are two directories, one <modulepackage>/,
+ // and one <modulepackage>@<versioncode>/. Just read the former.
+ if (prefix.contains("@")) {
+ continue;
+ }
+
+ String protoPath = APEX_DIR + "/" + prefix + APEX_ACONFIG_PATH_SUFFIX;
+ if (allFiles.contains(protoPath)) {
+ paths.add(protoPath);
+ }
+ }
+ return paths;
+ }
+}
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_info.rs b/tools/aconfig/aconfig_storage_file/src/flag_info.rs
index a49756d..f090396 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_info.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_info.rs
@@ -227,7 +227,7 @@
let bytes = &flag_info_list.into_bytes();
let mut head = 0;
let version = read_u32_from_bytes(bytes, &mut head).unwrap();
- assert_eq!(version, 2);
+ assert_eq!(version, 1);
}
#[test]
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index be82c63..0588fe5 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -253,7 +253,7 @@
let bytes = &flag_table.into_bytes();
let mut head = 0;
let version = read_u32_from_bytes(bytes, &mut head).unwrap();
- assert_eq!(version, 2);
+ assert_eq!(version, 1);
}
#[test]
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_value.rs b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
index c4cf294..b64c10e 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_value.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
@@ -159,7 +159,7 @@
let bytes = &flag_value_list.into_bytes();
let mut head = 0;
let version = read_u32_from_bytes(bytes, &mut head).unwrap();
- assert_eq!(version, 2);
+ assert_eq!(version, 1);
}
#[test]
diff --git a/tools/aconfig/aconfig_storage_file/src/lib.rs b/tools/aconfig/aconfig_storage_file/src/lib.rs
index 19d0e51..cf52bc0 100644
--- a/tools/aconfig/aconfig_storage_file/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_file/src/lib.rs
@@ -58,7 +58,7 @@
};
/// Storage file version
-pub const FILE_VERSION: u32 = 2;
+pub const FILE_VERSION: u32 = 1;
/// Good hash table prime number
pub(crate) const HASH_PRIMES: [u32; 29] = [
@@ -254,16 +254,6 @@
Ok(val)
}
-// Read and parse bytes as u64
-pub fn read_u64_from_bytes(buf: &[u8], head: &mut usize) -> Result<u64, AconfigStorageError> {
- let val =
- u64::from_le_bytes(buf[*head..*head + 8].try_into().map_err(|errmsg| {
- BytesParseFail(anyhow!("fail to parse u64 from bytes: {}", errmsg))
- })?);
- *head += 8;
- Ok(val)
-}
-
/// Read and parse bytes as string
pub(crate) fn read_str_from_bytes(
buf: &[u8],
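With the package-table fingerprint gone there is no u64 field left to parse, which is why read_u64_from_bytes is dropped above. The version field reverted to 1 in this change remains the first little-endian u32 of every storage file, which is how the version-lockdown tests in this patch read it; a hypothetical standalone check (not the in-tree API):

// Returns the storage file version, i.e. the leading little-endian u32.
fn storage_file_version(bytes: &[u8]) -> Option<u32> {
    Some(u32::from_le_bytes(bytes.get(..4)?.try_into().ok()?))
}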
diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs
index 350f072..a5bd9e6 100644
--- a/tools/aconfig/aconfig_storage_file/src/package_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs
@@ -17,10 +17,7 @@
//! package table module defines the package table file format and methods for serialization
//! and deserialization
-use crate::{
- get_bucket_index, read_str_from_bytes, read_u32_from_bytes, read_u64_from_bytes,
- read_u8_from_bytes,
-};
+use crate::{get_bucket_index, read_str_from_bytes, read_u32_from_bytes, read_u8_from_bytes};
use crate::{AconfigStorageError, StorageFileType};
use anyhow::anyhow;
use serde::{Deserialize, Serialize};
@@ -100,7 +97,6 @@
pub struct PackageTableNode {
pub package_name: String,
pub package_id: u32,
- pub fingerprint: u64,
// The index of the first boolean flag in this aconfig package among all boolean
// flags in this container.
pub boolean_start_index: u32,
@@ -112,12 +108,8 @@
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
- "Package: {}, Id: {}, Fingerprint: {}, Boolean flag start index: {}, Next: {:?}",
- self.package_name,
- self.package_id,
- self.fingerprint,
- self.boolean_start_index,
- self.next_offset
+ "Package: {}, Id: {}, Boolean flag start index: {}, Next: {:?}",
+ self.package_name, self.package_id, self.boolean_start_index, self.next_offset
)?;
Ok(())
}
@@ -131,7 +123,6 @@
result.extend_from_slice(&(name_bytes.len() as u32).to_le_bytes());
result.extend_from_slice(name_bytes);
result.extend_from_slice(&self.package_id.to_le_bytes());
- result.extend_from_slice(&self.fingerprint.to_le_bytes());
result.extend_from_slice(&self.boolean_start_index.to_le_bytes());
result.extend_from_slice(&self.next_offset.unwrap_or(0).to_le_bytes());
result
@@ -143,7 +134,6 @@
let node = Self {
package_name: read_str_from_bytes(bytes, &mut head)?,
package_id: read_u32_from_bytes(bytes, &mut head)?,
- fingerprint: read_u64_from_bytes(bytes, &mut head)?,
boolean_start_index: read_u32_from_bytes(bytes, &mut head)?,
next_offset: match read_u32_from_bytes(bytes, &mut head)? {
0 => None,
@@ -261,7 +251,7 @@
let bytes = &package_table.into_bytes();
let mut head = 0;
let version = read_u32_from_bytes(bytes, &mut head).unwrap();
- assert_eq!(version, 2);
+ assert_eq!(version, 1);
}
#[test]
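Dropping the 8-byte fingerprint shrinks every package node, which is why the test data below moves a bucket entry from 117 to 109, a next_offset from 175 to 159, and the file size from 233 to 209. A sketch of the resulting node layout and size, matching into_bytes above (all integers little-endian):

// [name_len: u32][package_name: name_len bytes]
// [package_id: u32][boolean_start_index: u32][next_offset: u32, 0 = none]
fn package_node_size(package_name: &str) -> usize {
    4 + package_name.len() + 3 * 4
}

// The 34-byte test package names give 50-byte nodes, so the three nodes that
// start at offset 59 land at 59, 109 and 159, and the table ends at 209.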
diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
index 11e2dc6..106666c 100644
--- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs
+++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
@@ -26,33 +26,30 @@
pub fn create_test_package_table() -> PackageTable {
let header = PackageTableHeader {
- version: 2,
+ version: 1,
container: String::from("mockup"),
file_type: StorageFileType::PackageMap as u8,
- file_size: 233,
+ file_size: 209,
num_packages: 3,
bucket_offset: 31,
node_offset: 59,
};
- let buckets: Vec<Option<u32>> = vec![Some(59), None, None, Some(117), None, None, None];
+ let buckets: Vec<Option<u32>> = vec![Some(59), None, None, Some(109), None, None, None];
let first_node = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_2"),
package_id: 1,
- fingerprint: 0,
boolean_start_index: 3,
next_offset: None,
};
let second_node = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_1"),
package_id: 0,
- fingerprint: 0,
boolean_start_index: 0,
- next_offset: Some(175),
+ next_offset: Some(159),
};
let third_node = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_4"),
package_id: 2,
- fingerprint: 0,
boolean_start_index: 6,
next_offset: None,
};
@@ -81,7 +78,7 @@
pub fn create_test_flag_table() -> FlagTable {
let header = FlagTableHeader {
- version: 2,
+ version: 1,
container: String::from("mockup"),
file_type: StorageFileType::FlagMap as u8,
file_size: 321,
@@ -123,7 +120,7 @@
pub fn create_test_flag_value_list() -> FlagValueList {
let header = FlagValueHeader {
- version: 2,
+ version: 1,
container: String::from("mockup"),
file_type: StorageFileType::FlagVal as u8,
file_size: 35,
@@ -136,7 +133,7 @@
pub fn create_test_flag_info_list() -> FlagInfoList {
let header = FlagInfoHeader {
- version: 2,
+ version: 1,
container: String::from("mockup"),
file_type: StorageFileType::FlagInfo as u8,
file_size: 35,
diff --git a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/ByteBufferReader.java b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/ByteBufferReader.java
index 44a82ee..4bea083 100644
--- a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/ByteBufferReader.java
+++ b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/ByteBufferReader.java
@@ -37,10 +37,6 @@
return Short.toUnsignedInt(mByteBuffer.getShort());
}
- public long readLong() {
- return mByteBuffer.getLong();
- }
-
public int readInt() {
return this.mByteBuffer.getInt();
}
diff --git a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java
index 9838a7c..757844a 100644
--- a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java
+++ b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/FlagTable.java
@@ -37,9 +37,16 @@
public Node get(int packageId, String flagName) {
int numBuckets = (mHeader.mNodeOffset - mHeader.mBucketOffset) / 4;
int bucketIndex = TableUtils.getBucketIndex(makeKey(packageId, flagName), numBuckets);
+ int newPosition = mHeader.mBucketOffset + bucketIndex * 4;
+ if (newPosition >= mHeader.mNodeOffset) {
+ return null;
+ }
- mReader.position(mHeader.mBucketOffset + bucketIndex * 4);
+ mReader.position(newPosition);
int nodeIndex = mReader.readInt();
+ if (nodeIndex < mHeader.mNodeOffset || nodeIndex >= mHeader.mFileSize) {
+ return null;
+ }
while (nodeIndex != -1) {
mReader.position(nodeIndex);
@@ -50,7 +57,7 @@
nodeIndex = node.mNextOffset;
}
- throw new AconfigStorageException("get cannot find flag: " + flagName);
+ return null;
}
public Header getHeader() {
diff --git a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java
index f1288f5..dc1c583 100644
--- a/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java
+++ b/tools/aconfig/aconfig_storage_file/srcs/android/aconfig/storage/PackageTable.java
@@ -35,13 +35,19 @@
}
public Node get(String packageName) {
-
int numBuckets = (mHeader.mNodeOffset - mHeader.mBucketOffset) / 4;
int bucketIndex = TableUtils.getBucketIndex(packageName.getBytes(UTF_8), numBuckets);
-
- mReader.position(mHeader.mBucketOffset + bucketIndex * 4);
+ int newPosition = mHeader.mBucketOffset + bucketIndex * 4;
+ if (newPosition >= mHeader.mNodeOffset) {
+ return null;
+ }
+ mReader.position(newPosition);
int nodeIndex = mReader.readInt();
+ if (nodeIndex < mHeader.mNodeOffset || nodeIndex >= mHeader.mFileSize) {
+ return null;
+ }
+
while (nodeIndex != -1) {
mReader.position(nodeIndex);
Node node = Node.fromBytes(mReader);
@@ -51,7 +57,7 @@
nodeIndex = node.mNextOffset;
}
- throw new AconfigStorageException("get cannot find package: " + packageName);
+ return null;
}
public Header getHeader() {
@@ -118,7 +124,6 @@
private String mPackageName;
private int mPackageId;
- private long mFingerprint;
private int mBooleanStartIndex;
private int mNextOffset;
@@ -126,7 +131,6 @@
Node node = new Node();
node.mPackageName = reader.readString();
node.mPackageId = reader.readInt();
- node.mFingerprint = reader.readLong();
node.mBooleanStartIndex = reader.readInt();
node.mNextOffset = reader.readInt();
node.mNextOffset = node.mNextOffset == 0 ? -1 : node.mNextOffset;
@@ -152,7 +156,6 @@
return Objects.equals(mPackageName, other.mPackageName)
&& mPackageId == other.mPackageId
&& mBooleanStartIndex == other.mBooleanStartIndex
- && mFingerprint == other.mFingerprint
&& mNextOffset == other.mNextOffset;
}
@@ -168,10 +171,6 @@
return mBooleanStartIndex;
}
- public long getFingerprint() {
- return mFingerprint;
- }
-
public int getNextOffset() {
return mNextOffset;
}
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.info b/tools/aconfig/aconfig_storage_file/tests/flag.info
index 9db7fde..6223edf 100644
--- a/tools/aconfig/aconfig_storage_file/tests/flag.info
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.info
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.map b/tools/aconfig/aconfig_storage_file/tests/flag.map
index cf4685c..e868f53 100644
--- a/tools/aconfig/aconfig_storage_file/tests/flag.map
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.val b/tools/aconfig/aconfig_storage_file/tests/flag.val
index 37d4750..ed203d4 100644
--- a/tools/aconfig/aconfig_storage_file/tests/flag.val
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/package.map b/tools/aconfig/aconfig_storage_file/tests/package.map
index 358010c..6c46a03 100644
--- a/tools/aconfig/aconfig_storage_file/tests/package.map
+++ b/tools/aconfig/aconfig_storage_file/tests/package.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/srcs/FlagTableTest.java b/tools/aconfig/aconfig_storage_file/tests/srcs/FlagTableTest.java
index e3b02cd..fd40d4c 100644
--- a/tools/aconfig/aconfig_storage_file/tests/srcs/FlagTableTest.java
+++ b/tools/aconfig/aconfig_storage_file/tests/srcs/FlagTableTest.java
@@ -33,7 +33,7 @@
public void testFlagTable_rightHeader() throws Exception {
FlagTable flagTable = FlagTable.fromBytes(TestDataUtils.getTestFlagMapByteBuffer());
FlagTable.Header header = flagTable.getHeader();
- assertEquals(2, header.getVersion());
+ assertEquals(1, header.getVersion());
assertEquals("mockup", header.getContainer());
assertEquals(FileType.FLAG_MAP, header.getFileType());
assertEquals(321, header.getFileSize());
diff --git a/tools/aconfig/aconfig_storage_file/tests/srcs/FlagValueListTest.java b/tools/aconfig/aconfig_storage_file/tests/srcs/FlagValueListTest.java
index ebc231c..1b0de63 100644
--- a/tools/aconfig/aconfig_storage_file/tests/srcs/FlagValueListTest.java
+++ b/tools/aconfig/aconfig_storage_file/tests/srcs/FlagValueListTest.java
@@ -36,7 +36,7 @@
FlagValueList flagValueList =
FlagValueList.fromBytes(TestDataUtils.getTestFlagValByteBuffer());
FlagValueList.Header header = flagValueList.getHeader();
- assertEquals(2, header.getVersion());
+ assertEquals(1, header.getVersion());
assertEquals("mockup", header.getContainer());
assertEquals(FileType.FLAG_VAL, header.getFileType());
assertEquals(35, header.getFileSize());
diff --git a/tools/aconfig/aconfig_storage_file/tests/srcs/PackageTableTest.java b/tools/aconfig/aconfig_storage_file/tests/srcs/PackageTableTest.java
index 6d56cee..e7e19d8 100644
--- a/tools/aconfig/aconfig_storage_file/tests/srcs/PackageTableTest.java
+++ b/tools/aconfig/aconfig_storage_file/tests/srcs/PackageTableTest.java
@@ -20,6 +20,7 @@
import android.aconfig.storage.FileType;
import android.aconfig.storage.PackageTable;
+
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -27,40 +28,42 @@
@RunWith(JUnit4.class)
public class PackageTableTest {
- @Test
- public void testPackageTable_rightHeader() throws Exception {
- PackageTable packageTable = PackageTable.fromBytes(TestDataUtils.getTestPackageMapByteBuffer());
- PackageTable.Header header = packageTable.getHeader();
- assertEquals(2, header.getVersion());
- assertEquals("mockup", header.getContainer());
- assertEquals(FileType.PACKAGE_MAP, header.getFileType());
- assertEquals(209, header.getFileSize());
- assertEquals(3, header.getNumPackages());
- assertEquals(31, header.getBucketOffset());
- assertEquals(59, header.getNodeOffset());
- }
+ @Test
+ public void testPackageTable_rightHeader() throws Exception {
+ PackageTable packageTable =
+ PackageTable.fromBytes(TestDataUtils.getTestPackageMapByteBuffer());
+ PackageTable.Header header = packageTable.getHeader();
+ assertEquals(1, header.getVersion());
+ assertEquals("mockup", header.getContainer());
+ assertEquals(FileType.PACKAGE_MAP, header.getFileType());
+ assertEquals(209, header.getFileSize());
+ assertEquals(3, header.getNumPackages());
+ assertEquals(31, header.getBucketOffset());
+ assertEquals(59, header.getNodeOffset());
+ }
- @Test
- public void testPackageTable_rightNode() throws Exception {
- PackageTable packageTable = PackageTable.fromBytes(TestDataUtils.getTestPackageMapByteBuffer());
+ @Test
+ public void testPackageTable_rightNode() throws Exception {
+ PackageTable packageTable =
+ PackageTable.fromBytes(TestDataUtils.getTestPackageMapByteBuffer());
- PackageTable.Node node1 = packageTable.get("com.android.aconfig.storage.test_1");
- PackageTable.Node node2 = packageTable.get("com.android.aconfig.storage.test_2");
- PackageTable.Node node4 = packageTable.get("com.android.aconfig.storage.test_4");
+ PackageTable.Node node1 = packageTable.get("com.android.aconfig.storage.test_1");
+ PackageTable.Node node2 = packageTable.get("com.android.aconfig.storage.test_2");
+ PackageTable.Node node4 = packageTable.get("com.android.aconfig.storage.test_4");
- assertEquals("com.android.aconfig.storage.test_1", node1.getPackageName());
- assertEquals("com.android.aconfig.storage.test_2", node2.getPackageName());
- assertEquals("com.android.aconfig.storage.test_4", node4.getPackageName());
+ assertEquals("com.android.aconfig.storage.test_1", node1.getPackageName());
+ assertEquals("com.android.aconfig.storage.test_2", node2.getPackageName());
+ assertEquals("com.android.aconfig.storage.test_4", node4.getPackageName());
- assertEquals(0, node1.getPackageId());
- assertEquals(1, node2.getPackageId());
- assertEquals(2, node4.getPackageId());
+ assertEquals(0, node1.getPackageId());
+ assertEquals(1, node2.getPackageId());
+ assertEquals(2, node4.getPackageId());
- assertEquals(0, node1.getBooleanStartIndex());
- assertEquals(3, node2.getBooleanStartIndex());
- assertEquals(6, node4.getBooleanStartIndex());
+ assertEquals(0, node1.getBooleanStartIndex());
+ assertEquals(3, node2.getBooleanStartIndex());
+ assertEquals(6, node4.getBooleanStartIndex());
- assertEquals(175, node1.getNextOffset());
+ assertEquals(159, node1.getNextOffset());
assertEquals(-1, node2.getNextOffset());
assertEquals(-1, node4.getNextOffset());
}
diff --git a/tools/aconfig/aconfig_storage_read_api/src/lib.rs b/tools/aconfig/aconfig_storage_read_api/src/lib.rs
index 59aa749..d76cf3f 100644
--- a/tools/aconfig/aconfig_storage_read_api/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_read_api/src/lib.rs
@@ -507,9 +507,9 @@
#[test]
// this test point locks down flag storage file version number query api
fn test_storage_version_query() {
- assert_eq!(get_storage_file_version("./tests/package.map").unwrap(), 2);
- assert_eq!(get_storage_file_version("./tests/flag.map").unwrap(), 2);
- assert_eq!(get_storage_file_version("./tests/flag.val").unwrap(), 2);
- assert_eq!(get_storage_file_version("./tests/flag.info").unwrap(), 2);
+ assert_eq!(get_storage_file_version("./tests/package.map").unwrap(), 1);
+ assert_eq!(get_storage_file_version("./tests/flag.map").unwrap(), 1);
+ assert_eq!(get_storage_file_version("./tests/flag.val").unwrap(), 1);
+ assert_eq!(get_storage_file_version("./tests/flag.info").unwrap(), 1);
}
}
diff --git a/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java b/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java
index 29ebee5..6fbcdb3 100644
--- a/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java
+++ b/tools/aconfig/aconfig_storage_read_api/srcs/android/aconfig/storage/StorageInternalReader.java
@@ -53,9 +53,6 @@
@UnsupportedAppUsage
public boolean getBooleanFlagValue(int index) {
index += mPackageBooleanStartOffset;
- if (index >= mFlagValueList.size()) {
- throw new AconfigStorageException("Fail to get boolean flag value");
- }
return mFlagValueList.getBoolean(index);
}
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/flag.info b/tools/aconfig/aconfig_storage_read_api/tests/flag.info
index 9db7fde..6223edf 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/flag.info
+++ b/tools/aconfig/aconfig_storage_read_api/tests/flag.info
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/flag.map b/tools/aconfig/aconfig_storage_read_api/tests/flag.map
index cf4685c..e868f53 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/flag.map
+++ b/tools/aconfig/aconfig_storage_read_api/tests/flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/flag.val b/tools/aconfig/aconfig_storage_read_api/tests/flag.val
index 37d4750..ed203d4 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/flag.val
+++ b/tools/aconfig/aconfig_storage_read_api/tests/flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/package.map b/tools/aconfig/aconfig_storage_read_api/tests/package.map
index 358010c..6c46a03 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/package.map
+++ b/tools/aconfig/aconfig_storage_read_api/tests/package.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp
index 58460d1..6d29045 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp
+++ b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp
@@ -80,16 +80,16 @@
TEST_F(AconfigStorageTest, test_storage_version_query) {
auto version = api::get_storage_file_version(package_map);
ASSERT_TRUE(version.ok());
- ASSERT_EQ(*version, 2);
+ ASSERT_EQ(*version, 1);
version = api::get_storage_file_version(flag_map);
ASSERT_TRUE(version.ok());
- ASSERT_EQ(*version, 2);
+ ASSERT_EQ(*version, 1);
version = api::get_storage_file_version(flag_val);
ASSERT_TRUE(version.ok());
- ASSERT_EQ(*version, 2);
+ ASSERT_EQ(*version, 1);
version = api::get_storage_file_version(flag_info);
ASSERT_TRUE(version.ok());
- ASSERT_EQ(*version, 2);
+ ASSERT_EQ(*version, 1);
}
/// Negative test to lock down the error when mapping none exist storage files
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs
index bd1b584..afc44d4 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs
+++ b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs
@@ -200,9 +200,9 @@
#[test]
fn test_storage_version_query() {
- assert_eq!(get_storage_file_version("./package.map").unwrap(), 2);
- assert_eq!(get_storage_file_version("./flag.map").unwrap(), 2);
- assert_eq!(get_storage_file_version("./flag.val").unwrap(), 2);
- assert_eq!(get_storage_file_version("./flag.info").unwrap(), 2);
+ assert_eq!(get_storage_file_version("./package.map").unwrap(), 1);
+ assert_eq!(get_storage_file_version("./flag.map").unwrap(), 1);
+ assert_eq!(get_storage_file_version("./flag.val").unwrap(), 1);
+ assert_eq!(get_storage_file_version("./flag.info").unwrap(), 1);
}
}
diff --git a/tools/aconfig/fake_device_config/Android.bp b/tools/aconfig/fake_device_config/Android.bp
index 7704742..1f17e6b 100644
--- a/tools/aconfig/fake_device_config/Android.bp
+++ b/tools/aconfig/fake_device_config/Android.bp
@@ -15,9 +15,7 @@
java_library {
name: "fake_device_config",
srcs: [
- "src/android/util/Log.java",
- "src/android/provider/DeviceConfig.java",
- "src/android/os/StrictMode.java",
+ "src/**/*.java",
],
sdk_version: "none",
system_modules: "core-all-system-modules",
diff --git a/tools/aconfig/fake_device_config/src/android/provider/AconfigPackage.java b/tools/aconfig/fake_device_config/src/android/provider/AconfigPackage.java
new file mode 100644
index 0000000..2f01b8c
--- /dev/null
+++ b/tools/aconfig/fake_device_config/src/android/provider/AconfigPackage.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.provider;
+
+/*
+ * This class allows generated aconfig code to compile independently of the framework.
+ */
+public class AconfigPackage {
+
+ /** Flag value is true */
+ public static final int FLAG_BOOLEAN_VALUE_TRUE = 1;
+
+ /** Flag value is false */
+ public static final int FLAG_BOOLEAN_VALUE_FALSE = 0;
+
+ /** Flag value doesn't exist */
+ public static final int FLAG_BOOLEAN_VALUE_NOT_EXIST = 2;
+
+ public static int getBooleanFlagValue(String packageName, String flagName) {
+ return 0;
+ }
+
+ public AconfigPackage(String packageName) {}
+
+ public int getBooleanFlagValue(String flagName) {
+ return 0;
+ }
+}
\ No newline at end of file
diff --git a/tools/edit_monitor/Android.bp b/tools/edit_monitor/Android.bp
index 80437c0..b939633 100644
--- a/tools/edit_monitor/Android.bp
+++ b/tools/edit_monitor/Android.bp
@@ -19,3 +19,26 @@
default_applicable_licenses: ["Android-Apache-2.0"],
default_team: "trendy_team_adte",
}
+
+python_library_host {
+ name: "edit_monitor_lib",
+ pkg_path: "edit_monitor",
+ srcs: [
+ "daemon_manager.py",
+ ],
+}
+
+python_test_host {
+ name: "daemon_manager_test",
+ main: "daemon_manager_test.py",
+ pkg_path: "edit_monitor",
+ srcs: [
+ "daemon_manager_test.py",
+ ],
+ libs: [
+ "edit_monitor_lib",
+ ],
+ test_options: {
+ unit_test: true,
+ },
+}
diff --git a/tools/edit_monitor/daemon_manager.py b/tools/edit_monitor/daemon_manager.py
new file mode 100644
index 0000000..8ec2588
--- /dev/null
+++ b/tools/edit_monitor/daemon_manager.py
@@ -0,0 +1,182 @@
+# Copyright 2024, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import hashlib
+import logging
+import multiprocessing
+import os
+import pathlib
+import signal
+import subprocess
+import tempfile
+import time
+
+
+DEFAULT_PROCESS_TERMINATION_TIMEOUT_SECONDS = 1
+
+
+def default_daemon_target():
+ """Place holder for the default daemon target."""
+ print("default daemon target")
+
+
+class DaemonManager:
+ """Class to manage and monitor the daemon run as a subprocess."""
+
+ def __init__(
+ self,
+ binary_path: str,
+ daemon_target: callable = default_daemon_target,
+ daemon_args: tuple = (),
+ ):
+ self.binary_path = binary_path
+ self.daemon_target = daemon_target
+ self.daemon_args = daemon_args
+
+ self.pid = os.getpid()
+ self.daemon_process = None
+
+ pid_file_dir = pathlib.Path(tempfile.gettempdir()).joinpath("edit_monitor")
+ pid_file_dir.mkdir(parents=True, exist_ok=True)
+ self.pid_file_path = self._get_pid_file_path(pid_file_dir)
+
+ def start(self):
+ """Writes the pidfile and starts the daemon proces."""
+ try:
+ self._stop_any_existing_instance()
+ self._write_pid_to_pidfile()
+ self._start_daemon_process()
+ except Exception as e:
+ logging.exception("Failed to start daemon manager with error %s", e)
+
+ def stop(self):
+ """Stops the daemon process and removes the pidfile."""
+
+ logging.debug("in daemon manager cleanup.")
+ try:
+ if self.daemon_process and self.daemon_process.is_alive():
+ self._terminate_process(self.daemon_process.pid)
+ self._remove_pidfile()
+ except Exception as e:
+ logging.exception("Failed to stop daemon manager with error %s", e)
+
+ def _stop_any_existing_instance(self):
+ if not self.pid_file_path.exists():
+ logging.debug("No existing instances.")
+ return
+
+ ex_pid = self._read_pid_from_pidfile()
+
+ if ex_pid:
+ logging.info("Found another instance with pid %d.", ex_pid)
+ self._terminate_process(ex_pid)
+ self._remove_pidfile()
+
+ def _read_pid_from_pidfile(self):
+ with open(self.pid_file_path, "r") as f:
+ return int(f.read().strip())
+
+ def _write_pid_to_pidfile(self):
+ """Creates a pidfile and writes the current pid to the file.
+
+ Raises FileExistsError if the pidfile already exists.
+ """
+ try:
+ # Use the 'x' mode to open the file for exclusive creation
+ with open(self.pid_file_path, "x") as f:
+ f.write(f"{self.pid}")
+ except FileExistsError as e:
+ # This can happen due to a race condition when a user tries to start
+ # two edit monitors at the same time, or because an existing edit
+ # monitor is already running and we could not kill it for some reason.
+ logging.exception("pidfile %s already exists.", self.pid_file_path)
+ raise e
+
+ def _start_daemon_process(self):
+ """Starts a subprocess to run the daemon."""
+ p = multiprocessing.Process(
+ target=self.daemon_target, args=self.daemon_args
+ )
+ p.start()
+
+ logging.info("Start subprocess with PID %d", p.pid)
+ self.daemon_process = p
+
+ def _terminate_process(
+ self, pid: int, timeout: int = DEFAULT_PROCESS_TERMINATION_TIMEOUT_SECONDS
+ ):
+ """Terminates a process with given pid.
+
+ It first sends a SIGTERM to the process to allow it for proper
+ termination with a timeout. If the process is not terminated within
+ the timeout, kills it forcefully.
+ """
+ try:
+ os.kill(pid, signal.SIGTERM)
+ if not self._wait_for_process_terminate(pid, timeout):
+ logging.warning(
+ "Process %d not terminated within timeout, try force kill", pid
+ )
+ os.kill(pid, signal.SIGKILL)
+ except ProcessLookupError:
+ logging.info("Process with PID %d not found (already terminated)", pid)
+
+ def _wait_for_process_terminate(self, pid: int, timeout: int) -> bool:
+ start_time = time.time()
+
+ while time.time() < start_time + timeout:
+ if not self._is_process_alive(pid):
+ return True
+ time.sleep(1)
+
+ logging.error("Process %d not terminated within %d seconds.", pid, timeout)
+ return False
+
+ def _is_process_alive(self, pid: int) -> bool:
+ try:
+ output = subprocess.check_output(
+ ["ps", "-p", str(pid), "-o", "state="], text=True
+ ).strip()
+ state = output.split()[0]
+ return state != "Z" # Check if the state is not 'Z' (zombie)
+ except subprocess.CalledProcessError:
+ # Process not found (already dead).
+ return False
+ except (FileNotFoundError, OSError, ValueError) as e:
+ logging.warning(
+ "Unable to check the status for process %d with error: %s.", pid, e
+ )
+ return True
+
+ def _remove_pidfile(self):
+ try:
+ os.remove(self.pid_file_path)
+ except FileNotFoundError:
+ logging.info("pid file %s already removed.", self.pid_file_path)
+
+ def _get_pid_file_path(self, pid_file_dir: pathlib.Path) -> pathlib.Path:
+ """Generates the path to store the pidfile.
+
+ The file path has the format "/tmp/edit_monitor/xxxx.lock", where xxxx
+ is the sha256 hash of the binary path that starts the process.
+ """
+ hash_object = hashlib.sha256()
+ hash_object.update(self.binary_path.encode("utf-8"))
+ pid_file_path = pid_file_dir.joinpath(hash_object.hexdigest() + ".lock")
+ logging.info("pid_file_path: %s", pid_file_path)
+
+ return pid_file_path
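For orientation, a minimal usage sketch of the DaemonManager added above; the binary path and daemon target below are hypothetical placeholders:

    from edit_monitor import daemon_manager

    def my_daemon():
        # Hypothetical long-running work performed by the monitored daemon.
        ...

    # The pidfile lands under <tempdir>/edit_monitor/<sha256(binary_path)>.lock.
    dm = daemon_manager.DaemonManager('/path/to/launching/binary', daemon_target=my_daemon)
    dm.start()  # stops any existing instance, writes the pidfile, starts the daemon subprocess
    # ... later ...
    dm.stop()   # terminates the daemon subprocess and removes the pidfile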
diff --git a/tools/edit_monitor/daemon_manager_test.py b/tools/edit_monitor/daemon_manager_test.py
new file mode 100644
index 0000000..214b038
--- /dev/null
+++ b/tools/edit_monitor/daemon_manager_test.py
@@ -0,0 +1,253 @@
+# Copyright 2024, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for DaemonManager."""
+
+import logging
+import multiprocessing
+import os
+import pathlib
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import unittest
+from unittest import mock
+from edit_monitor import daemon_manager
+
+TEST_BINARY_FILE = '/path/to/test_binary'
+TEST_PID_FILE_PATH = (
+ '587239c2d1050afdf54512e2d799f3b929f86b43575eb3c7b4bab105dd9bd25e.lock'
+)
+
+
+def simple_daemon(output_file):
+ with open(output_file, 'w') as f:
+ f.write('running daemon target')
+
+
+def long_running_daemon():
+ while True:
+ time.sleep(1)
+
+
+class DaemonManagerTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ # Configure logging to print to stdout.
+ logging.basicConfig(filename=None, level=logging.DEBUG)
+ console = logging.StreamHandler(sys.stdout)
+ logging.getLogger('').addHandler(console)
+
+ def setUp(self):
+ super().setUp()
+ self.original_tempdir = tempfile.tempdir
+ self.working_dir = tempfile.TemporaryDirectory()
+ # Sets the tempdir under the working dir so any temp files created during
+ # tests will be cleaned up.
+ tempfile.tempdir = self.working_dir.name
+
+ def tearDown(self):
+ # Cleans up any child processes left by the tests.
+ self._cleanup_child_processes()
+ self.working_dir.cleanup()
+ # Restores tempdir.
+ tempfile.tempdir = self.original_tempdir
+ super().tearDown()
+
+ def test_start_success_with_no_existing_instance(self):
+ self.assert_run_simple_daemon_success()
+
+ def test_start_success_with_existing_instance_running(self):
+ # Create a long running subprocess
+ p = multiprocessing.Process(target=long_running_daemon)
+ p.start()
+
+ # Create a pidfile with the subprocess pid
+ pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath(
+ 'edit_monitor'
+ )
+ pid_file_path_dir.mkdir(parents=True, exist_ok=True)
+ with open(pid_file_path_dir.joinpath(TEST_PID_FILE_PATH), 'w') as f:
+ f.write(str(p.pid))
+
+ self.assert_run_simple_daemon_success()
+ p.terminate()
+
+ def test_start_success_with_existing_instance_already_dead(self):
+ # Create a pidfile with a pid that does not exist.
+ pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath(
+ 'edit_monitor'
+ )
+ pid_file_path_dir.mkdir(parents=True, exist_ok=True)
+ with open(pid_file_path_dir.joinpath(TEST_PID_FILE_PATH), 'w') as f:
+ f.write('123456')
+
+ self.assert_run_simple_daemon_success()
+
+ def test_start_success_with_existing_instance_from_different_binary(self):
+ # First start an instance based on "some_binary_path"
+ existing_dm = daemon_manager.DaemonManager(
+ "some_binary_path",
+ daemon_target=long_running_daemon,
+ )
+ existing_dm.start()
+
+ self.assert_run_simple_daemon_success()
+ existing_dm.stop()
+
+ @mock.patch('os.kill')
+ def test_start_failed_to_kill_existing_instance(self, mock_kill):
+ mock_kill.side_effect = OSError('Unknown OSError')
+ pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath(
+ 'edit_monitor'
+ )
+ pid_file_path_dir.mkdir(parents=True, exist_ok=True)
+ with open(pid_file_path_dir.joinpath(TEST_PID_FILE_PATH), 'w') as f:
+ f.write('123456')
+
+ dm = daemon_manager.DaemonManager(TEST_BINARY_FILE)
+ dm.start()
+
+ # Verify no daemon process is started.
+ self.assertIsNone(dm.daemon_process)
+
+ def test_start_failed_to_write_pidfile(self):
+ pid_file_path_dir = pathlib.Path(self.working_dir.name).joinpath(
+ 'edit_monitor'
+ )
+ pid_file_path_dir.mkdir(parents=True, exist_ok=True)
+ # Makes the directory read-only so writing the pidfile will fail.
+ os.chmod(pid_file_path_dir, 0o555)
+
+ dm = daemon_manager.DaemonManager(TEST_BINARY_FILE)
+ dm.start()
+
+ # Verifies no daemon process is started.
+ self.assertIsNone(dm.daemon_process)
+
+ def test_start_failed_to_start_daemon_process(self):
+ dm = daemon_manager.DaemonManager(
+ TEST_BINARY_FILE, daemon_target='wrong_target', daemon_args=(1)
+ )
+ dm.start()
+
+ # Verifies no daemon process is started.
+ self.assertIsNone(dm.daemon_process)
+
+ def test_stop_success(self):
+ dm = daemon_manager.DaemonManager(
+ TEST_BINARY_FILE, daemon_target=long_running_daemon
+ )
+ dm.start()
+ dm.stop()
+
+ self.assert_no_subprocess_running()
+ self.assertFalse(dm.pid_file_path.exists())
+
+ @mock.patch('os.kill')
+ def test_stop_failed_to_kill_daemon_process(self, mock_kill):
+ mock_kill.side_effect = OSError('Unknown OSError')
+ dm = daemon_manager.DaemonManager(
+ TEST_BINARY_FILE, daemon_target=long_running_daemon
+ )
+ dm.start()
+ dm.stop()
+
+ self.assertTrue(dm.daemon_process.is_alive())
+ self.assertTrue(dm.pid_file_path.exists())
+
+ @mock.patch('os.remove')
+ def test_stop_failed_to_remove_pidfile(self, mock_remove):
+ mock_remove.side_effect = OSError('Unknown OSError')
+
+ dm = daemon_manager.DaemonManager(
+ TEST_BINARY_FILE, daemon_target=long_running_daemon
+ )
+ dm.start()
+ dm.stop()
+
+ self.assert_no_subprocess_running()
+ self.assertTrue(dm.pid_file_path.exists())
+
+ def assert_run_simple_daemon_success(self):
+ daemon_output_file = tempfile.NamedTemporaryFile(
+ dir=self.working_dir.name, delete=False
+ )
+ dm = daemon_manager.DaemonManager(
+ TEST_BINARY_FILE,
+ daemon_target=simple_daemon,
+ daemon_args=(daemon_output_file.name,),
+ )
+ dm.start()
+ dm.daemon_process.join()
+
+ # Verifies the expected pid file is created.
+ expected_pid_file_path = pathlib.Path(self.working_dir.name).joinpath(
+ 'edit_monitor', TEST_PID_FILE_PATH
+ )
+ self.assertTrue(expected_pid_file_path.exists())
+
+ # Verify the daemon process is executed successfully.
+ with open(daemon_output_file.name, 'r') as f:
+ contents = f.read()
+ self.assertEqual(contents, 'running daemon target')
+
+ def assert_no_subprocess_running(self):
+ child_pids = self._get_child_processes(os.getpid())
+ for child_pid in child_pids:
+ self.assertFalse(
+ self._is_process_alive(child_pid), f'process {child_pid} still alive'
+ )
+
+ def _get_child_processes(self, parent_pid):
+ try:
+ output = subprocess.check_output(
+ ['ps', '-o', 'pid,ppid', '--no-headers'], text=True
+ )
+
+ child_processes = []
+ for line in output.splitlines():
+ pid, ppid = line.split()
+ if int(ppid) == parent_pid:
+ child_processes.append(int(pid))
+ return child_processes
+ except subprocess.CalledProcessError as e:
+ self.fail(f'failed to get child process, error: {e}')
+
+ def _is_process_alive(self, pid):
+ try:
+ output = subprocess.check_output(
+ ['ps', '-p', str(pid), '-o', 'state='], text=True
+ ).strip()
+ state = output.split()[0]
+ return state != 'Z' # Check if the state is not 'Z' (zombie)
+ except subprocess.CalledProcessError:
+ return False
+
+ def _cleanup_child_processes(self):
+ child_pids = self._get_child_processes(os.getpid())
+ for child_pid in child_pids:
+ try:
+ os.kill(child_pid, signal.SIGKILL)
+ except ProcessLookupError:
+ # process already terminated
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
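For context, TEST_PID_FILE_PATH above follows the naming scheme in DaemonManager._get_pid_file_path(): the sha256 hex digest of the binary path plus a ".lock" suffix. A minimal sketch of that derivation:

    import hashlib

    TEST_BINARY_FILE = '/path/to/test_binary'
    # Should reproduce the TEST_PID_FILE_PATH constant used by the tests above.
    pid_file_name = hashlib.sha256(TEST_BINARY_FILE.encode('utf-8')).hexdigest() + '.lock'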
diff --git a/tools/filelistdiff/allowlist b/tools/filelistdiff/allowlist
index 120045e..83f5ab8 100644
--- a/tools/filelistdiff/allowlist
+++ b/tools/filelistdiff/allowlist
@@ -4,23 +4,6 @@
framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.odex.fsv_meta
framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.vdex
framework/oat/x86_64/apex@com.android.compos@javalib@service-compos.jar@classes.vdex.fsv_meta
-lib/aaudio-aidl-cpp.so
-lib/android.hardware.biometrics.fingerprint@2.1.so
-lib/android.hardware.radio.config@1.0.so
-lib/android.hardware.radio.deprecated@1.0.so
-lib/android.hardware.radio@1.0.so
-lib/android.hardware.radio@1.1.so
-lib/android.hardware.radio@1.2.so
-lib/android.hardware.radio@1.3.so
-lib/android.hardware.radio@1.4.so
-lib/android.hardware.secure_element@1.0.so
-lib/com.android.media.aaudio-aconfig-cc.so
-lib/heapprofd_client.so
-lib/heapprofd_client_api.so
-lib/libaaudio.so
-lib/libaaudio_internal.so
-lib/libalarm_jni.so
-lib/libamidi.so
lib/libcups.so
lib/libjni_deviceAsWebcam.so
lib/libprintspooler_jni.so
@@ -29,7 +12,6 @@
lib/libyuv.so
# b/351258461
-adb_keys
init.environ.rc
# Known diffs only in the Soong system image
@@ -37,15 +19,4 @@
lib/libuinputcommand_jni.so
# Known diffs in internal source
-bin/uprobestats
-etc/aconfig/flag.map
-etc/aconfig/flag.val
-etc/aconfig/package.map
-etc/bpf/uprobestats/BitmapAllocation.o
-etc/bpf/uprobestats/GenericInstrumentation.o
-etc/bpf/uprobestats/ProcessManagement.o
-etc/init/UprobeStats.rc
-lib/libuprobestats_client.so
-lib64/libuprobestats_client.so
-priv-app/DeviceDiagnostics/DeviceDiagnostics.apk
-
+etc/aconfig/flag.info
diff --git a/tools/ide_query/ide_query.sh b/tools/ide_query/ide_query.sh
index 6f9b0c4..8dfffc1 100755
--- a/tools/ide_query/ide_query.sh
+++ b/tools/ide_query/ide_query.sh
@@ -19,7 +19,7 @@
require_top
# Ensure cogsetup (out/ will be symlink outside the repo)
-. ${TOP}/build/make/cogsetup.sh
+setup_cog_env_if_needed
case $(uname -s) in
Linux)
diff --git a/tools/sbom/Android.bp b/tools/sbom/Android.bp
index 6901b06..4f6d3b7 100644
--- a/tools/sbom/Android.bp
+++ b/tools/sbom/Android.bp
@@ -33,6 +33,13 @@
],
}
+python_library_host {
+ name: "compliance_metadata",
+ srcs: [
+ "compliance_metadata.py",
+ ],
+}
+
python_binary_host {
name: "gen_sbom",
srcs: [
@@ -44,6 +51,7 @@
},
},
libs: [
+ "compliance_metadata",
"metadata_file_proto_py",
"libprotobuf-python",
"sbom_lib",
@@ -109,3 +117,17 @@
"sbom_lib",
],
}
+
+python_binary_host {
+ name: "gen_notice_xml",
+ srcs: [
+ "gen_notice_xml.py",
+ ],
+ version: {
+ py3: {
+ embedded_launcher: true,
+ },
+ },
+ libs: [
+ ],
+}
diff --git a/tools/sbom/compliance_metadata.py b/tools/sbom/compliance_metadata.py
new file mode 100644
index 0000000..9910217
--- /dev/null
+++ b/tools/sbom/compliance_metadata.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sqlite3
+
+class MetadataDb:
+ def __init__(self, db):
+ self.conn = sqlite3.connect(':memory:')
+ self.conn.row_factory = sqlite3.Row
+ with sqlite3.connect(db) as c:
+ c.backup(self.conn)
+ self.reorg()
+
+ def reorg(self):
+ # package_license table
+ self.conn.execute("create table package_license as "
+ "select name as package, pkg_default_applicable_licenses as license "
+ "from modules "
+ "where module_type = 'package' ")
+ cursor = self.conn.execute("select package,license from package_license where license like '% %'")
+ multi_licenses_packages = cursor.fetchall()
+ cursor.close()
+ rows = []
+ for p in multi_licenses_packages:
+ licenses = p['license'].strip().split(' ')
+ for lic in licenses:
+ rows.append((p['package'], lic))
+ self.conn.executemany('insert into package_license values (?, ?)', rows)
+ self.conn.commit()
+
+ self.conn.execute("delete from package_license where license like '% %'")
+ self.conn.commit()
+
+ # module_license table
+ self.conn.execute("create table module_license as "
+ "select distinct name as module, package, licenses as license "
+ "from modules "
+ "where licenses != '' ")
+ cursor = self.conn.execute("select module,package,license from module_license where license like '% %'")
+ multi_licenses_modules = cursor.fetchall()
+ cursor.close()
+ rows = []
+ for m in multi_licenses_modules:
+ licenses = m['license'].strip().split(' ')
+ for lic in licenses:
+ rows.append((m['module'], m['package'],lic))
+ self.conn.executemany('insert into module_license values (?, ?, ?)', rows)
+ self.conn.commit()
+
+ self.conn.execute("delete from module_license where license like '% %'")
+ self.conn.commit()
+
+ # module_installed_file table
+ self.conn.execute("create table module_installed_file as "
+ "select id as module_id, name as module_name, package, installed_files as installed_file "
+ "from modules "
+ "where installed_files != '' ")
+ cursor = self.conn.execute("select module_id, module_name, package, installed_file "
+ "from module_installed_file where installed_file like '% %'")
+ multi_installed_file_modules = cursor.fetchall()
+ cursor.close()
+ rows = []
+ for m in multi_installed_file_modules:
+ installed_files = m['installed_file'].strip().split(' ')
+ for f in installed_files:
+ rows.append((m['module_id'], m['module_name'], m['package'], f))
+ self.conn.executemany('insert into module_installed_file values (?, ?, ?, ?)', rows)
+ self.conn.commit()
+
+ self.conn.execute("delete from module_installed_file where installed_file like '% %'")
+ self.conn.commit()
+
+ # module_built_file table
+ self.conn.execute("create table module_built_file as "
+ "select id as module_id, name as module_name, package, built_files as built_file "
+ "from modules "
+ "where built_files != '' ")
+ cursor = self.conn.execute("select module_id, module_name, package, built_file "
+ "from module_built_file where built_file like '% %'")
+ multi_built_file_modules = cursor.fetchall()
+ cursor.close()
+ rows = []
+ for m in multi_built_file_modules:
+ built_files = m['built_file'].strip().split(' ')
+ for f in built_files:
+ rows.append((m['module_id'], m['module_name'], m['package'], f))
+ self.conn.executemany('insert into module_built_file values (?, ?, ?, ?)', rows)
+ self.conn.commit()
+
+ self.conn.execute("delete from module_built_file where built_file like '% %'")
+ self.conn.commit()
+
+
+ # Indexes
+ self.conn.execute('create index idx_modules_id on modules (id)')
+ self.conn.execute('create index idx_modules_name on modules (name)')
+ self.conn.execute('create index idx_package_license_package on package_license (package)')
+ self.conn.execute('create index idx_package_license_license on package_license (license)')
+ self.conn.execute('create index idx_module_license_module on module_license (module)')
+ self.conn.execute('create index idx_module_license_license on module_license (license)')
+ self.conn.execute('create index idx_module_installed_file_module_id on module_installed_file (module_id)')
+ self.conn.execute('create index idx_module_installed_file_installed_file on module_installed_file (installed_file)')
+ self.conn.execute('create index idx_module_built_file_module_id on module_built_file (module_id)')
+ self.conn.execute('create index idx_module_built_file_built_file on module_built_file (built_file)')
+ self.conn.commit()
+
+ def dump_debug_db(self, debug_db):
+ with sqlite3.connect(debug_db) as c:
+ self.conn.backup(c)
+
+ def get_installed_files(self):
+ # Get all records from table make_metadata, which contains all installed files and corresponding make modules' metadata
+ cursor = self.conn.execute('select installed_file, module_path, is_prebuilt_make_module, product_copy_files, kernel_module_copy_files, is_platform_generated, license_text from make_metadata')
+ rows = cursor.fetchall()
+ cursor.close()
+ installed_files_metadata = []
+ for row in rows:
+ metadata = dict(zip(row.keys(), row))
+ installed_files_metadata.append(metadata)
+ return installed_files_metadata
+
+ def get_soong_modules(self):
+ # Get all records from table modules, which contains metadata of all soong modules
+ cursor = self.conn.execute('select name, package, package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files from modules')
+ rows = cursor.fetchall()
+ cursor.close()
+ soong_modules = []
+ for row in rows:
+ soong_module = dict(zip(row.keys(), row))
+ soong_modules.append(soong_module)
+ return soong_modules
+
+ def get_package_licenses(self, package):
+ cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text '
+ 'from package_license pl join modules m on pl.license = m.name '
+ 'where pl.package = ?',
+ ('//' + package,))
+ rows = cursor.fetchall()
+ licenses = {}
+ for r in rows:
+ licenses[r['name']] = r['license_text']
+ return licenses
+
+ def get_module_licenses(self, module_name, package):
+ licenses = {}
+ # If property "licenses" is defined on module
+ cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text '
+ 'from module_license ml join modules m on ml.license = m.name '
+ 'where ml.module = ? and ml.package = ?',
+ (module_name, package))
+ rows = cursor.fetchall()
+ for r in rows:
+ licenses[r['name']] = r['license_text']
+ if len(licenses) > 0:
+ return licenses
+
+ # Use default package license
+ cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text '
+ 'from package_license pl join modules m on pl.license = m.name '
+ 'where pl.package = ?',
+ ('//' + package,))
+ rows = cursor.fetchall()
+ for r in rows:
+ licenses[r['name']] = r['license_text']
+ return licenses
+
+ def get_soong_module_of_installed_file(self, installed_file):
+ cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files '
+ 'from modules m join module_installed_file mif on m.id = mif.module_id '
+ 'where mif.installed_file = ?',
+ (installed_file,))
+ rows = cursor.fetchall()
+ cursor.close()
+ if rows:
+ soong_module = dict(zip(rows[0].keys(), rows[0]))
+ return soong_module
+
+ return None
+
+ def get_soong_module_of_built_file(self, built_file):
+ cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files '
+ 'from modules m join module_built_file mbf on m.id = mbf.module_id '
+ 'where mbf.built_file = ?',
+ (built_file,))
+ rows = cursor.fetchall()
+ cursor.close()
+ if rows:
+ soong_module = dict(zip(rows[0].keys(), rows[0]))
+ return soong_module
+
+ return None
\ No newline at end of file
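For orientation, a minimal sketch of driving the extracted MetadataDb library directly; the db path is taken from the usage example in gen_notice_xml.py below, and the debug path is hypothetical:

    import compliance_metadata

    db = compliance_metadata.MetadataDb(
        'out/soong/compliance-metadata/aosp_cf_x86_64_phone/compliance-metadata.db')
    db.dump_debug_db('/tmp/compliance-metadata-debug.db')  # optional copy for inspection

    for entry in db.get_installed_files():
        soong_module = db.get_soong_module_of_installed_file(entry['installed_file'])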
diff --git a/tools/sbom/gen_notice_xml.py b/tools/sbom/gen_notice_xml.py
new file mode 100644
index 0000000..eaa6e5a
--- /dev/null
+++ b/tools/sbom/gen_notice_xml.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate NOTICE.xml.gz of a partition.
+Usage example:
+ gen_notice_xml.py --output_file out/soong/.intermediate/.../NOTICE.xml.gz \
+ --metadata out/soong/compliance-metadata/aosp_cf_x86_64_phone/compliance-metadata.db \
+ --partition system \
+ --product_out out/target/vsoc_x86_64 \
+ --soong_out out/soong
+"""
+
+import argparse
+
+
+FILE_HEADER = '''\
+<?xml version="1.0" encoding="utf-8"?>
+<licenses>
+'''
+FILE_FOOTER = '''\
+</licenses>
+'''
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Print more information.')
+ parser.add_argument('-d', '--debug', action='store_true', default=True, help='Debug mode')
+ parser.add_argument('--output_file', required=True, help='The path of the generated NOTICE.xml.gz file.')
+ parser.add_argument('--partition', required=True, help='The name of the partition for which the NOTICE.xml.gz is generated.')
+ parser.add_argument('--metadata', required=True, help='The path of compliance metadata DB file.')
+ parser.add_argument('--product_out', required=True, help='The path of PRODUCT_OUT, e.g. out/target/product/vsoc_x86_64.')
+ parser.add_argument('--soong_out', required=True, help='The path of Soong output directory, e.g. out/soong')
+
+ return parser.parse_args()
+
+
+def log(*info):
+ if args.verbose:
+ for i in info:
+ print(i)
+
+
+def new_file_name_tag(file_metadata, package_name):
+ file_path = file_metadata['installed_file'].removeprefix(args.product_out)
+ lib = 'Android'
+ if package_name:
+ lib = package_name
+ return f'<file-name contentId="" lib="{lib}">{file_path}</file-name>\n'
+
+
+def new_file_content_tag():
+ pass
+
+
+def main():
+ global args
+ args = get_args()
+ log('Args:', vars(args))
+
+ with open(args.output_file, 'w', encoding="utf-8") as notice_xml_file:
+ notice_xml_file.write(FILE_HEADER)
+ notice_xml_file.write(FILE_FOOTER)
+
+
+if __name__ == '__main__':
+ main()
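For illustration, the new_file_name_tag() helper above produces entries like the following; the library name and installed file path are hypothetical, and args.product_out is assumed to be out/target/vsoc_x86_64 as in the usage example:

    file_metadata = {'installed_file': 'out/target/vsoc_x86_64/system/lib64/libfoo.so'}
    tag = new_file_name_tag(file_metadata, 'libfoo')
    # tag == '<file-name contentId="" lib="libfoo">/system/lib64/libfoo.so</file-name>\n'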
diff --git a/tools/sbom/gen_sbom.py b/tools/sbom/gen_sbom.py
index a203258..9c3a8be 100644
--- a/tools/sbom/gen_sbom.py
+++ b/tools/sbom/gen_sbom.py
@@ -26,6 +26,7 @@
"""
import argparse
+import compliance_metadata
import datetime
import google.protobuf.text_format as text_format
import hashlib
@@ -35,7 +36,6 @@
import metadata_file_pb2
import sbom_data
import sbom_writers
-import sqlite3
# Package type
PKG_SOURCE = 'SOURCE'
@@ -568,202 +568,16 @@
return sorted(all_static_dep_files.keys())
-class MetadataDb:
- def __init__(self, db):
- self.conn = sqlite3.connect(':memory')
- self.conn.row_factory = sqlite3.Row
- with sqlite3.connect(db) as c:
- c.backup(self.conn)
- self.reorg()
-
- def reorg(self):
- # package_license table
- self.conn.execute("create table package_license as "
- "select name as package, pkg_default_applicable_licenses as license "
- "from modules "
- "where module_type = 'package' ")
- cursor = self.conn.execute("select package,license from package_license where license like '% %'")
- multi_licenses_packages = cursor.fetchall()
- cursor.close()
- rows = []
- for p in multi_licenses_packages:
- licenses = p['license'].strip().split(' ')
- for lic in licenses:
- rows.append((p['package'], lic))
- self.conn.executemany('insert into package_license values (?, ?)', rows)
- self.conn.commit()
-
- self.conn.execute("delete from package_license where license like '% %'")
- self.conn.commit()
-
- # module_license table
- self.conn.execute("create table module_license as "
- "select distinct name as module, package, licenses as license "
- "from modules "
- "where licenses != '' ")
- cursor = self.conn.execute("select module,package,license from module_license where license like '% %'")
- multi_licenses_modules = cursor.fetchall()
- cursor.close()
- rows = []
- for m in multi_licenses_modules:
- licenses = m['license'].strip().split(' ')
- for lic in licenses:
- rows.append((m['module'], m['package'],lic))
- self.conn.executemany('insert into module_license values (?, ?, ?)', rows)
- self.conn.commit()
-
- self.conn.execute("delete from module_license where license like '% %'")
- self.conn.commit()
-
- # module_installed_file table
- self.conn.execute("create table module_installed_file as "
- "select id as module_id, name as module_name, package, installed_files as installed_file "
- "from modules "
- "where installed_files != '' ")
- cursor = self.conn.execute("select module_id, module_name, package, installed_file "
- "from module_installed_file where installed_file like '% %'")
- multi_installed_file_modules = cursor.fetchall()
- cursor.close()
- rows = []
- for m in multi_installed_file_modules:
- installed_files = m['installed_file'].strip().split(' ')
- for f in installed_files:
- rows.append((m['module_id'], m['module_name'], m['package'], f))
- self.conn.executemany('insert into module_installed_file values (?, ?, ?, ?)', rows)
- self.conn.commit()
-
- self.conn.execute("delete from module_installed_file where installed_file like '% %'")
- self.conn.commit()
-
- # module_built_file table
- self.conn.execute("create table module_built_file as "
- "select id as module_id, name as module_name, package, built_files as built_file "
- "from modules "
- "where built_files != '' ")
- cursor = self.conn.execute("select module_id, module_name, package, built_file "
- "from module_built_file where built_file like '% %'")
- multi_built_file_modules = cursor.fetchall()
- cursor.close()
- rows = []
- for m in multi_built_file_modules:
- built_files = m['installed_file'].strip().split(' ')
- for f in built_files:
- rows.append((m['module_id'], m['module_name'], m['package'], f))
- self.conn.executemany('insert into module_built_file values (?, ?, ?, ?)', rows)
- self.conn.commit()
-
- self.conn.execute("delete from module_built_file where built_file like '% %'")
- self.conn.commit()
-
-
- # Indexes
- self.conn.execute('create index idx_modules_id on modules (id)')
- self.conn.execute('create index idx_modules_name on modules (name)')
- self.conn.execute('create index idx_package_licnese_package on package_license (package)')
- self.conn.execute('create index idx_package_licnese_license on package_license (license)')
- self.conn.execute('create index idx_module_licnese_module on module_license (module)')
- self.conn.execute('create index idx_module_licnese_license on module_license (license)')
- self.conn.execute('create index idx_module_installed_file_module_id on module_installed_file (module_id)')
- self.conn.execute('create index idx_module_installed_file_installed_file on module_installed_file (installed_file)')
- self.conn.execute('create index idx_module_built_file_module_id on module_built_file (module_id)')
- self.conn.execute('create index idx_module_built_file_built_file on module_built_file (built_file)')
- self.conn.commit()
-
- if args.debug:
- with sqlite3.connect(os.path.dirname(args.metadata) + '/compliance-metadata-debug.db') as c:
- self.conn.backup(c)
-
-
- def get_installed_files(self):
- # Get all records from table make_metadata, which contains all installed files and corresponding make modules' metadata
- cursor = self.conn.execute('select installed_file, module_path, is_prebuilt_make_module, product_copy_files, kernel_module_copy_files, is_platform_generated, license_text from make_metadata')
- rows = cursor.fetchall()
- cursor.close()
- installed_files_metadata = []
- for row in rows:
- metadata = dict(zip(row.keys(), row))
- installed_files_metadata.append(metadata)
- return installed_files_metadata
-
- def get_soong_modules(self):
- # Get all records from table modules, which contains metadata of all soong modules
- cursor = self.conn.execute('select name, package, package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files from modules')
- rows = cursor.fetchall()
- cursor.close()
- soong_modules = []
- for row in rows:
- soong_module = dict(zip(row.keys(), row))
- soong_modules.append(soong_module)
- return soong_modules
-
- def get_package_licenses(self, package):
- cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text '
- 'from package_license pl join modules m on pl.license = m.name '
- 'where pl.package = ?',
- ('//' + package,))
- rows = cursor.fetchall()
- licenses = {}
- for r in rows:
- licenses[r['name']] = r['license_text']
- return licenses
-
- def get_module_licenses(self, module_name, package):
- licenses = {}
- # If property "licenses" is defined on module
- cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text '
- 'from module_license ml join modules m on ml.license = m.name '
- 'where ml.module = ? and ml.package = ?',
- (module_name, package))
- rows = cursor.fetchall()
- for r in rows:
- licenses[r['name']] = r['license_text']
- if len(licenses) > 0:
- return licenses
-
- # Use default package license
- cursor = self.conn.execute('select m.name, m.package, m.lic_license_text as license_text '
- 'from package_license pl join modules m on pl.license = m.name '
- 'where pl.package = ?',
- ('//' + package,))
- rows = cursor.fetchall()
- for r in rows:
- licenses[r['name']] = r['license_text']
- return licenses
-
- def get_soong_module_of_installed_file(self, installed_file):
- cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files '
- 'from modules m join module_installed_file mif on m.id = mif.module_id '
- 'where mif.installed_file = ?',
- (installed_file,))
- rows = cursor.fetchall()
- cursor.close()
- if rows:
- soong_module = dict(zip(rows[0].keys(), rows[0]))
- return soong_module
-
- return None
-
- def get_soong_module_of_built_file(self, built_file):
- cursor = self.conn.execute('select name, m.package, m.package as module_path, module_type as soong_module_type, built_files, installed_files, static_dep_files, whole_static_dep_files '
- 'from modules m join module_built_file mbf on m.id = mbf.module_id '
- 'where mbf.built_file = ?',
- (built_file,))
- rows = cursor.fetchall()
- cursor.close()
- if rows:
- soong_module = dict(zip(rows[0].keys(), rows[0]))
- return soong_module
-
- return None
-
-
def main():
global args
args = get_args()
log('Args:', vars(args))
global db
- db = MetadataDb(args.metadata)
+ db = compliance_metadata.MetadataDb(args.metadata)
+ if args.debug:
+ db.dump_debug_db(os.path.dirname(args.output_file) + '/compliance-metadata-debug.db')
+
global metadata_file_protos
metadata_file_protos = {}
global licenses_text