Merge changes Ife5eb1f8,I8d29c7ee into main
* changes:
Add alias-release-config function
Override release configs in depth
diff --git a/core/packaging/flags.mk b/core/packaging/flags.mk
index 6fc1e4c..62ef3df 100644
--- a/core/packaging/flags.mk
+++ b/core/packaging/flags.mk
@@ -97,6 +97,20 @@
)) \
)
+# Collect the on-device flags into a single file, similar to all_aconfig_declarations.
+required_aconfig_flags_files := \
+ $(sort $(foreach partition, $(filter $(IMAGES_TO_BUILD), $(_FLAG_PARTITIONS)), \
+ $(aconfig_flag_summaries_protobuf.$(partition)) \
+ ))
+
+.PHONY: device_aconfig_declarations
+device_aconfig_declarations: $(PRODUCT_OUT)/device_aconfig_declarations.pb
+$(eval $(call generate-partition-aconfig-flag-file, \
+ $(TARGET_OUT_FLAGS)/device_aconfig_declarations.pb, \
+ $(PRODUCT_OUT)/device_aconfig_declarations.pb, \
+ $(sort $(required_aconfig_flags_files)) \
+)) \
+
# Create a set of storage files for each partition
# $(1): built aconfig flags storage package map file (out)
# $(2): built aconfig flags storage flag map file (out)
@@ -178,6 +192,7 @@
# Clean up
required_flags_files:=
+required_aconfig_flags_files:=
$(foreach partition, $(_FLAG_PARTITIONS), \
$(eval build_flag_summaries.$(partition):=) \
$(eval aconfig_flag_summaries_protobuf.$(partition):=) \
diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs
index 4dd177c..1381e89 100644
--- a/tools/aconfig/aconfig/src/storage/flag_table.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_table.rs
@@ -74,13 +74,7 @@
// should come from the parsed flag, and we will set the flag_type bit
// mask properly.
let flag_type = 1;
- Ok(Self::new(
- package.package_id,
- pf.name(),
- flag_type,
- *fid,
- num_buckets,
- ))
+ Ok(Self::new(package.package_id, pf.name(), flag_type, *fid, num_buckets))
})
.collect::<Result<Vec<_>>>()
}
@@ -112,8 +106,11 @@
let mut offset = header.node_offset;
for i in 0..node_wrappers.len() {
let node_bucket_idx = node_wrappers[i].bucket_index;
- let next_node_bucket_idx =
- if i + 1 < node_wrappers.len() { Some(node_wrappers[i + 1].bucket_index) } else { None };
+ let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+ Some(node_wrappers[i + 1].bucket_index)
+ } else {
+ None
+ };
if buckets[node_bucket_idx as usize].is_none() {
buckets[node_bucket_idx as usize] = Some(offset);
@@ -127,11 +124,8 @@
}
}
- let table = FlagTable {
- header,
- buckets,
- nodes: node_wrappers.into_iter().map(|nw| nw.node).collect(),
- };
+ let table =
+ FlagTable { header, buckets, nodes: node_wrappers.into_iter().map(|nw| nw.node).collect() };
Ok(table)
}
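The loop reformatted above builds the on-disk hash table: nodes are sorted by bucket index, each bucket records the byte offset of its first node, and a node is chained to its successor only when both land in the same bucket. A minimal, self-contained sketch of that layout pattern follows; it is not the aconfig implementation, and the Node type, the per-node sizes, and the same-bucket chaining condition are illustrative assumptions.

    // Sketch of the bucket/offset chaining used when serializing a hash table.
    #[derive(Debug)]
    struct Node {
        bucket_index: u32,
        byte_size: u32, // assumed serialized size of this node
        next_offset: Option<u32>,
    }

    /// Assign each bucket the offset of its first node and chain nodes that
    /// share a bucket via next_offset. `nodes` must already be sorted by bucket.
    fn chain_nodes(nodes: &mut [Node], num_buckets: u32, first_node_offset: u32) -> Vec<Option<u32>> {
        let mut buckets: Vec<Option<u32>> = vec![None; num_buckets as usize];
        let mut offset = first_node_offset;
        for i in 0..nodes.len() {
            let bucket_idx = nodes[i].bucket_index;
            let next_bucket_idx =
                if i + 1 < nodes.len() { Some(nodes[i + 1].bucket_index) } else { None };
            if buckets[bucket_idx as usize].is_none() {
                // First node hashing into this bucket: the bucket records its offset.
                buckets[bucket_idx as usize] = Some(offset);
            }
            offset += nodes[i].byte_size;
            // Chain to the following node only if it falls into the same bucket.
            nodes[i].next_offset = match next_bucket_idx {
                Some(next) if next == bucket_idx => Some(offset),
                _ => None,
            };
        }
        buckets
    }

    fn main() {
        let mut nodes = vec![
            Node { bucket_index: 0, byte_size: 10, next_offset: None },
            Node { bucket_index: 0, byte_size: 12, next_offset: None },
            Node { bucket_index: 3, byte_size: 8, next_offset: None },
        ];
        let buckets = chain_nodes(&mut nodes, 4, 100);
        assert_eq!(buckets, vec![Some(100), None, None, Some(122)]);
        assert_eq!(nodes[0].next_offset, Some(110)); // same bucket, so chained
        assert_eq!(nodes[1].next_offset, None);
        println!("{buckets:?}");
    }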
diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs
index 5ce6165..4c08129 100644
--- a/tools/aconfig/aconfig/src/storage/package_table.rs
+++ b/tools/aconfig/aconfig/src/storage/package_table.rs
@@ -17,8 +17,7 @@
use anyhow::Result;
use aconfig_storage_file::{
- get_table_size, PackageTable, PackageTableHeader, PackageTableNode,
- FILE_VERSION,
+ get_table_size, PackageTable, PackageTableHeader, PackageTableNode, FILE_VERSION,
};
use crate::storage::FlagPackage;
@@ -61,10 +60,8 @@
let num_buckets = get_table_size(num_packages)?;
let mut header = new_header(container, num_packages);
let mut buckets = vec![None; num_buckets as usize];
- let mut node_wrappers: Vec<_> = packages
- .iter()
- .map(|pkg| PackageTableNodeWrapper::new(pkg, num_buckets))
- .collect();
+ let mut node_wrappers: Vec<_> =
+ packages.iter().map(|pkg| PackageTableNodeWrapper::new(pkg, num_buckets)).collect();
// initialize all header fields
header.bucket_offset = header.as_bytes().len() as u32;
@@ -79,8 +76,11 @@
let mut offset = header.node_offset;
for i in 0..node_wrappers.len() {
let node_bucket_idx = node_wrappers[i].bucket_index;
- let next_node_bucket_idx =
- if i + 1 < node_wrappers.len() { Some(node_wrappers[i + 1].bucket_index) } else { None };
+ let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+ Some(node_wrappers[i + 1].bucket_index)
+ } else {
+ None
+ };
if buckets[node_bucket_idx as usize].is_none() {
buckets[node_bucket_idx as usize] = Some(offset);
diff --git a/tools/aconfig/aconfig_protos/src/lib.rs b/tools/aconfig/aconfig_protos/src/lib.rs
index ef16e06..8f5667f 100644
--- a/tools/aconfig/aconfig_protos/src/lib.rs
+++ b/tools/aconfig/aconfig_protos/src/lib.rs
@@ -150,10 +150,7 @@
ensure_required_fields!("flag declarations", pdf, "package");
// TODO(b/312769710): Make the container field required.
- ensure!(
- is_valid_package_ident(pdf.package()),
- "bad flag declarations: bad package"
- );
+ ensure!(is_valid_package_ident(pdf.package()), "bad flag declarations: bad package");
ensure!(
!pdf.has_container() || is_valid_container_ident(pdf.container()),
"bad flag declarations: bad container"
@@ -898,10 +895,7 @@
"#;
let parsed_flags = try_from_binary_proto_from_text_proto(text_proto).unwrap();
let parsed_flag = &parsed_flags.parsed_flag[0];
- assert_eq!(
- crate::parsed_flag::path_to_declaration(parsed_flag),
- "flags.declarations"
- );
+ assert_eq!(crate::parsed_flag::path_to_declaration(parsed_flag), "flags.declarations");
}
#[test]
diff --git a/tools/aconfig/aconfig_storage_file/Android.bp b/tools/aconfig/aconfig_storage_file/Android.bp
index a2650d8..53b693f 100644
--- a/tools/aconfig/aconfig_storage_file/Android.bp
+++ b/tools/aconfig/aconfig_storage_file/Android.bp
@@ -12,6 +12,7 @@
"libaconfig_storage_protos",
"libonce_cell",
"libprotobuf",
+ "libtempfile",
],
}
diff --git a/tools/aconfig/aconfig_storage_file/Cargo.toml b/tools/aconfig/aconfig_storage_file/Cargo.toml
index e65b1bf..54ba6c7 100644
--- a/tools/aconfig/aconfig_storage_file/Cargo.toml
+++ b/tools/aconfig/aconfig_storage_file/Cargo.toml
@@ -9,7 +9,10 @@
[dependencies]
anyhow = "1.0.69"
+memmap2 = "0.8.0"
protobuf = "3.2.0"
+once_cell = "1.19.0"
+tempfile = "3.9.0"
[build-dependencies]
protobuf-codegen = "3.2.0"
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index 99d5a60..dfbd9de 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -17,7 +17,7 @@
//! flag table module defines the flag table file format and methods for serialization
//! and deserialization
-use crate::{read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes, get_bucket_index};
+use crate::{get_bucket_index, read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes};
use anyhow::{anyhow, Result};
/// Flag table header struct
@@ -175,8 +175,7 @@
loop {
let interpreted_node = FlagTableNode::from_bytes(&buf[flag_node_offset..])?;
- if interpreted_node.package_id == package_id &&
- interpreted_node.flag_name == flag {
+ if interpreted_node.package_id == package_id && interpreted_node.flag_name == flag {
return Ok(Some(interpreted_node.flag_id));
}
match interpreted_node.next_offset {
@@ -184,7 +183,6 @@
None => return Ok(None),
}
}
-
}
#[cfg(test)]
@@ -200,13 +198,7 @@
flag_id: u16,
next_offset: Option<u32>,
) -> Self {
- Self {
- package_id,
- flag_name: flag_name.to_string(),
- flag_type,
- flag_id,
- next_offset,
- }
+ Self { package_id, flag_name: flag_name.to_string(), flag_type, flag_id, next_offset }
}
}
@@ -288,9 +280,7 @@
];
for (package_id, flag_name, expected_offset) in baseline.into_iter() {
let flag_offset =
- find_flag_offset(&flag_table[..], package_id, flag_name)
- .unwrap()
- .unwrap();
+ find_flag_offset(&flag_table[..], package_id, flag_name).unwrap().unwrap();
assert_eq!(flag_offset, expected_offset);
}
}
@@ -299,11 +289,9 @@
// this test point locks down table query of a non-existent flag
fn test_not_existed_flag_query() {
let flag_table = create_test_flag_table().unwrap().as_bytes();
- let flag_offset =
- find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
+ let flag_offset = find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
assert_eq!(flag_offset, None);
- let flag_offset =
- find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
+ let flag_offset = find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
assert_eq!(flag_offset, None);
}
@@ -313,8 +301,7 @@
let mut table = create_test_flag_table().unwrap();
table.header.version = crate::FILE_VERSION + 1;
let flag_table = table.as_bytes();
- let error = find_flag_offset(&flag_table[..], 0, "enabled_ro")
- .unwrap_err();
+ let error = find_flag_offset(&flag_table[..], 0, "enabled_ro").unwrap_err();
assert_eq!(
format!("{:?}", error),
format!(
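The find_flag_offset hunks above are formatting-only, but for orientation: a lookup hashes the (package id, flag name) key to a bucket, reads the first node offset recorded in that bucket, and follows next_offset until the key matches or the chain ends. A minimal sketch of that traversal follows; it is not the aconfig code, and the in-memory Node type, the stand-in hash, and index-based "offsets" are illustrative assumptions.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    struct Node {
        package_id: u32,
        flag_name: String,
        flag_id: u16,
        // Index of the next node in the same bucket, if any (byte offsets in the real format).
        next: Option<usize>,
    }

    // Stand-in hash; the real format hashes the serialized key bytes.
    fn bucket_index(package_id: u32, flag: &str, num_buckets: usize) -> usize {
        let mut h = DefaultHasher::new();
        (package_id, flag).hash(&mut h);
        (h.finish() % num_buckets as u64) as usize
    }

    fn find_flag(buckets: &[Option<usize>], nodes: &[Node], package_id: u32, flag: &str) -> Option<u16> {
        let mut cursor = buckets[bucket_index(package_id, flag, buckets.len())];
        while let Some(idx) = cursor {
            let node = &nodes[idx];
            if node.package_id == package_id && node.flag_name == flag {
                return Some(node.flag_id);
            }
            cursor = node.next;
        }
        None
    }

    fn main() {
        let nodes = vec![Node {
            package_id: 1,
            flag_name: "enabled_ro".to_string(),
            flag_id: 2,
            next: None,
        }];
        let mut buckets = vec![None; 8];
        buckets[bucket_index(1, "enabled_ro", buckets.len())] = Some(0);
        assert_eq!(find_flag(&buckets, &nodes, 1, "enabled_ro"), Some(2));
        assert_eq!(find_flag(&buckets, &nodes, 1, "disabled_rw"), None);
    }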
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_value.rs b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
index c8b52a9..bb8892d 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_value.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
@@ -146,9 +146,7 @@
let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
let baseline: Vec<bool> = vec![false, true, false, false, true, true, false, true];
for (offset, expected_value) in baseline.into_iter().enumerate() {
- let flag_value =
- get_boolean_flag_value(&flag_value_list[..], offset as u32)
- .unwrap();
+ let flag_value = get_boolean_flag_value(&flag_value_list[..], offset as u32).unwrap();
assert_eq!(flag_value, expected_value);
}
}
@@ -157,12 +155,8 @@
// this test point locks down queries beyond the end of the boolean section
fn test_boolean_out_of_range() {
let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
- let error = get_boolean_flag_value(&flag_value_list[..], 8)
- .unwrap_err();
- assert_eq!(
- format!("{:?}", error),
- "Flag value offset goes beyond the end of the file."
- );
+ let error = get_boolean_flag_value(&flag_value_list[..], 8).unwrap_err();
+ assert_eq!(format!("{:?}", error), "Flag value offset goes beyond the end of the file.");
}
#[test]
@@ -171,8 +165,7 @@
let mut value_list = create_test_flag_value_list().unwrap();
value_list.header.version = crate::FILE_VERSION + 1;
let flag_value = value_list.as_bytes();
- let error = get_boolean_flag_value(&flag_value[..], 4)
- .unwrap_err();
+ let error = get_boolean_flag_value(&flag_value[..], 4).unwrap_err();
assert_eq!(
format!("{:?}", error),
format!(
diff --git a/tools/aconfig/aconfig_storage_file/src/lib.rs b/tools/aconfig/aconfig_storage_file/src/lib.rs
index f5aecff..a9f5e21 100644
--- a/tools/aconfig/aconfig_storage_file/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_file/src/lib.rs
@@ -21,6 +21,9 @@
pub mod flag_value;
pub mod package_table;
+#[cfg(feature = "cargo")]
+pub mod mapped_file;
+
mod protos;
#[cfg(test)]
mod test_utils;
diff --git a/tools/aconfig/aconfig_storage_file/src/mapped_file.rs b/tools/aconfig/aconfig_storage_file/src/mapped_file.rs
new file mode 100644
index 0000000..4f65df0
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/src/mapped_file.rs
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::{BufReader, Read};
+use std::sync::{Arc, Mutex};
+
+use anyhow::{bail, ensure, Result};
+use memmap2::Mmap;
+use once_cell::sync::Lazy;
+
+use crate::protos::{
+ storage_files::try_from_binary_proto, ProtoStorageFileInfo, ProtoStorageFiles,
+};
+use crate::StorageFileSelection;
+
+/// Cache for already mapped files
+static ALL_MAPPED_FILES: Lazy<Mutex<HashMap<String, MappedStorageFileSet>>> = Lazy::new(|| {
+ let mapped_files = HashMap::new();
+ Mutex::new(mapped_files)
+});
+
+/// Mapped storage files for a particular container
+#[derive(Debug)]
+struct MappedStorageFileSet {
+ package_map: Arc<Mmap>,
+ flag_map: Arc<Mmap>,
+ flag_val: Arc<Mmap>,
+}
+
+/// Find where storage files are stored for a particular container
+fn find_container_storage_location(
+ location_pb_file: &str,
+ container: &str,
+) -> Result<ProtoStorageFileInfo> {
+ let file = File::open(location_pb_file)?;
+ let mut reader = BufReader::new(file);
+ let mut bytes = Vec::new();
+ reader.read_to_end(&mut bytes)?;
+
+ let storage_locations: ProtoStorageFiles = try_from_binary_proto(&bytes)?;
+ for location_info in storage_locations.files.iter() {
+ if location_info.container() == container {
+ return Ok(location_info.clone());
+ }
+ }
+ bail!("Storage file does not exist for {}", container)
+}
+
+/// Verify the file is read only and then map it
+fn verify_read_only_and_map(file_path: &str) -> Result<Mmap> {
+ let file = File::open(file_path)?;
+ let metadata = file.metadata()?;
+ ensure!(
+ metadata.permissions().readonly(),
+ "Cannot mmap file {} as it is not read only",
+ file_path
+ );
+ // SAFETY:
+ //
+ // Mmap constructors are unsafe because the mapping has undefined behavior if the
+ // file is modified after being mapped (https://docs.rs/memmap2/latest/memmap2/struct.Mmap.html).
+ //
+ // We either have to make this API unsafe or ensure that the file will not be
+ // modified, which means it must be read only. Here we explicitly check that the
+ // file being mapped has only read permission and error out otherwise, which
+ // makes the mapping safe.
+ //
+ // We should remove this restriction (by making this API unsafe) if we ever need
+ // to mmap files that are not read only. For now, all flags are boot stable, so
+ // the boot flag file copy should be read only.
+ unsafe { Ok(Mmap::map(&file)?) }
+}
+
+/// Map all storage files for a particular container
+fn map_container_storage_files(
+ location_pb_file: &str,
+ container: &str,
+) -> Result<MappedStorageFileSet> {
+ let files_location = find_container_storage_location(location_pb_file, container)?;
+ let package_map = Arc::new(verify_read_only_and_map(files_location.package_map())?);
+ let flag_map = Arc::new(verify_read_only_and_map(files_location.flag_map())?);
+ let flag_val = Arc::new(verify_read_only_and_map(files_location.flag_val())?);
+ Ok(MappedStorageFileSet { package_map, flag_map, flag_val })
+}
+
+/// Get a mapped storage file given the container and file type
+pub fn get_mapped_file(
+ location_pb_file: &str,
+ container: &str,
+ file_selection: StorageFileSelection,
+) -> Result<Arc<Mmap>> {
+ let mut all_mapped_files = ALL_MAPPED_FILES.lock().unwrap();
+ match all_mapped_files.get(container) {
+ Some(mapped_files) => Ok(match file_selection {
+ StorageFileSelection::PackageMap => Arc::clone(&mapped_files.package_map),
+ StorageFileSelection::FlagMap => Arc::clone(&mapped_files.flag_map),
+ StorageFileSelection::FlagVal => Arc::clone(&mapped_files.flag_val),
+ }),
+ None => {
+ let mapped_files = map_container_storage_files(location_pb_file, container)?;
+ let file_ptr = match file_selection {
+ StorageFileSelection::PackageMap => Arc::clone(&mapped_files.package_map),
+ StorageFileSelection::FlagMap => Arc::clone(&mapped_files.flag_map),
+ StorageFileSelection::FlagVal => Arc::clone(&mapped_files.flag_val),
+ };
+ all_mapped_files.insert(container.to_string(), mapped_files);
+ Ok(file_ptr)
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::test_utils::{get_binary_storage_proto_bytes, write_bytes_to_temp_file};
+
+ #[test]
+ fn test_find_storage_file_location() {
+ let text_proto = r#"
+files {
+ version: 0
+ container: "system"
+ package_map: "/system/etc/package.map"
+ flag_map: "/system/etc/flag.map"
+ flag_val: "/metadata/aconfig/system.val"
+ timestamp: 12345
+}
+files {
+ version: 1
+ container: "product"
+ package_map: "/product/etc/package.map"
+ flag_map: "/product/etc/flag.map"
+ flag_val: "/metadata/aconfig/product.val"
+ timestamp: 54321
+}
+"#;
+ let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+ let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+ let file_full_path = file.path().display().to_string();
+
+ let file_info = find_container_storage_location(&file_full_path, "system").unwrap();
+ assert_eq!(file_info.version(), 0);
+ assert_eq!(file_info.container(), "system");
+ assert_eq!(file_info.package_map(), "/system/etc/package.map");
+ assert_eq!(file_info.flag_map(), "/system/etc/flag.map");
+ assert_eq!(file_info.flag_val(), "/metadata/aconfig/system.val");
+ assert_eq!(file_info.timestamp(), 12345);
+
+ let file_info = find_container_storage_location(&file_full_path, "product").unwrap();
+ assert_eq!(file_info.version(), 1);
+ assert_eq!(file_info.container(), "product");
+ assert_eq!(file_info.package_map(), "/product/etc/package.map");
+ assert_eq!(file_info.flag_map(), "/product/etc/flag.map");
+ assert_eq!(file_info.flag_val(), "/metadata/aconfig/product.val");
+ assert_eq!(file_info.timestamp(), 54321);
+
+ let err = find_container_storage_location(&file_full_path, "vendor").unwrap_err();
+ assert_eq!(format!("{:?}", err), "Storage file does not exist for vendor");
+ }
+
+ fn map_and_verify(
+ location_pb_file: &str,
+ file_selection: StorageFileSelection,
+ actual_file: &str,
+ ) {
+ let mut opened_file = File::open(actual_file).unwrap();
+ let mut content = Vec::new();
+ opened_file.read_to_end(&mut content).unwrap();
+
+ let mmaped_file = get_mapped_file(location_pb_file, "system", file_selection).unwrap();
+ assert_eq!(mmaped_file[..], content[..]);
+ }
+
+ #[test]
+ fn test_mapped_file_contents() {
+ let text_proto = r#"
+files {
+ version: 0
+ container: "system"
+ package_map: "./tests/package.map"
+ flag_map: "./tests/flag.map"
+ flag_val: "./tests/flag.val"
+ timestamp: 12345
+}
+"#;
+ let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+ let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+ let file_full_path = file.path().display().to_string();
+
+ map_and_verify(&file_full_path, StorageFileSelection::PackageMap, "./tests/package.map");
+
+ map_and_verify(&file_full_path, StorageFileSelection::FlagMap, "./tests/flag.map");
+
+ map_and_verify(&file_full_path, StorageFileSelection::FlagVal, "./tests/flag.val");
+ }
+
+ #[test]
+ fn test_map_non_read_only_file() {
+ let text_proto = r#"
+files {
+ version: 0
+ container: "system"
+ package_map: "./tests/rw.package.map"
+ flag_map: "./tests/rw.flag.map"
+ flag_val: "./tests/rw.flag.val"
+ timestamp: 12345
+}
+"#;
+ let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+ let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+ let file_full_path = file.path().display().to_string();
+
+ let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ "Cannot mmap file ./tests/rw.package.map as it is not read only"
+ );
+
+ let text_proto = r#"
+files {
+ version: 0
+ container: "system"
+ package_map: "./tests/package.map"
+ flag_map: "./tests/rw.flag.map"
+ flag_val: "./tests/rw.flag.val"
+ timestamp: 12345
+}
+"#;
+ let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+ let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+ let file_full_path = file.path().display().to_string();
+
+ let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ "Cannot mmap file ./tests/rw.flag.map as it is not read only"
+ );
+
+ let text_proto = r#"
+files {
+ version: 0
+ container: "system"
+ package_map: "./tests/package.map"
+ flag_map: "./tests/flag.map"
+ flag_val: "./tests/rw.flag.val"
+ timestamp: 12345
+}
+"#;
+ let binary_proto_bytes = get_binary_storage_proto_bytes(text_proto).unwrap();
+ let file = write_bytes_to_temp_file(&binary_proto_bytes).unwrap();
+ let file_full_path = file.path().display().to_string();
+
+ let error = map_container_storage_files(&file_full_path, "system").unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ "Cannot mmap file ./tests/rw.flag.val as it is not read only"
+ );
+ }
+}
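For orientation, a minimal usage sketch of the new module follows. The storage-location path below is hypothetical, and the sketch assumes the crate is built with the "cargo" feature that gates mapped_file and that StorageFileSelection is exported from the crate root, as the use crate::StorageFileSelection import above suggests.

    use aconfig_storage_file::mapped_file::get_mapped_file;
    use aconfig_storage_file::StorageFileSelection;
    use anyhow::Result;

    fn main() -> Result<()> {
        // Hypothetical pb file listing the storage files per container.
        let location_pb = "/metadata/aconfig/storage_files.pb";
        // Returns an Arc<Mmap> over the read-only flag value file; repeated calls
        // for the same container reuse the cached mapping.
        let flag_val = get_mapped_file(location_pb, "system", StorageFileSelection::FlagVal)?;
        println!("mapped {} bytes of flag values", flag_val.len());
        Ok(())
    }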
diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
index e1fb6c7..c468683 100644
--- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs
+++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
@@ -14,9 +14,11 @@
* limitations under the License.
*/
+use crate::protos::ProtoStorageFiles;
use anyhow::Result;
use protobuf::Message;
-use crate::protos::ProtoStorageFiles;
+use std::io::Write;
+use tempfile::NamedTempFile;
pub fn get_binary_storage_proto_bytes(text_proto: &str) -> Result<Vec<u8>> {
let storage_files: ProtoStorageFiles = protobuf::text_format::parse_from_str(text_proto)?;
@@ -24,3 +26,9 @@
storage_files.write_to_vec(&mut binary_proto)?;
Ok(binary_proto)
}
+
+pub fn write_bytes_to_temp_file(bytes: &[u8]) -> Result<NamedTempFile> {
+ let mut file = NamedTempFile::new()?;
+ file.write_all(bytes)?;
+ Ok(file)
+}
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.map b/tools/aconfig/aconfig_storage_file/tests/flag.map
new file mode 100644
index 0000000..43b6f9a
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/flag.val b/tools/aconfig/aconfig_storage_file/tests/flag.val
new file mode 100644
index 0000000..f39f8d3
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/package.map b/tools/aconfig/aconfig_storage_file/tests/package.map
new file mode 100644
index 0000000..8ed4767
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/package.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.flag.map b/tools/aconfig/aconfig_storage_file/tests/rw.flag.map
new file mode 100644
index 0000000..43b6f9a
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.flag.map
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.flag.val b/tools/aconfig/aconfig_storage_file/tests/rw.flag.val
new file mode 100644
index 0000000..f39f8d3
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.flag.val
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_file/tests/rw.package.map b/tools/aconfig/aconfig_storage_file/tests/rw.package.map
new file mode 100644
index 0000000..8ed4767
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_file/tests/rw.package.map
Binary files differ
diff --git a/tools/aconfig/printflags/src/main.rs b/tools/aconfig/printflags/src/main.rs
index 7fcde61..a0c9ee8 100644
--- a/tools/aconfig/printflags/src/main.rs
+++ b/tools/aconfig/printflags/src/main.rs
@@ -17,7 +17,7 @@
//! `printflags` is a device binary to print feature flags.
use aconfig_protos::ProtoFlagState as State;
-use aconfig_protos::ProtoParsedFlags as ProtoParsedFlags;
+use aconfig_protos::ProtoParsedFlags;
use anyhow::{bail, Context, Result};
use regex::Regex;
use std::collections::BTreeMap;
diff --git a/tools/perf/benchmarks b/tools/perf/benchmarks
index acc53bb..ad34586 100755
--- a/tools/perf/benchmarks
+++ b/tools/perf/benchmarks
@@ -29,6 +29,7 @@
import subprocess
import time
import uuid
+from typing import Optional
import pretty
import utils
@@ -80,6 +81,33 @@
undo: callable
"Function to revert the source tree to its previous condition in the most minimal way possible."
+_DUMPVARS_VARS = [
+ "COMMON_LUNCH_CHOICES",
+ "HOST_PREBUILT_TAG",
+ "print",
+ "PRODUCT_OUT",
+ "report_config",
+ "TARGET_ARCH",
+ "TARGET_BUILD_VARIANT",
+ "TARGET_DEVICE",
+ "TARGET_PRODUCT",
+]
+
+_DUMPVARS_ABS_VARS = [
+ "ANDROID_CLANG_PREBUILTS",
+ "ANDROID_JAVA_HOME",
+ "ANDROID_JAVA_TOOLCHAIN",
+ "ANDROID_PREBUILTS",
+ "HOST_OUT",
+ "HOST_OUT_EXECUTABLES",
+ "HOST_OUT_TESTCASES",
+ "OUT_DIR",
+ "print",
+ "PRODUCT_OUT",
+ "SOONG_HOST_OUT",
+ "SOONG_HOST_OUT_EXECUTABLES",
+ "TARGET_OUT_TESTCASES",
+]
@dataclasses.dataclass(frozen=True)
class Benchmark:
@@ -94,15 +122,47 @@
change: Change
"Source tree modification for the benchmark that will be measured"
- modules: list[str]
+ dumpvars: Optional[bool] = False
+ "If specified, soong will run in dumpvars mode rather than build-mode."
+
+ modules: Optional[list[str]] = None
"Build modules to build on soong command line"
- preroll: int
+ preroll: Optional[int] = 0
"Number of times to run the build command to stabilize"
- postroll: int
+ postroll: Optional[int] = 3
"Number of times to run the build command after reverting the action to stabilize"
+ def build_description(self):
+ "Short description of the benchmark's Soong invocation."
+ if self.dumpvars:
+ return "dumpvars"
+ elif self.modules:
+ return " ".join(self.modules)
+ return ""
+
+
+ def soong_command(self, root):
+ "Command line args to soong_ui for this benchmark."
+ if self.dumpvars:
+ return [
+ "--dumpvars-mode",
+ f"--vars=\"{' '.join(_DUMPVARS_VARS)}\"",
+ f"--abs-vars=\"{' '.join(_DUMPVARS_ABS_VARS)}\"",
+ "--var-prefix=var_cache_",
+ "--abs-var-prefix=abs_var_cache_",
+ ]
+ elif self.modules:
+ return [
+ "--build-mode",
+ "--all-modules",
+ f"--dir={root}",
+ "--skip-metrics-upload",
+ ] + self.modules
+ else:
+ raise Exception("Benchmark must specify dumpvars or modules")
+
@dataclasses.dataclass(frozen=True)
class FileSnapshot:
@@ -131,8 +191,14 @@
"""Remove the out directory."""
def remove_out():
out_dir = utils.get_out_dir()
+ # Only remove the actual contents, in case out is a symlink (as is the case for cog)
if os.path.exists(out_dir):
- shutil.rmtree(out_dir)
+ for filename in os.listdir(out_dir):
+ p = os.path.join(out_dir, filename)
+ if os.path.isfile(p) or os.path.islink(p):
+ os.remove(p)
+ elif os.path.isdir(p):
+ shutil.rmtree(p)
return Change(label="Remove out", change=remove_out, undo=lambda: None)
@@ -236,6 +302,7 @@
"id": self.benchmark.id,
"title": self.benchmark.title,
"modules": self.benchmark.modules,
+ "dumpvars": self.benchmark.dumpvars,
"change": self.benchmark.change.label,
"iteration": self.iteration,
"log_dir": self.log_dir,
@@ -284,7 +351,7 @@
# Preroll builds
for i in range(benchmark.preroll):
- ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark.modules)
+ ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"pre_{i}"), benchmark)
report.preroll_duration_ns.append(ns)
sys.stderr.write(f"PERFORMING CHANGE: {benchmark.change.label}\n")
@@ -293,18 +360,19 @@
try:
# Measured build
- ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark.modules)
+ ns = self._run_build(lunch, benchmark_log_dir.joinpath("measured"), benchmark)
report.duration_ns = ns
dist_one = self._options.DistOne()
if dist_one:
# If we're disting just one benchmark, save the logs and we can stop here.
- self._dist(utils.get_dist_dir())
+ self._dist(utils.get_dist_dir(), benchmark.dumpvars)
else:
+ self._dist(benchmark_log_dir, benchmark.dumpvars, store_metrics_only=True)
# Postroll builds
- for i in range(benchmark.preroll):
+ for i in range(benchmark.postroll):
ns = self._run_build(lunch, benchmark_log_dir.joinpath(f"post_{i}"),
- benchmark.modules)
+ benchmark)
report.postroll_duration_ns.append(ns)
finally:
@@ -323,21 +391,17 @@
path += ("/%0" + str(len(str(self._options.Iterations()))) + "d") % iteration
return path
- def _run_build(self, lunch, build_log_dir, modules):
+ def _run_build(self, lunch, build_log_dir, benchmark):
"""Builds the modules. Saves interesting log files to log_dir. Raises FatalError
if the build fails.
"""
- sys.stderr.write(f"STARTING BUILD {modules}\n")
+ sys.stderr.write(f"STARTING BUILD {benchmark.build_description()}\n")
before_ns = time.perf_counter_ns()
if not self._options.DryRun():
cmd = [
"build/soong/soong_ui.bash",
- "--build-mode",
- "--all-modules",
- f"--dir={self._options.root}",
- "--skip-metrics-upload",
- ] + modules
+ ] + benchmark.soong_command(self._options.root)
env = dict(os.environ)
env["TARGET_PRODUCT"] = lunch.target_product
env["TARGET_RELEASE"] = lunch.target_release
@@ -351,20 +415,25 @@
# TODO: Copy some log files.
- sys.stderr.write(f"FINISHED BUILD {modules}\n")
+ sys.stderr.write(f"FINISHED BUILD {benchmark.build_description()}\n")
return after_ns - before_ns
- def _dist(self, dist_dir):
+ def _dist(self, dist_dir, dumpvars, store_metrics_only=False):
out_dir = utils.get_out_dir()
dest_dir = dist_dir.joinpath("logs")
os.makedirs(dest_dir, exist_ok=True)
basenames = [
- "build.trace.gz",
- "soong.log",
"soong_build_metrics.pb",
"soong_metrics",
]
+ if not store_metrics_only:
+ basenames.extend([
+ "build.trace.gz",
+ "soong.log",
+ ])
+ if dumpvars:
+ basenames = ['dumpvars-' + b for b in basenames]
for base in basenames:
src = out_dir.joinpath(base)
if src.exists():
@@ -387,7 +456,7 @@
def benchmark_table(benchmarks):
rows = [("ID", "DESCRIPTION", "REBUILD"),]
- rows += [(benchmark.id, benchmark.title, " ".join(benchmark.modules)) for benchmark in
+ rows += [(benchmark.id, benchmark.title, benchmark.build_description()) for benchmark in
benchmarks]
return rows
@@ -571,6 +640,22 @@
"""Initialize the list of benchmarks."""
# Assumes that we've already chdired to the root of the tree.
self._benchmarks = [
+ Benchmark(
+ id="full_lunch",
+ title="Lunch from clean out",
+ change=Clean(),
+ dumpvars=True,
+ preroll=0,
+ postroll=0,
+ ),
+ Benchmark(
+ id="noop_lunch",
+ title="Lunch with no change",
+ change=NoChange(),
+ dumpvars=True,
+ preroll=1,
+ postroll=0,
+ ),
Benchmark(id="full",
title="Full build",
change=Clean(),
diff --git a/tools/perf/utils.py b/tools/perf/utils.py
index 934130d..0e66d4c 100644
--- a/tools/perf/utils.py
+++ b/tools/perf/utils.py
@@ -19,9 +19,11 @@
def get_root():
top_dir = os.environ.get("ANDROID_BUILD_TOP")
- if top_dir:
- return pathlib.Path(top_dir).resolve()
d = pathlib.Path.cwd()
+ # With cog, someone may have a new workspace and a new source tree top but not
+ # have run lunch yet, leaving a misleading ANDROID_BUILD_TOP value.
+ if top_dir and d.is_relative_to(top_dir):
+ return pathlib.Path(top_dir).resolve()
while True:
if d.joinpath("build", "soong", "soong_ui.bash").exists():
return d.resolve().absolute()