Merge "Update benchmarks to handle cog workflows" into main
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 6ae8b0d..18d955c 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -163,6 +163,7 @@
$(call add_soong_config_var_value,ANDROID,release_avf_enable_multi_tenant_microdroid_vm,$(RELEASE_AVF_ENABLE_MULTI_TENANT_MICRODROID_VM))
$(call add_soong_config_var_value,ANDROID,release_avf_enable_remote_attestation,$(RELEASE_AVF_ENABLE_REMOTE_ATTESTATION))
$(call add_soong_config_var_value,ANDROID,release_avf_enable_vendor_modules,$(RELEASE_AVF_ENABLE_VENDOR_MODULES))
+$(call add_soong_config_var_value,ANDROID,release_avf_enable_virt_cpufreq,$(RELEASE_AVF_ENABLE_VIRT_CPUFREQ))
$(call add_soong_config_var_value,ANDROID,release_binder_death_recipient_weak_from_jni,$(RELEASE_BINDER_DEATH_RECIPIENT_WEAK_FROM_JNI))
diff --git a/core/instrumentation_test_config_template.xml b/core/instrumentation_test_config_template.xml
index 9dfc001..379126c 100644
--- a/core/instrumentation_test_config_template.xml
+++ b/core/instrumentation_test_config_template.xml
@@ -24,7 +24,7 @@
</target_preparer>
<test class="com.android.tradefed.testtype.{TEST_TYPE}" >
- <option name="package" value="{PACKAGE}" />
+ {EXTRA_TEST_RUNNER_CONFIGS}<option name="package" value="{PACKAGE}" />
<option name="runner" value="{RUNNER}" />
</test>
</configuration>
diff --git a/core/packaging/flags.mk b/core/packaging/flags.mk
index 6fc1e4c..62ef3df 100644
--- a/core/packaging/flags.mk
+++ b/core/packaging/flags.mk
@@ -97,6 +97,20 @@
)) \
)
+# Collect the on-device flags into a single file, similar to all_aconfig_declarations.
+required_aconfig_flags_files := \
+ $(sort $(foreach partition, $(filter $(IMAGES_TO_BUILD), $(_FLAG_PARTITIONS)), \
+ $(aconfig_flag_summaries_protobuf.$(partition)) \
+ ))
+
+.PHONY: device_aconfig_declarations
+device_aconfig_declarations: $(PRODUCT_OUT)/device_aconfig_declarations.pb
+$(eval $(call generate-partition-aconfig-flag-file, \
+ $(TARGET_OUT_FLAGS)/device_aconfig_declarations.pb, \
+ $(PRODUCT_OUT)/device_aconfig_declarations.pb, \
+ $(sort $(required_aconfig_flags_files)) \
+)) \
+
# Create a set of storage file for each partition
# $(1): built aconfig flags storage package map file (out)
# $(2): built aconfig flags storage flag map file (out)
@@ -178,6 +192,7 @@
# Clean up
required_flags_files:=
+required_aconfig_flags_files:=
$(foreach partition, $(_FLAG_PARTITIONS), \
$(eval build_flag_summaries.$(partition):=) \
$(eval aconfig_flag_summaries_protobuf.$(partition):=) \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 0d88046..7d2b3ba 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -441,6 +441,7 @@
logpersist.start \
logtagd.rc \
ot-cli-ftd \
+ ot-ctl \
procrank \
profcollectd \
profcollectctl \
diff --git a/target/product/virtual_ab_ota/android_t_baseline.mk b/target/product/virtual_ab_ota/android_t_baseline.mk
index f862485..af0f7a9 100644
--- a/target/product/virtual_ab_ota/android_t_baseline.mk
+++ b/target/product/virtual_ab_ota/android_t_baseline.mk
@@ -21,4 +21,4 @@
# All U+ launching devices should instead use vabc_features.mk.
$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/vabc_features.mk)
-PRODUCT_VIRTUAL_AB_COW_VERSION := 2
+PRODUCT_VIRTUAL_AB_COW_VERSION ?= 2
diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs
index 70878a8..1381e89 100644
--- a/tools/aconfig/aconfig/src/storage/flag_table.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_table.rs
@@ -17,7 +17,7 @@
use crate::commands::assign_flag_ids;
use crate::storage::FlagPackage;
use aconfig_storage_file::{
- get_bucket_index, get_table_size, FlagTable, FlagTableHeader, FlagTableNode, FILE_VERSION,
+ get_table_size, FlagTable, FlagTableHeader, FlagTableNode, FILE_VERSION,
};
use anyhow::{anyhow, Result};
@@ -32,41 +32,52 @@
}
}
-fn new_node(
- package_id: u32,
- flag_name: &str,
- flag_type: u16,
- flag_id: u16,
- num_buckets: u32,
-) -> FlagTableNode {
- let full_flag_name = package_id.to_string() + "/" + flag_name;
- let bucket_index = get_bucket_index(&full_flag_name, num_buckets);
- FlagTableNode {
- package_id,
- flag_name: flag_name.to_string(),
- flag_type,
- flag_id,
- next_offset: None,
- bucket_index,
- }
+// A wrapper that pairs a FlagTableNode with the auxiliary information needed
+// during flag table creation
+#[derive(PartialEq, Debug, Clone)]
+struct FlagTableNodeWrapper {
+ pub node: FlagTableNode,
+ pub bucket_index: u32,
}
-fn create_nodes(package: &FlagPackage, num_buckets: u32) -> Result<Vec<FlagTableNode>> {
- let flag_ids = assign_flag_ids(package.package_name, package.boolean_flags.iter().copied())?;
- package
- .boolean_flags
- .iter()
- .map(|&pf| {
- let fid = flag_ids
- .get(pf.name())
- .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
- // all flags are boolean value at the moment, thus using the last bit. When more
- // flag value types are supported, flag value type information should come from the
- // parsed flag, and we will set the flag_type bit mask properly.
- let flag_type = 1;
- Ok(new_node(package.package_id, pf.name(), flag_type, *fid, num_buckets))
- })
- .collect::<Result<Vec<_>>>()
+impl FlagTableNodeWrapper {
+ fn new(
+ package_id: u32,
+ flag_name: &str,
+ flag_type: u16,
+ flag_id: u16,
+ num_buckets: u32,
+ ) -> Self {
+ let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
+ let node = FlagTableNode {
+ package_id,
+ flag_name: flag_name.to_string(),
+ flag_type,
+ flag_id,
+ next_offset: None,
+ };
+ Self { node, bucket_index }
+ }
+
+ fn create_nodes(package: &FlagPackage, num_buckets: u32) -> Result<Vec<Self>> {
+ let flag_ids =
+ assign_flag_ids(package.package_name, package.boolean_flags.iter().copied())?;
+ package
+ .boolean_flags
+ .iter()
+ .map(|&pf| {
+ let fid = flag_ids
+ .get(pf.name())
+ .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
+ // all flags are boolean values at the moment, thus only the last bit
+ // is used. When more flag value types are supported, flag value type
+ // information should come from the parsed flag, and we will set the
+ // flag_type bit mask properly.
+ let flag_type = 1;
+ Ok(Self::new(package.package_id, pf.name(), flag_type, *fid, num_buckets))
+ })
+ .collect::<Result<Vec<_>>>()
+ }
}
pub fn create_flag_table(container: &str, packages: &[FlagPackage]) -> Result<FlagTable> {
@@ -74,44 +85,48 @@
let num_flags = packages.iter().map(|pkg| pkg.boolean_flags.len() as u32).sum();
let num_buckets = get_table_size(num_flags)?;
- let mut table = FlagTable {
- header: new_header(container, num_flags),
- buckets: vec![None; num_buckets as usize],
- nodes: packages
- .iter()
- .map(|pkg| create_nodes(pkg, num_buckets))
- .collect::<Result<Vec<_>>>()?
- .concat(),
- };
+ let mut header = new_header(container, num_flags);
+ let mut buckets = vec![None; num_buckets as usize];
+ let mut node_wrappers = packages
+ .iter()
+ .map(|pkg| FlagTableNodeWrapper::create_nodes(pkg, num_buckets))
+ .collect::<Result<Vec<_>>>()?
+ .concat();
// initialize all header fields
- table.header.bucket_offset = table.header.as_bytes().len() as u32;
- table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
- table.header.file_size = table.header.node_offset
- + table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
+ header.bucket_offset = header.as_bytes().len() as u32;
+ header.node_offset = header.bucket_offset + num_buckets * 4;
+ header.file_size = header.node_offset
+ + node_wrappers.iter().map(|x| x.node.as_bytes().len()).sum::<usize>() as u32;
// sort nodes by bucket index for efficiency
- table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
+ node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
// fill all node offset
- let mut offset = table.header.node_offset;
- for i in 0..table.nodes.len() {
- let node_bucket_idx = table.nodes[i].bucket_index;
- let next_node_bucket_idx =
- if i + 1 < table.nodes.len() { Some(table.nodes[i + 1].bucket_index) } else { None };
+ let mut offset = header.node_offset;
+ for i in 0..node_wrappers.len() {
+ let node_bucket_idx = node_wrappers[i].bucket_index;
+ let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+ Some(node_wrappers[i + 1].bucket_index)
+ } else {
+ None
+ };
- if table.buckets[node_bucket_idx as usize].is_none() {
- table.buckets[node_bucket_idx as usize] = Some(offset);
+ if buckets[node_bucket_idx as usize].is_none() {
+ buckets[node_bucket_idx as usize] = Some(offset);
}
- offset += table.nodes[i].as_bytes().len() as u32;
+ offset += node_wrappers[i].node.as_bytes().len() as u32;
if let Some(index) = next_node_bucket_idx {
if index == node_bucket_idx {
- table.nodes[i].next_offset = Some(offset);
+ node_wrappers[i].node.next_offset = Some(offset);
}
}
}
+ let table =
+ FlagTable { header, buckets, nodes: node_wrappers.into_iter().map(|nw| nw.node).collect() };
+
Ok(table)
}
@@ -127,7 +142,6 @@
flag_type: u16,
flag_id: u16,
next_offset: Option<u32>,
- bucket_index: u32,
) -> FlagTableNode {
FlagTableNode {
package_id,
@@ -135,7 +149,6 @@
flag_type,
flag_id,
next_offset,
- bucket_index,
}
}
@@ -187,13 +200,13 @@
let nodes: &Vec<FlagTableNode> = &flag_table.as_ref().unwrap().nodes;
assert_eq!(nodes.len(), 8);
- assert_eq!(nodes[0], new_expected_node(0, "enabled_ro", 1, 1, None, 0));
- assert_eq!(nodes[1], new_expected_node(0, "enabled_rw", 1, 2, Some(150), 1));
- assert_eq!(nodes[2], new_expected_node(1, "disabled_ro", 1, 0, None, 1));
- assert_eq!(nodes[3], new_expected_node(2, "enabled_ro", 1, 1, None, 5));
- assert_eq!(nodes[4], new_expected_node(1, "enabled_fixed_ro", 1, 1, Some(235), 7));
- assert_eq!(nodes[5], new_expected_node(1, "enabled_ro", 1, 2, None, 7));
- assert_eq!(nodes[6], new_expected_node(2, "enabled_fixed_ro", 1, 0, None, 9));
- assert_eq!(nodes[7], new_expected_node(0, "disabled_rw", 1, 0, None, 15));
+ assert_eq!(nodes[0], new_expected_node(0, "enabled_ro", 1, 1, None));
+ assert_eq!(nodes[1], new_expected_node(0, "enabled_rw", 1, 2, Some(150)));
+ assert_eq!(nodes[2], new_expected_node(1, "disabled_ro", 1, 0, None));
+ assert_eq!(nodes[3], new_expected_node(2, "enabled_ro", 1, 1, None));
+ assert_eq!(nodes[4], new_expected_node(1, "enabled_fixed_ro", 1, 1, Some(235)));
+ assert_eq!(nodes[5], new_expected_node(1, "enabled_ro", 1, 2, None));
+ assert_eq!(nodes[6], new_expected_node(2, "enabled_fixed_ro", 1, 0, None));
+ assert_eq!(nodes[7], new_expected_node(0, "disabled_rw", 1, 0, None));
}
}
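
The refactor above moves the bucket index out of FlagTableNode and into a construction-time wrapper, so layout-only data is never serialized. A minimal sketch of that pattern, using simplified hypothetical types rather than the actual aconfig structs:

// Minimal sketch of the wrapper pattern (Node/NodeWrapper are hypothetical
// stand-ins, not the aconfig types).
struct Node {
    name: String,
    next_offset: Option<u32>,
}

struct NodeWrapper {
    node: Node,
    bucket_index: u32, // layout-only data, never serialized
}

fn layout(mut wrappers: Vec<NodeWrapper>) -> Vec<Node> {
    // Sort by bucket so nodes sharing a bucket end up adjacent, then drop the
    // bucket index and keep only the serializable nodes.
    wrappers.sort_by_key(|w| w.bucket_index);
    wrappers.into_iter().map(|w| w.node).collect()
}
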
diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs
index f82e932..4c08129 100644
--- a/tools/aconfig/aconfig/src/storage/package_table.rs
+++ b/tools/aconfig/aconfig/src/storage/package_table.rs
@@ -17,8 +17,7 @@
use anyhow::Result;
use aconfig_storage_file::{
- get_bucket_index, get_table_size, PackageTable, PackageTableHeader, PackageTableNode,
- FILE_VERSION,
+ get_table_size, PackageTable, PackageTableHeader, PackageTableNode, FILE_VERSION,
};
use crate::storage::FlagPackage;
@@ -34,14 +33,24 @@
}
}
-fn new_node(package: &FlagPackage, num_buckets: u32) -> PackageTableNode {
- let bucket_index = get_bucket_index(&package.package_name.to_string(), num_buckets);
- PackageTableNode {
- package_name: String::from(package.package_name),
- package_id: package.package_id,
- boolean_offset: package.boolean_offset,
- next_offset: None,
- bucket_index,
+// A wrapper that pairs a PackageTableNode with the auxiliary information needed
+// during package table creation
+#[derive(PartialEq, Debug)]
+struct PackageTableNodeWrapper {
+ pub node: PackageTableNode,
+ pub bucket_index: u32,
+}
+
+impl PackageTableNodeWrapper {
+ fn new(package: &FlagPackage, num_buckets: u32) -> Self {
+ let node = PackageTableNode {
+ package_name: String::from(package.package_name),
+ package_id: package.package_id,
+ boolean_offset: package.boolean_offset,
+ next_offset: None,
+ };
+ let bucket_index = PackageTableNode::find_bucket_index(package.package_name, num_buckets);
+ Self { node, bucket_index }
}
}
@@ -49,40 +58,47 @@
// create table
let num_packages = packages.len() as u32;
let num_buckets = get_table_size(num_packages)?;
- let mut table = PackageTable {
- header: new_header(container, num_packages),
- buckets: vec![None; num_buckets as usize],
- nodes: packages.iter().map(|pkg| new_node(pkg, num_buckets)).collect(),
- };
+ let mut header = new_header(container, num_packages);
+ let mut buckets = vec![None; num_buckets as usize];
+ let mut node_wrappers: Vec<_> =
+ packages.iter().map(|pkg| PackageTableNodeWrapper::new(pkg, num_buckets)).collect();
// initialize all header fields
- table.header.bucket_offset = table.header.as_bytes().len() as u32;
- table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
- table.header.file_size = table.header.node_offset
- + table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
+ header.bucket_offset = header.as_bytes().len() as u32;
+ header.node_offset = header.bucket_offset + num_buckets * 4;
+ header.file_size = header.node_offset
+ + node_wrappers.iter().map(|x| x.node.as_bytes().len()).sum::<usize>() as u32;
- // sort nodes by bucket index for efficiency
- table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
+ // sort node_wrappers by bucket index for efficiency
+ node_wrappers.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
// fill all node offset
- let mut offset = table.header.node_offset;
- for i in 0..table.nodes.len() {
- let node_bucket_idx = table.nodes[i].bucket_index;
- let next_node_bucket_idx =
- if i + 1 < table.nodes.len() { Some(table.nodes[i + 1].bucket_index) } else { None };
+ let mut offset = header.node_offset;
+ for i in 0..node_wrappers.len() {
+ let node_bucket_idx = node_wrappers[i].bucket_index;
+ let next_node_bucket_idx = if i + 1 < node_wrappers.len() {
+ Some(node_wrappers[i + 1].bucket_index)
+ } else {
+ None
+ };
- if table.buckets[node_bucket_idx as usize].is_none() {
- table.buckets[node_bucket_idx as usize] = Some(offset);
+ if buckets[node_bucket_idx as usize].is_none() {
+ buckets[node_bucket_idx as usize] = Some(offset);
}
- offset += table.nodes[i].as_bytes().len() as u32;
+ offset += node_wrappers[i].node.as_bytes().len() as u32;
if let Some(index) = next_node_bucket_idx {
if index == node_bucket_idx {
- table.nodes[i].next_offset = Some(offset);
+ node_wrappers[i].node.next_offset = Some(offset);
}
}
}
+ let table = PackageTable {
+ header,
+ buckets,
+ nodes: node_wrappers.into_iter().map(|nw| nw.node).collect(),
+ };
Ok(table)
}
@@ -125,7 +141,6 @@
package_id: 1,
boolean_offset: 3,
next_offset: None,
- bucket_index: 0,
};
assert_eq!(nodes[0], first_node_expected);
let second_node_expected = PackageTableNode {
@@ -133,7 +148,6 @@
package_id: 0,
boolean_offset: 0,
next_offset: Some(158),
- bucket_index: 3,
};
assert_eq!(nodes[1], second_node_expected);
let third_node_expected = PackageTableNode {
@@ -141,7 +155,6 @@
package_id: 2,
boolean_offset: 6,
next_offset: None,
- bucket_index: 3,
};
assert_eq!(nodes[2], third_node_expected);
}
diff --git a/tools/aconfig/aconfig_protos/src/lib.rs b/tools/aconfig/aconfig_protos/src/lib.rs
index ef16e06..8f5667f 100644
--- a/tools/aconfig/aconfig_protos/src/lib.rs
+++ b/tools/aconfig/aconfig_protos/src/lib.rs
@@ -150,10 +150,7 @@
ensure_required_fields!("flag declarations", pdf, "package");
// TODO(b/312769710): Make the container field required.
- ensure!(
- is_valid_package_ident(pdf.package()),
- "bad flag declarations: bad package"
- );
+ ensure!(is_valid_package_ident(pdf.package()), "bad flag declarations: bad package");
ensure!(
!pdf.has_container() || is_valid_container_ident(pdf.container()),
"bad flag declarations: bad container"
@@ -898,10 +895,7 @@
"#;
let parsed_flags = try_from_binary_proto_from_text_proto(text_proto).unwrap();
let parsed_flag = &parsed_flags.parsed_flag[0];
- assert_eq!(
- crate::parsed_flag::path_to_declaration(parsed_flag),
- "flags.declarations"
- );
+ assert_eq!(crate::parsed_flag::path_to_declaration(parsed_flag), "flags.declarations");
}
#[test]
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index dab752a..dfbd9de 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -17,8 +17,8 @@
//! flag table module defines the flag table file format and methods for serialization
//! and deserialization
-use crate::{read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes};
-use anyhow::Result;
+use crate::{get_bucket_index, read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes};
+use anyhow::{anyhow, Result};
/// Flag table header struct
#[derive(PartialEq, Debug)]
@@ -68,7 +68,6 @@
pub flag_type: u16,
pub flag_id: u16,
pub next_offset: Option<u32>,
- pub bucket_index: u32,
}
impl FlagTableNode {
@@ -86,9 +85,9 @@
}
/// Deserialize from bytes
- pub fn from_bytes(bytes: &[u8], num_buckets: u32) -> Result<Self> {
+ pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
let mut head = 0;
- let mut node = Self {
+ let node = Self {
package_id: read_u32_from_bytes(bytes, &mut head)?,
flag_name: read_str_from_bytes(bytes, &mut head)?,
flag_type: read_u16_from_bytes(bytes, &mut head)?,
@@ -97,12 +96,15 @@
0 => None,
val => Some(val),
},
- bucket_index: 0,
};
- let full_flag_name = node.package_id.to_string() + "/" + &node.flag_name;
- node.bucket_index = crate::get_bucket_index(&full_flag_name, num_buckets);
Ok(node)
}
+
+ /// Calculate node bucket index
+ pub fn find_bucket_index(package_id: u32, flag_name: &str, num_buckets: u32) -> u32 {
+ let full_flag_name = package_id.to_string() + "/" + flag_name;
+ get_bucket_index(&full_flag_name, num_buckets)
+ }
}
#[derive(PartialEq, Debug)]
@@ -138,17 +140,51 @@
.collect();
let nodes = (0..num_flags)
.map(|_| {
- let node = FlagTableNode::from_bytes(&bytes[head..], num_buckets).unwrap();
+ let node = FlagTableNode::from_bytes(&bytes[head..])?;
head += node.as_bytes().len();
- node
+ Ok(node)
})
- .collect();
+ .collect::<Result<Vec<_>>>()?;
let table = Self { header, buckets, nodes };
Ok(table)
}
}
+/// Query a flag's within-package offset
+pub fn find_flag_offset(buf: &[u8], package_id: u32, flag: &str) -> Result<Option<u16>> {
+ let interpreted_header = FlagTableHeader::from_bytes(buf)?;
+ if interpreted_header.version > crate::FILE_VERSION {
+ return Err(anyhow!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ interpreted_header.version,
+ crate::FILE_VERSION
+ ));
+ }
+
+ let num_buckets = (interpreted_header.node_offset - interpreted_header.bucket_offset) / 4;
+ let bucket_index = FlagTableNode::find_bucket_index(package_id, flag, num_buckets);
+
+ let mut pos = (interpreted_header.bucket_offset + 4 * bucket_index) as usize;
+ let mut flag_node_offset = read_u32_from_bytes(buf, &mut pos)? as usize;
+ if flag_node_offset < interpreted_header.node_offset as usize
+ || flag_node_offset >= interpreted_header.file_size as usize
+ {
+ return Ok(None);
+ }
+
+ loop {
+ let interpreted_node = FlagTableNode::from_bytes(&buf[flag_node_offset..])?;
+ if interpreted_node.package_id == package_id && interpreted_node.flag_name == flag {
+ return Ok(Some(interpreted_node.flag_id));
+ }
+ match interpreted_node.next_offset {
+ Some(offset) => flag_node_offset = offset as usize,
+ None => return Ok(None),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -161,16 +197,8 @@
flag_type: u16,
flag_id: u16,
next_offset: Option<u32>,
- bucket_index: u32,
) -> Self {
- Self {
- package_id,
- flag_name: flag_name.to_string(),
- flag_type,
- flag_id,
- next_offset,
- bucket_index,
- }
+ Self { package_id, flag_name: flag_name.to_string(), flag_type, flag_id, next_offset }
}
}
@@ -203,14 +231,14 @@
None,
];
let nodes = vec![
- FlagTableNode::new_expected(0, "enabled_ro", 1, 1, None, 0),
- FlagTableNode::new_expected(0, "enabled_rw", 1, 2, Some(150), 1),
- FlagTableNode::new_expected(1, "disabled_ro", 1, 0, None, 1),
- FlagTableNode::new_expected(2, "enabled_ro", 1, 1, None, 5),
- FlagTableNode::new_expected(1, "enabled_fixed_ro", 1, 1, Some(235), 7),
- FlagTableNode::new_expected(1, "enabled_ro", 1, 2, None, 7),
- FlagTableNode::new_expected(2, "enabled_fixed_ro", 1, 0, None, 9),
- FlagTableNode::new_expected(0, "disabled_rw", 1, 0, None, 15),
+ FlagTableNode::new_expected(0, "enabled_ro", 1, 1, None),
+ FlagTableNode::new_expected(0, "enabled_rw", 1, 2, Some(150)),
+ FlagTableNode::new_expected(1, "disabled_ro", 1, 0, None),
+ FlagTableNode::new_expected(2, "enabled_ro", 1, 1, None),
+ FlagTableNode::new_expected(1, "enabled_fixed_ro", 1, 1, Some(235)),
+ FlagTableNode::new_expected(1, "enabled_ro", 1, 2, None),
+ FlagTableNode::new_expected(2, "enabled_fixed_ro", 1, 0, None),
+ FlagTableNode::new_expected(0, "disabled_rw", 1, 0, None),
];
Ok(FlagTable { header, buckets, nodes })
}
@@ -226,15 +254,61 @@
assert_eq!(header, &reinterpreted_header.unwrap());
let nodes: &Vec<FlagTableNode> = &flag_table.nodes;
- let num_buckets = crate::get_table_size(header.num_flags).unwrap();
for node in nodes.iter() {
- let reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes(), num_buckets);
- assert!(reinterpreted_node.is_ok());
- assert_eq!(node, &reinterpreted_node.unwrap());
+ let reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes()).unwrap();
+ assert_eq!(node, &reinterpreted_node);
}
let reinterpreted_table = FlagTable::from_bytes(&flag_table.as_bytes());
assert!(reinterpreted_table.is_ok());
assert_eq!(&flag_table, &reinterpreted_table.unwrap());
}
+
+ #[test]
+ // this test point locks down table query
+ fn test_flag_query() {
+ let flag_table = create_test_flag_table().unwrap().as_bytes();
+ let baseline = vec![
+ (0, "enabled_ro", 1u16),
+ (0, "enabled_rw", 2u16),
+ (1, "disabled_ro", 0u16),
+ (2, "enabled_ro", 1u16),
+ (1, "enabled_fixed_ro", 1u16),
+ (1, "enabled_ro", 2u16),
+ (2, "enabled_fixed_ro", 0u16),
+ (0, "disabled_rw", 0u16),
+ ];
+ for (package_id, flag_name, expected_offset) in baseline.into_iter() {
+ let flag_offset =
+ find_flag_offset(&flag_table[..], package_id, flag_name).unwrap().unwrap();
+ assert_eq!(flag_offset, expected_offset);
+ }
+ }
+
+ #[test]
+ // this test point locks down table query of a non-existent flag
+ fn test_not_existed_flag_query() {
+ let flag_table = create_test_flag_table().unwrap().as_bytes();
+ let flag_offset = find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
+ assert_eq!(flag_offset, None);
+ let flag_offset = find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
+ assert_eq!(flag_offset, None);
+ }
+
+ #[test]
+ // this test point locks down the query error when the file has a higher version
+ fn test_higher_version_storage_file() {
+ let mut table = create_test_flag_table().unwrap();
+ table.header.version = crate::FILE_VERSION + 1;
+ let flag_table = table.as_bytes();
+ let error = find_flag_offset(&flag_table[..], 0, "enabled_ro").unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ format!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ crate::FILE_VERSION + 1,
+ crate::FILE_VERSION
+ )
+ );
+ }
}
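
For orientation, a hedged usage sketch of the new find_flag_offset query; the crate/module path and the flag map file location are assumptions, not something this patch establishes:

// Hedged sketch: look up a flag's within-package offset from a flag table
// buffer. The module path and on-device file path are assumptions.
use aconfig_storage_file::flag_table::find_flag_offset;
use anyhow::Result;

fn query_flag_offset(package_id: u32, flag_name: &str) -> Result<Option<u16>> {
    let bytes = std::fs::read("/path/to/flag.map")?; // hypothetical location
    // Ok(None) means the flag was not found; Err(_) covers malformed buffers
    // or a storage file written by a newer library version.
    find_flag_offset(&bytes, package_id, flag_name)
}
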
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_value.rs b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
index 86f75ce..bb8892d 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_value.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_value.rs
@@ -18,7 +18,7 @@
//! and deserialization
use crate::{read_str_from_bytes, read_u32_from_bytes, read_u8_from_bytes};
-use anyhow::Result;
+use anyhow::{anyhow, Result};
/// Flag value header struct
#[derive(PartialEq, Debug)]
@@ -86,6 +86,29 @@
}
}
+/// Query flag value
+pub fn get_boolean_flag_value(buf: &[u8], flag_offset: u32) -> Result<bool> {
+ let interpreted_header = FlagValueHeader::from_bytes(buf)?;
+ if interpreted_header.version > crate::FILE_VERSION {
+ return Err(anyhow!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ interpreted_header.version,
+ crate::FILE_VERSION
+ ));
+ }
+
+ let mut head = (interpreted_header.boolean_value_offset + flag_offset) as usize;
+
+ // TODO: right now there are only boolean flags; once more flag value types
+ // are added, the end of the boolean flag value section should be updated (b/322826265).
+ if head >= interpreted_header.file_size as usize {
+ return Err(anyhow!("Flag value offset goes beyond the end of the file."));
+ }
+
+ let val = read_u8_from_bytes(buf, &mut head)?;
+ Ok(val == 1)
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -116,4 +139,40 @@
assert!(reinterpreted_value_list.is_ok());
assert_eq!(&flag_value_list, &reinterpreted_value_list.unwrap());
}
+
+ #[test]
+ // this test point locks down flag value query
+ fn test_flag_value_query() {
+ let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
+ let baseline: Vec<bool> = vec![false, true, false, false, true, true, false, true];
+ for (offset, expected_value) in baseline.into_iter().enumerate() {
+ let flag_value = get_boolean_flag_value(&flag_value_list[..], offset as u32).unwrap();
+ assert_eq!(flag_value, expected_value);
+ }
+ }
+
+ #[test]
+ // this test point locks down a query beyond the end of the boolean section
+ fn test_boolean_out_of_range() {
+ let flag_value_list = create_test_flag_value_list().unwrap().as_bytes();
+ let error = get_boolean_flag_value(&flag_value_list[..], 8).unwrap_err();
+ assert_eq!(format!("{:?}", error), "Flag value offset goes beyond the end of the file.");
+ }
+
+ #[test]
+ // this test point locks down the query error when the file has a higher version
+ fn test_higher_version_storage_file() {
+ let mut value_list = create_test_flag_value_list().unwrap();
+ value_list.header.version = crate::FILE_VERSION + 1;
+ let flag_value = value_list.as_bytes();
+ let error = get_boolean_flag_value(&flag_value[..], 4).unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ format!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ crate::FILE_VERSION + 1,
+ crate::FILE_VERSION
+ )
+ );
+ }
}
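
A similarly hedged sketch for the new boolean value query; the module path and file location are again assumptions:

// Hedged sketch: read a boolean flag value by its byte offset into the
// boolean value section. Module path and file location are assumptions.
use aconfig_storage_file::flag_value::get_boolean_flag_value;
use anyhow::Result;

fn query_flag_value(flag_offset: u32) -> Result<bool> {
    let bytes = std::fs::read("/path/to/flag.val")?; // hypothetical location
    // Errors out when the offset runs past the boolean section or the file
    // was written by a newer library version.
    get_boolean_flag_value(&bytes, flag_offset)
}
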
diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs
index 14eef70..a3ad6ec 100644
--- a/tools/aconfig/aconfig_storage_file/src/package_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs
@@ -17,8 +17,8 @@
//! package table module defines the package table file format and methods for serialization
//! and deserialization
-use crate::{read_str_from_bytes, read_u32_from_bytes};
-use anyhow::Result;
+use crate::{get_bucket_index, read_str_from_bytes, read_u32_from_bytes};
+use anyhow::{anyhow, Result};
/// Package table header struct
#[derive(PartialEq, Debug)]
@@ -69,7 +69,6 @@
// boolean flag value array in the flag value file
pub boolean_offset: u32,
pub next_offset: Option<u32>,
- pub bucket_index: u32,
}
impl PackageTableNode {
@@ -86,9 +85,9 @@
}
/// Deserialize from bytes
- pub fn from_bytes(bytes: &[u8], num_buckets: u32) -> Result<Self> {
+ pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
let mut head = 0;
- let mut node = Self {
+ let node = Self {
package_name: read_str_from_bytes(bytes, &mut head)?,
package_id: read_u32_from_bytes(bytes, &mut head)?,
boolean_offset: read_u32_from_bytes(bytes, &mut head)?,
@@ -96,11 +95,16 @@
0 => None,
val => Some(val),
},
- bucket_index: 0,
};
- node.bucket_index = crate::get_bucket_index(&node.package_name, num_buckets);
Ok(node)
}
+
+ /// Get the bucket index for a package table node. It is defined here so that
+ /// the construction side (aconfig binary) and the consumption side (flag read
+ /// lib) use the same hashing method.
+ pub fn find_bucket_index(package: &str, num_buckets: u32) -> u32 {
+ get_bucket_index(&package, num_buckets)
+ }
}
/// Package table struct
@@ -136,17 +140,61 @@
.collect();
let nodes = (0..num_packages)
.map(|_| {
- let node = PackageTableNode::from_bytes(&bytes[head..], num_buckets).unwrap();
+ let node = PackageTableNode::from_bytes(&bytes[head..])?;
head += node.as_bytes().len();
- node
+ Ok(node)
})
- .collect();
+ .collect::<Result<Vec<_>>>()?;
let table = Self { header, buckets, nodes };
Ok(table)
}
}
+/// Package table query return
+#[derive(PartialEq, Debug)]
+pub struct PackageOffset {
+ pub package_id: u32,
+ pub boolean_offset: u32,
+}
+
+/// Query package id and start offset
+pub fn find_package_offset(buf: &[u8], package: &str) -> Result<Option<PackageOffset>> {
+ let interpreted_header = PackageTableHeader::from_bytes(buf)?;
+ if interpreted_header.version > crate::FILE_VERSION {
+ return Err(anyhow!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ interpreted_header.version,
+ crate::FILE_VERSION
+ ));
+ }
+
+ let num_buckets = (interpreted_header.node_offset - interpreted_header.bucket_offset) / 4;
+ let bucket_index = PackageTableNode::find_bucket_index(&package, num_buckets);
+
+ let mut pos = (interpreted_header.bucket_offset + 4 * bucket_index) as usize;
+ let mut package_node_offset = read_u32_from_bytes(buf, &mut pos)? as usize;
+ if package_node_offset < interpreted_header.node_offset as usize
+ || package_node_offset >= interpreted_header.file_size as usize
+ {
+ return Ok(None);
+ }
+
+ loop {
+ let interpreted_node = PackageTableNode::from_bytes(&buf[package_node_offset..])?;
+ if interpreted_node.package_name == package {
+ return Ok(Some(PackageOffset {
+ package_id: interpreted_node.package_id,
+ boolean_offset: interpreted_node.boolean_offset,
+ }));
+ }
+ match interpreted_node.next_offset {
+ Some(offset) => package_node_offset = offset as usize,
+ None => return Ok(None),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -166,21 +214,18 @@
package_id: 1,
boolean_offset: 3,
next_offset: None,
- bucket_index: 0,
};
let second_node = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_1"),
package_id: 0,
boolean_offset: 0,
next_offset: Some(158),
- bucket_index: 3,
};
let third_node = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_4"),
package_id: 2,
boolean_offset: 6,
next_offset: None,
- bucket_index: 3,
};
let nodes = vec![first_node, second_node, third_node];
Ok(PackageTable { header, buckets, nodes })
@@ -197,15 +242,69 @@
assert_eq!(header, &reinterpreted_header.unwrap());
let nodes: &Vec<PackageTableNode> = &package_table.nodes;
- let num_buckets = crate::get_table_size(header.num_packages).unwrap();
for node in nodes.iter() {
- let reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes(), num_buckets);
- assert!(reinterpreted_node.is_ok());
- assert_eq!(node, &reinterpreted_node.unwrap());
+ let reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes()).unwrap();
+ assert_eq!(node, &reinterpreted_node);
}
let reinterpreted_table = PackageTable::from_bytes(&package_table.as_bytes());
assert!(reinterpreted_table.is_ok());
assert_eq!(&package_table, &reinterpreted_table.unwrap());
}
+
+ #[test]
+ // this test point locks down table query
+ fn test_package_query() {
+ let package_table = create_test_package_table().unwrap().as_bytes();
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_1")
+ .unwrap()
+ .unwrap();
+ let expected_package_offset = PackageOffset { package_id: 0, boolean_offset: 0 };
+ assert_eq!(package_offset, expected_package_offset);
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_2")
+ .unwrap()
+ .unwrap();
+ let expected_package_offset = PackageOffset { package_id: 1, boolean_offset: 3 };
+ assert_eq!(package_offset, expected_package_offset);
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_4")
+ .unwrap()
+ .unwrap();
+ let expected_package_offset = PackageOffset { package_id: 2, boolean_offset: 6 };
+ assert_eq!(package_offset, expected_package_offset);
+ }
+
+ #[test]
+ // this test point locks down table query of a non-existent package
+ fn test_not_existed_package_query() {
+ // this will land at an empty bucket
+ let package_table = create_test_package_table().unwrap().as_bytes();
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_3").unwrap();
+ assert_eq!(package_offset, None);
+ // this will land at the end of a linked list
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_5").unwrap();
+ assert_eq!(package_offset, None);
+ }
+
+ #[test]
+ // this test point locks down the query error when the file has a higher version
+ fn test_higher_version_storage_file() {
+ let mut table = create_test_package_table().unwrap();
+ table.header.version = crate::FILE_VERSION + 1;
+ let package_table = table.as_bytes();
+ let error = find_package_offset(&package_table[..], "com.android.aconfig.storage.test_1")
+ .unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ format!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ crate::FILE_VERSION + 1,
+ crate::FILE_VERSION
+ )
+ );
+ }
}
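
Taken together with the two queries above, find_package_offset enables an end-to-end boolean flag lookup. A hedged sketch of that chain, with assumed module paths and file locations:

// Hedged end-to-end sketch: package table -> flag table -> flag value.
// Module paths and the map/value file locations are assumptions.
use aconfig_storage_file::flag_table::find_flag_offset;
use aconfig_storage_file::flag_value::get_boolean_flag_value;
use aconfig_storage_file::package_table::find_package_offset;
use anyhow::Result;

fn read_boolean_flag(package: &str, flag: &str) -> Result<Option<bool>> {
    let package_map = std::fs::read("/path/to/package.map")?; // hypothetical
    let flag_map = std::fs::read("/path/to/flag.map")?; // hypothetical
    let flag_vals = std::fs::read("/path/to/flag.val")?; // hypothetical

    // 1. Resolve the package to its id and the start of its boolean values.
    let Some(pkg) = find_package_offset(&package_map, package)? else {
        return Ok(None);
    };
    // 2. Resolve the flag to its offset within the package.
    let Some(flag_offset) = find_flag_offset(&flag_map, pkg.package_id, flag)? else {
        return Ok(None);
    };
    // 3. Read the boolean value at package start + within-package offset.
    let value = get_boolean_flag_value(&flag_vals, pkg.boolean_offset + flag_offset as u32)?;
    Ok(Some(value))
}
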
diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
index e1fb6c7..21bfe5c 100644
--- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs
+++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
@@ -14,9 +14,9 @@
* limitations under the License.
*/
+use crate::protos::ProtoStorageFiles;
use anyhow::Result;
use protobuf::Message;
-use crate::protos::ProtoStorageFiles;
pub fn get_binary_storage_proto_bytes(text_proto: &str) -> Result<Vec<u8>> {
let storage_files: ProtoStorageFiles = protobuf::text_format::parse_from_str(text_proto)?;
diff --git a/tools/aconfig/printflags/src/main.rs b/tools/aconfig/printflags/src/main.rs
index 7fcde61..a0c9ee8 100644
--- a/tools/aconfig/printflags/src/main.rs
+++ b/tools/aconfig/printflags/src/main.rs
@@ -17,7 +17,7 @@
//! `printflags` is a device binary to print feature flags.
use aconfig_protos::ProtoFlagState as State;
-use aconfig_protos::ProtoParsedFlags as ProtoParsedFlags;
+use aconfig_protos::ProtoParsedFlags;
use anyhow::{bail, Context, Result};
use regex::Regex;
use std::collections::BTreeMap;
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index bde152f..a7b1d8c 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -260,7 +260,7 @@
if fs_type.startswith("ext4") and partition_headroom > reserved_size:
reserved_size = partition_headroom
- return size + reserved_size
+ return int(size * 1.1) + reserved_size
def BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config):
@@ -636,7 +636,7 @@
size = verity_image_builder.CalculateDynamicPartitionSize(size)
prop_dict["partition_size"] = str(size)
logger.info(
- "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
+ "Allocating %d MB for %s", size // BYTES_IN_MB, out_file)
prop_dict["image_size"] = prop_dict["partition_size"]
@@ -979,7 +979,11 @@
parser.add_argument("target_out",
help="the path to $(TARGET_OUT). Certain tools will use this to look through multiple staging "
"directories for fs config files.")
+ parser.add_argument("-v", action="store_true",
+ help="Enable verbose logging", dest="verbose")
args = parser.parse_args()
+ if args.verbose:
+ OPTIONS.verbose = True
common.InitLogging()