Merge "finalization: remove VNDK current.txt copy" into main
diff --git a/cogsetup.sh b/cogsetup.sh
index 3005d58..6439af0 100644
--- a/cogsetup.sh
+++ b/cogsetup.sh
@@ -35,31 +35,6 @@
ln -s ${DEFAULT_OUTPUT_DIR} `pwd`/out
}
-# This function moves the reclient binaries into a directory that exists in a
-# non-cog part of the overall filesystem. This is to workaround the problem
-# described in b/289391270.
-function _copy_reclient_binaries_from_cog() {
- if [[ "${OUT_DIR}" == "" ]]; then
- OUT_DIR="out"
- fi
- local RECLIENT_VERSION=`readlink prebuilts/remoteexecution-client/live`
-
- local NONCOG_RECLIENT_BIN_DIR_BASE="${OUT_DIR}/.reclient"
- local NONCOG_RECLIENT_BIN_DIR="${NONCOG_RECLIENT_BIN_DIR_BASE}/${RECLIENT_VERSION}"
-
- # Create the non cog directory and setup live symlink.
- mkdir -p ${NONCOG_RECLIENT_BIN_DIR}
-
- if [ `ls ${NONCOG_RECLIENT_BIN_DIR} | wc -l` -lt 8 ]; then
- # Not all binaries exist, copy them from the Cog directory.
- local TOP=$(gettop)
- cp ${TOP}/prebuilts/remoteexecution-client/live/* ${NONCOG_RECLIENT_BIN_DIR}
- fi
-
- ln -sfn ${RECLIENT_VERSION} ${NONCOG_RECLIENT_BIN_DIR_BASE}/live
- export RBE_DIR="${NONCOG_RECLIENT_BIN_DIR_BASE}/live"
-}
-
# This function sets up the build environment to be appropriate for Cog.
function _setup_cog_env() {
_create_out_symlink_for_cog
@@ -67,7 +42,6 @@
echo -e "\e[0;33mWARNING:\e[00m Cog environment setup failed!"
return 1
fi
- _copy_reclient_binaries_from_cog
export ANDROID_BUILD_ENVIRONMENT_CONFIG="googler-cog"
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 88348e2..1494f68 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -195,6 +195,7 @@
$(call add_soong_config_var_value,ANDROID,release_avf_enable_multi_tenant_microdroid_vm,$(RELEASE_AVF_ENABLE_MULTI_TENANT_MICRODROID_VM))
$(call add_soong_config_var_value,ANDROID,release_avf_enable_remote_attestation,$(RELEASE_AVF_ENABLE_REMOTE_ATTESTATION))
$(call add_soong_config_var_value,ANDROID,release_avf_enable_vendor_modules,$(RELEASE_AVF_ENABLE_VENDOR_MODULES))
+$(call add_soong_config_var_value,ANDROID,release_avf_enable_virt_cpufreq,$(RELEASE_AVF_ENABLE_VIRT_CPUFREQ))
$(call add_soong_config_var_value,ANDROID,release_binder_death_recipient_weak_from_jni,$(RELEASE_BINDER_DEATH_RECIPIENT_WEAK_FROM_JNI))
diff --git a/target/product/virtual_ab_ota/android_t_baseline.mk b/target/product/virtual_ab_ota/android_t_baseline.mk
index f862485..af0f7a9 100644
--- a/target/product/virtual_ab_ota/android_t_baseline.mk
+++ b/target/product/virtual_ab_ota/android_t_baseline.mk
@@ -21,4 +21,4 @@
# All U+ launching devices should instead use vabc_features.mk.
$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/vabc_features.mk)
-PRODUCT_VIRTUAL_AB_COW_VERSION := 2
+PRODUCT_VIRTUAL_AB_COW_VERSION ?= 2
diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs
index 70878a8..bebac890 100644
--- a/tools/aconfig/aconfig/src/storage/flag_table.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_table.rs
@@ -17,7 +17,7 @@
use crate::commands::assign_flag_ids;
use crate::storage::FlagPackage;
use aconfig_storage_file::{
- get_bucket_index, get_table_size, FlagTable, FlagTableHeader, FlagTableNode, FILE_VERSION,
+ get_table_size, FlagTable, FlagTableHeader, FlagTableNode, FILE_VERSION,
};
use anyhow::{anyhow, Result};
@@ -39,8 +39,7 @@
flag_id: u16,
num_buckets: u32,
) -> FlagTableNode {
- let full_flag_name = package_id.to_string() + "/" + flag_name;
- let bucket_index = get_bucket_index(&full_flag_name, num_buckets);
+ let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
FlagTableNode {
package_id,
flag_name: flag_name.to_string(),
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index dab752a..421f847 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -17,8 +17,8 @@
//! flag table module defines the flag table file format and methods for serialization
//! and deserialization
-use crate::{read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes};
-use anyhow::Result;
+use crate::{read_str_from_bytes, read_u16_from_bytes, read_u32_from_bytes, get_bucket_index};
+use anyhow::{anyhow, Result};
/// Flag table header struct
#[derive(PartialEq, Debug)]
@@ -86,9 +86,9 @@
}
/// Deserialize from bytes
- pub fn from_bytes(bytes: &[u8], num_buckets: u32) -> Result<Self> {
+ pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
let mut head = 0;
- let mut node = Self {
+ let node = Self {
package_id: read_u32_from_bytes(bytes, &mut head)?,
flag_name: read_str_from_bytes(bytes, &mut head)?,
flag_type: read_u16_from_bytes(bytes, &mut head)?,
@@ -99,10 +99,14 @@
},
bucket_index: 0,
};
- let full_flag_name = node.package_id.to_string() + "/" + &node.flag_name;
- node.bucket_index = crate::get_bucket_index(&full_flag_name, num_buckets);
Ok(node)
}
+
+ /// Calculate node bucket index
+ pub fn find_bucket_index(package_id: u32, flag_name: &str, num_buckets: u32) -> u32 {
+ let full_flag_name = package_id.to_string() + "/" + flag_name;
+ get_bucket_index(&full_flag_name, num_buckets)
+ }
}
#[derive(PartialEq, Debug)]
@@ -138,8 +142,10 @@
.collect();
let nodes = (0..num_flags)
.map(|_| {
- let node = FlagTableNode::from_bytes(&bytes[head..], num_buckets).unwrap();
+ let mut node = FlagTableNode::from_bytes(&bytes[head..]).unwrap();
head += node.as_bytes().len();
+ node.bucket_index = FlagTableNode::find_bucket_index(
+ node.package_id, &node.flag_name, num_buckets);
node
})
.collect();
@@ -149,6 +155,42 @@
}
}
+/// Query a flag's offset within a given package
+pub fn find_flag_offset(buf: &[u8], package_id: u32, flag: &str) -> Result<Option<u16>> {
+ let interpreted_header = FlagTableHeader::from_bytes(buf)?;
+ if interpreted_header.version > crate::FILE_VERSION {
+ return Err(anyhow!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ interpreted_header.version,
+ crate::FILE_VERSION
+ ));
+ }
+
+ let num_buckets = (interpreted_header.node_offset - interpreted_header.bucket_offset) / 4;
+ let bucket_index = FlagTableNode::find_bucket_index(package_id, flag, num_buckets);
+
+ let mut pos = (interpreted_header.bucket_offset + 4 * bucket_index) as usize;
+ let mut flag_node_offset = read_u32_from_bytes(buf, &mut pos)? as usize;
+ if flag_node_offset < interpreted_header.node_offset as usize
+ || flag_node_offset >= interpreted_header.file_size as usize
+ {
+ return Ok(None);
+ }
+
+ loop {
+ let interpreted_node = FlagTableNode::from_bytes(&buf[flag_node_offset..])?;
+ if interpreted_node.package_id == package_id &&
+ interpreted_node.flag_name == flag {
+ return Ok(Some(interpreted_node.flag_id));
+ }
+ match interpreted_node.next_offset {
+ Some(offset) => flag_node_offset = offset as usize,
+ None => return Ok(None),
+ }
+ }
+
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -228,13 +270,70 @@
let nodes: &Vec<FlagTableNode> = &flag_table.nodes;
let num_buckets = crate::get_table_size(header.num_flags).unwrap();
for node in nodes.iter() {
- let reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes(), num_buckets);
- assert!(reinterpreted_node.is_ok());
- assert_eq!(node, &reinterpreted_node.unwrap());
+ let mut reinterpreted_node = FlagTableNode::from_bytes(&node.as_bytes()).unwrap();
+ reinterpreted_node.bucket_index = FlagTableNode::find_bucket_index(
+ reinterpreted_node.package_id,
+ &reinterpreted_node.flag_name,
+ num_buckets
+ );
+ assert_eq!(node, &reinterpreted_node);
}
let reinterpreted_table = FlagTable::from_bytes(&flag_table.as_bytes());
assert!(reinterpreted_table.is_ok());
assert_eq!(&flag_table, &reinterpreted_table.unwrap());
}
+
+ #[test]
+ // this test point locks down table query
+ fn test_flag_query() {
+ let flag_table = create_test_flag_table().unwrap().as_bytes();
+ let baseline = vec![
+ (0, "enabled_ro", 1u16),
+ (0, "enabled_rw", 2u16),
+ (1, "disabled_ro", 0u16),
+ (2, "enabled_ro", 1u16),
+ (1, "enabled_fixed_ro", 1u16),
+ (1, "enabled_ro", 2u16),
+ (2, "enabled_fixed_ro", 0u16),
+ (0, "disabled_rw", 0u16),
+ ];
+ for (package_id, flag_name, expected_offset) in baseline.into_iter() {
+ let flag_offset =
+ find_flag_offset(&flag_table[..], package_id, flag_name)
+ .unwrap()
+ .unwrap();
+ assert_eq!(flag_offset, expected_offset);
+ }
+ }
+
+ #[test]
+ // this test point locks down table query of a non-existent flag
+ fn test_not_existed_flag_query() {
+ let flag_table = create_test_flag_table().unwrap().as_bytes();
+ let flag_offset =
+ find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
+ assert_eq!(flag_offset, None);
+ let flag_offset =
+ find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
+ assert_eq!(flag_offset, None);
+ }
+
+ #[test]
+ // this test point locks down query error when the file has a higher version
+ fn test_higher_version_storage_file() {
+ let mut table = create_test_flag_table().unwrap();
+ table.header.version = crate::FILE_VERSION + 1;
+ let flag_table = table.as_bytes();
+ let error = find_flag_offset(&flag_table[..], 0, "enabled_ro")
+ .unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ format!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ crate::FILE_VERSION + 1,
+ crate::FILE_VERSION
+ )
+ );
+ }
}
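For context, here is a minimal usage sketch of the new flag table query entry point; it is not part of the change itself. It assumes the flag table bytes have already been read from storage, that the crate is named aconfig_storage_file, and that find_flag_offset is reachable under the flag_table module path shown in this patch. The package id and flag name mirror the test baseline above.

// Minimal usage sketch for the new flag table query API (assumptions: crate
// name and module path match this patch; the byte buffer is read elsewhere).
use aconfig_storage_file::flag_table::find_flag_offset;
use anyhow::Result;

fn print_flag_offset(flag_table_bytes: &[u8]) -> Result<()> {
    // Look up the flag "enabled_ro" belonging to package id 0; per the test
    // baseline above this should resolve to offset 1.
    match find_flag_offset(flag_table_bytes, 0, "enabled_ro")? {
        Some(offset) => println!("flag offset within package: {}", offset),
        None => println!("flag not found"),
    }
    Ok(())
}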
diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs
index 14eef70..8fd57e6 100644
--- a/tools/aconfig/aconfig_storage_file/src/package_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs
@@ -17,8 +17,8 @@
//! package table module defines the package table file format and methods for serialization
//! and deserialization
-use crate::{read_str_from_bytes, read_u32_from_bytes};
-use anyhow::Result;
+use crate::{get_bucket_index, read_str_from_bytes, read_u32_from_bytes};
+use anyhow::{anyhow, Result};
/// Package table header struct
#[derive(PartialEq, Debug)]
@@ -86,9 +86,9 @@
}
/// Deserialize from bytes
- pub fn from_bytes(bytes: &[u8], num_buckets: u32) -> Result<Self> {
+ pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
let mut head = 0;
- let mut node = Self {
+ let node = Self {
package_name: read_str_from_bytes(bytes, &mut head)?,
package_id: read_u32_from_bytes(bytes, &mut head)?,
boolean_offset: read_u32_from_bytes(bytes, &mut head)?,
@@ -98,7 +98,6 @@
},
bucket_index: 0,
};
- node.bucket_index = crate::get_bucket_index(&node.package_name, num_buckets);
Ok(node)
}
}
@@ -136,8 +135,9 @@
.collect();
let nodes = (0..num_packages)
.map(|_| {
- let node = PackageTableNode::from_bytes(&bytes[head..], num_buckets).unwrap();
+ let mut node = PackageTableNode::from_bytes(&bytes[head..]).unwrap();
head += node.as_bytes().len();
+ node.bucket_index = get_bucket_index(&node.package_name, num_buckets);
node
})
.collect();
@@ -147,6 +147,50 @@
}
}
+/// Package table query return
+#[derive(PartialEq, Debug)]
+pub struct PackageOffset {
+ pub package_id: u32,
+ pub boolean_offset: u32,
+}
+
+/// Query a package's id and its boolean flag start offset
+pub fn find_package_offset(buf: &[u8], package: &str) -> Result<Option<PackageOffset>> {
+ let interpreted_header = PackageTableHeader::from_bytes(buf)?;
+ if interpreted_header.version > crate::FILE_VERSION {
+ return Err(anyhow!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ interpreted_header.version,
+ crate::FILE_VERSION
+ ));
+ }
+
+ let num_buckets = (interpreted_header.node_offset - interpreted_header.bucket_offset) / 4;
+ let bucket_index = get_bucket_index(&package, num_buckets);
+
+ let mut pos = (interpreted_header.bucket_offset + 4 * bucket_index) as usize;
+ let mut package_node_offset = read_u32_from_bytes(buf, &mut pos)? as usize;
+ if package_node_offset < interpreted_header.node_offset as usize
+ || package_node_offset >= interpreted_header.file_size as usize
+ {
+ return Ok(None);
+ }
+
+ loop {
+ let interpreted_node = PackageTableNode::from_bytes(&buf[package_node_offset..])?;
+ if interpreted_node.package_name == package {
+ return Ok(Some(PackageOffset {
+ package_id: interpreted_node.package_id,
+ boolean_offset: interpreted_node.boolean_offset,
+ }));
+ }
+ match interpreted_node.next_offset {
+ Some(offset) => package_node_offset = offset as usize,
+ None => return Ok(None),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -199,13 +243,70 @@
let nodes: &Vec<PackageTableNode> = &package_table.nodes;
let num_buckets = crate::get_table_size(header.num_packages).unwrap();
for node in nodes.iter() {
- let reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes(), num_buckets);
- assert!(reinterpreted_node.is_ok());
- assert_eq!(node, &reinterpreted_node.unwrap());
+ let mut reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes()).unwrap();
+ reinterpreted_node.bucket_index =
+ get_bucket_index(&reinterpreted_node.package_name, num_buckets);
+ assert_eq!(node, &reinterpreted_node);
}
let reinterpreted_table = PackageTable::from_bytes(&package_table.as_bytes());
assert!(reinterpreted_table.is_ok());
assert_eq!(&package_table, &reinterpreted_table.unwrap());
}
+
+ #[test]
+ // this test point locks down table query
+ fn test_package_query() {
+ let package_table = create_test_package_table().unwrap().as_bytes();
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_1")
+ .unwrap()
+ .unwrap();
+ let expected_package_offset = PackageOffset { package_id: 0, boolean_offset: 0 };
+ assert_eq!(package_offset, expected_package_offset);
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_2")
+ .unwrap()
+ .unwrap();
+ let expected_package_offset = PackageOffset { package_id: 1, boolean_offset: 3 };
+ assert_eq!(package_offset, expected_package_offset);
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_4")
+ .unwrap()
+ .unwrap();
+ let expected_package_offset = PackageOffset { package_id: 2, boolean_offset: 6 };
+ assert_eq!(package_offset, expected_package_offset);
+ }
+
+ #[test]
+ // this test point locks down table query of a non-existent package
+ fn test_not_existed_package_query() {
+ // this will land at an empty bucket
+ let package_table = create_test_package_table().unwrap().as_bytes();
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_3").unwrap();
+ assert_eq!(package_offset, None);
+ // this will land at the end of a linked list
+ let package_offset =
+ find_package_offset(&package_table[..], "com.android.aconfig.storage.test_5").unwrap();
+ assert_eq!(package_offset, None);
+ }
+
+ #[test]
+ // this test point locks down query error when the file has a higher version
+ fn test_higher_version_storage_file() {
+ let mut table = create_test_package_table().unwrap();
+ table.header.version = crate::FILE_VERSION + 1;
+ let package_table = table.as_bytes();
+ let error = find_package_offset(&package_table[..], "com.android.aconfig.storage.test_1")
+ .unwrap_err();
+ assert_eq!(
+ format!("{:?}", error),
+ format!(
+ "Cannot read storage file with a higher version of {} with lib version {}",
+ crate::FILE_VERSION + 1,
+ crate::FILE_VERSION
+ )
+ );
+ }
}
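Taken together with the flag table change above, the intended lookup appears to be two-step: resolve the package first, then the flag within it. The sketch below illustrates that flow; the map file names and the final offset arithmetic (adding the package's boolean block start to the flag's offset) are assumptions for illustration, not something this patch establishes.

// Illustrative two-step lookup built on the new query helpers. Module paths,
// map file names, and the final offset arithmetic are assumptions.
use aconfig_storage_file::flag_table::find_flag_offset;
use aconfig_storage_file::package_table::find_package_offset;
use anyhow::Result;
use std::fs;

fn query_boolean_flag_index(package: &str, flag: &str) -> Result<Option<u32>> {
    // Hypothetical file names; the patch only defines the in-memory formats.
    let package_bytes = fs::read("package.map")?;
    let flag_bytes = fs::read("flag.map")?;

    // Step 1: resolve the package to its id and boolean flag block start.
    let pkg = match find_package_offset(&package_bytes, package)? {
        Some(p) => p,
        None => return Ok(None),
    };

    // Step 2: resolve the flag's offset within that package.
    let flag_offset = match find_flag_offset(&flag_bytes, pkg.package_id, flag)? {
        Some(o) => o,
        None => return Ok(None),
    };

    // Assumed convention: absolute boolean index = package block start + flag offset.
    Ok(Some(pkg.boolean_offset + flag_offset as u32))
}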
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index bde152f..a7b1d8c 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -260,7 +260,7 @@
if fs_type.startswith("ext4") and partition_headroom > reserved_size:
reserved_size = partition_headroom
- return size + reserved_size
+ return int(size * 1.1) + reserved_size
def BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config):
@@ -636,7 +636,7 @@
size = verity_image_builder.CalculateDynamicPartitionSize(size)
prop_dict["partition_size"] = str(size)
logger.info(
- "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
+ "Allocating %d MB for %s", size // BYTES_IN_MB, out_file)
prop_dict["image_size"] = prop_dict["partition_size"]
@@ -979,7 +979,11 @@
parser.add_argument("target_out",
help="the path to $(TARGET_OUT). Certain tools will use this to look through multiple staging "
"directories for fs config files.")
+ parser.add_argument("-v", action="store_true",
+ help="Enable verbose logging", dest="verbose")
args = parser.parse_args()
+ if args.verbose:
+ OPTIONS.verbose = True
common.InitLogging()