Merge "[bssl] Improve error processing when BoringSSL API fails" into main
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index a524655..7406164 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -330,7 +330,7 @@
}
impl<'a> DescendantsIterator<'a> {
- pub(crate) fn new(node: &'a FdtNode) -> Self {
+ pub(crate) fn new(node: &FdtNode<'a>) -> Self {
Self { node: Some((*node, 0)) }
}
}
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 7eb08b2..d90f5f0 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -487,7 +487,7 @@
}
/// Returns an iterator of descendants
- pub fn descendants(&'a self) -> DescendantsIterator<'a> {
+ pub fn descendants(&self) -> DescendantsIterator<'a> {
DescendantsIterator::new(self)
}
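
The relaxed `descendants(&self)` signature (paired with the `DescendantsIterator::new(&FdtNode<'a>)` change in iterators.rs) ties the returned iterator to the tree lifetime `'a` rather than to the borrow of the node value. A minimal sketch of what this allows, condensed from the borrow-checker tests added to api_test.rs below (the `data` buffer is assumed to hold a valid test DT blob):

    let fdt = Fdt::from_slice(&data).unwrap();
    let mut descendants = {
        let root = fdt.root().unwrap();
        root.descendants()
        // root is dropped here, but the iterator keeps borrowing from fdt.
    };
    let (first, _depth) = descendants.next().unwrap();
    assert_eq!(Ok(cstr!("node_a")), first.name());
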
@@ -811,6 +811,41 @@
Ok(next_offset.map(|offset| Self { fdt: self.fdt, offset }))
}
+ fn next_node_offset(&self, depth: usize) -> Result<Option<(c_int, usize)>> {
+ let mut next_depth: c_int = depth.try_into().or(Err(FdtError::BadValue))?;
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_next_node(self.fdt.as_ptr(), self.offset, &mut next_depth)
+ };
+ let Ok(next_depth) = usize::try_from(next_depth) else {
+ return Ok(None);
+ };
+ Ok(fdt_err_or_option(ret)?.map(|offset| (offset, next_depth)))
+ }
+
+ /// Returns the next node in depth-first order, together with its depth, if any
+ pub fn next_node(self, depth: usize) -> Result<Option<(Self, usize)>> {
+ Ok(self
+ .next_node_offset(depth)?
+ .map(|(offset, next_depth)| (FdtNodeMut { fdt: self.fdt, offset }, next_depth)))
+ }
+
+ /// Deletes this node (including its descendants) and returns the next remaining node, together with its depth
+ pub fn delete_and_next_node(mut self, depth: usize) -> Result<Option<(Self, usize)>> {
+ // Skip all would-be-removed descendants.
+ let mut iter = self.next_node_offset(depth)?;
+ while let Some((descendant_offset, descendant_depth)) = iter {
+ if descendant_depth <= depth {
+ break;
+ }
+ let descendant = FdtNodeMut { fdt: self.fdt, offset: descendant_offset };
+ iter = descendant.next_node_offset(descendant_depth)?;
+ }
+ // SAFETY: This consumes `self`, so the invalidated node cannot be used any further.
+ unsafe { self.nop_self()? };
+ Ok(iter.map(|(offset, next_depth)| (FdtNodeMut { fdt: self.fdt, offset }, next_depth)))
+ }
+
fn parent(&'a self) -> Result<FdtNode<'a>> {
// SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
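
A condensed usage sketch of the new mutable traversal API, mirroring the node_mut_delete_and_next_node test added below (a writable DT blob in `data` is assumed): walk the tree depth-first and delete every node named "node_a", letting delete_and_next_node() skip over the deleted subtree.

    let fdt = Fdt::from_mut_slice(&mut data).unwrap();
    let mut iter = fdt.root_mut().unwrap().next_node(0).unwrap();
    while let Some((node, depth)) = iter {
        iter = if node.as_node().name() == Ok(cstr!("node_a")) {
            // Deletes the node together with its descendants, then yields the next remaining node.
            node.delete_and_next_node(depth).unwrap()
        } else {
            node.next_node(depth).unwrap()
        };
    }
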
diff --git a/libs/libfdt/tests/api_test.rs b/libs/libfdt/tests/api_test.rs
index e68557f..08fb8a5 100644
--- a/libs/libfdt/tests/api_test.rs
+++ b/libs/libfdt/tests/api_test.rs
@@ -399,3 +399,94 @@
assert_eq!(expected_names, subnode_names);
}
+
+#[test]
+fn node_mut_delete_and_next_node() {
+ let mut data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_mut_slice(&mut data).unwrap();
+
+ let expected_nodes = vec![
+ (Ok(cstr!("node_b")), 1),
+ (Ok(cstr!("node_c")), 1),
+ (Ok(cstr!("node_z")), 1),
+ (Ok(cstr!("node_za")), 2),
+ (Ok(cstr!("node_zb")), 2),
+ (Ok(cstr!("__symbols__")), 1),
+ ];
+
+ let mut expected_nodes_iter = expected_nodes.iter();
+ let mut iter = fdt.root_mut().unwrap().next_node(0).unwrap();
+ while let Some((node, depth)) = iter {
+ let node_name = node.as_node().name();
+ if node_name == Ok(cstr!("node_a")) || node_name == Ok(cstr!("node_zz")) {
+ iter = node.delete_and_next_node(depth).unwrap();
+ } else {
+ // Note: Checking the name here is easier than collecting names for a single assert_eq!(),
+ // because name references can't be kept alive while iterating with FdtNodeMut.
+ let expected_node = expected_nodes_iter.next();
+ assert_eq!(expected_node, Some(&(node_name, depth)));
+ iter = node.next_node(depth).unwrap();
+ }
+ }
+ assert_eq!(None, expected_nodes_iter.next());
+
+ let root = fdt.root().unwrap();
+ let all_descendants: Vec<_> =
+ root.descendants().map(|(node, depth)| (node.name(), depth)).collect();
+ assert_eq!(expected_nodes, all_descendants);
+}
+
+#[test]
+#[ignore] // Borrow checker test. Compilation success is sufficient.
+fn node_name_lifetime() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ let name = {
+ let root = fdt.root().unwrap();
+ root.name()
+ // root is dropped at the end of this block
+ };
+ assert_eq!(Ok(cstr!("")), name);
+}
+
+#[test]
+#[ignore] // Borrow checker test. Compilation success is sufficient.
+fn node_subnode_lifetime() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ let name = {
+ let node_a = {
+ let root = fdt.root().unwrap();
+ root.subnode(cstr!("node_a")).unwrap()
+ // root is dropped at the end of this block
+ };
+ assert_ne!(None, node_a);
+ node_a.unwrap().name()
+ // node_a is dropped at the end of this block
+ };
+ assert_eq!(Ok(cstr!("node_a")), name);
+}
+
+#[test]
+#[ignore] // Borrow checker test. Compilation success is sufficient.
+fn node_descendants_lifetime() {
+ let data = fs::read(TEST_TREE_PHANDLE_PATH).unwrap();
+ let fdt = Fdt::from_slice(&data).unwrap();
+
+ let first_descendant_name = {
+ let (first_descendant, _) = {
+ let mut descendants_iter = {
+ let root = fdt.root().unwrap();
+ root.descendants()
+ // root is dropped at the end of this block
+ };
+ descendants_iter.next().unwrap()
+ // descendants_iter is dropped at the end of this block
+ };
+ first_descendant.name()
+ // first_descendant is dropped at the end of this block
+ };
+ assert_eq!(Ok(cstr!("node_a")), first_descendant_name);
+}
diff --git a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
index f5656e2..b176cfc 100644
--- a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
+++ b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
@@ -45,7 +45,6 @@
import org.junit.runner.RunWith;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
@@ -413,11 +412,16 @@
for (int round = 0; round < ROUND_COUNT; ++round) {
reInstallApex(REINSTALL_APEX_TIMEOUT_SEC);
- if (isWithCompos) {
- compileStagedApex(COMPILE_STAGED_APEX_TIMEOUT_SEC);
+ try {
+ if (isWithCompos) {
+ compileStagedApex(COMPILE_STAGED_APEX_TIMEOUT_SEC);
+ }
+ } finally {
+ // If compilation fails, we are still left with a staged APEX, so we need to reboot
+ // to clean it up before the following tests.
+ getDevice().nonBlockingReboot();
+ waitForBootCompleted();
}
- getDevice().nonBlockingReboot();
- waitForBootCompleted();
double elapsedSec = getDmesgBootTime();
bootDmesgTime.add(elapsedSec);
@@ -458,8 +462,9 @@
try {
CommandRunner android = new CommandRunner(getDevice());
- String result = android.run(
- COMPOSD_CMD_BIN + " staged-apex-compile");
+ String result =
+ android.runWithTimeout(
+ 3 * 60 * 1000, COMPOSD_CMD_BIN + " staged-apex-compile");
assertWithMessage("Failed to compile staged APEX. Reason: " + result)
.that(result).ignoringCase().contains("all ok");
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/CommandRunner.java b/tests/hostside/helper/java/com/android/microdroid/test/host/CommandRunner.java
index 846531d..242dbde 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/CommandRunner.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/CommandRunner.java
@@ -66,9 +66,7 @@
public String runWithTimeout(long timeoutMillis, String... cmd)
throws DeviceNotAvailableException {
- CommandResult result =
- mDevice.executeShellV2Command(
- join(cmd), timeoutMillis, java.util.concurrent.TimeUnit.MILLISECONDS);
+ CommandResult result = runForResultWithTimeout(timeoutMillis, cmd);
if (result.getStatus() != CommandStatus.SUCCESS) {
fail(join(cmd) + " has failed: " + result);
}