Merge "Annotate our CTS tests using CddTests annotation."
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index 30c55b3..cd1ece4 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -148,11 +148,19 @@
self.0.connect_service(COMPOS_VSOCK_PORT).context("Connecting to CompOS service")
}
+    /// Shut down the VM cleanly by sending a quit request to the service, giving time for any
+ /// relevant logs to be written.
+ pub fn shutdown(self, service: Strong<dyn ICompOsService>) {
+ info!("Requesting CompOS VM to shutdown");
+ let _ = service.quit(); // If this fails, the VM is probably dying anyway
+ self.wait_for_shutdown();
+ }
+
/// Wait for the instance to shut down. If it fails to shut down within a reasonable time, the
/// instance is dropped, which forcibly terminates it.
/// This should only be called when the instance has been requested to quit, or we believe that
/// it is already in the process of exiting due to some failure.
- pub fn wait_for_shutdown(self) {
+ fn wait_for_shutdown(self) {
let death_reason = self.0.wait_for_death_with_timeout(TIMEOUTS.vm_max_time_to_exit);
match death_reason {
Some(vmclient::DeathReason::Shutdown) => info!("VM has exited normally"),
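
The quit-then-wait flow that shutdown() consolidates (previously open-coded in compos/verify/verify.rs, see that diff below) can be pictured with a minimal stand-alone sketch; FakeService and FakeVm are hypothetical stand-ins, not the real ICompOsService or vmclient handles:

use std::time::Duration;

struct FakeService;
impl FakeService {
    fn quit(&self) -> Result<(), ()> {
        Ok(())
    }
}

struct FakeVm;
impl FakeVm {
    // Returns Some(reason) if the VM exits within the timeout, None on timeout.
    fn wait_for_death_with_timeout(&self, _timeout: Duration) -> Option<&'static str> {
        Some("Shutdown")
    }
}

fn shutdown(vm: FakeVm, service: FakeService) {
    // If this fails, the VM is probably dying anyway, so the error is deliberately ignored.
    let _ = service.quit();
    match vm.wait_for_death_with_timeout(Duration::from_secs(5)) {
        Some("Shutdown") => println!("VM has exited normally"),
        Some(reason) => println!("VM exited with reason: {reason}"),
        None => println!("VM failed to exit in time; dropping the handle terminates it"),
    }
}

fn main() {
    shutdown(FakeVm, FakeService);
}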
diff --git a/compos/common/timeouts.rs b/compos/common/timeouts.rs
index 952be0a..7bd7679 100644
--- a/compos/common/timeouts.rs
+++ b/compos/common/timeouts.rs
@@ -46,7 +46,7 @@
// Note: the source of truth for this odrefresh timeout is art/odrefresh/odrefresh.cc.
odrefresh_max_execution_time: Duration::from_secs(300),
vm_max_time_to_ready: Duration::from_secs(15),
- vm_max_time_to_exit: Duration::from_secs(3),
+ vm_max_time_to_exit: Duration::from_secs(5),
};
/// The timeouts that we use when running under nested virtualization.
@@ -54,5 +54,5 @@
// Note: the source of truth for this odrefresh timeout is art/odrefresh/odrefresh.cc.
odrefresh_max_execution_time: Duration::from_secs(480),
vm_max_time_to_ready: Duration::from_secs(120),
- vm_max_time_to_exit: Duration::from_secs(10),
+ vm_max_time_to_exit: Duration::from_secs(20),
};
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index c45f6e7..9a0935c 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -45,13 +45,13 @@
Self { service, state: Default::default() }
}
- pub fn start_current_instance(&self) -> Result<Arc<CompOsInstance>> {
+ pub fn start_current_instance(&self) -> Result<CompOsInstance> {
let mut vm_parameters = new_vm_parameters()?;
vm_parameters.config_path = Some(PREFER_STAGED_VM_CONFIG_PATH.to_owned());
self.start_instance(CURRENT_INSTANCE_DIR, vm_parameters)
}
- pub fn start_test_instance(&self, prefer_staged: bool) -> Result<Arc<CompOsInstance>> {
+ pub fn start_test_instance(&self, prefer_staged: bool) -> Result<CompOsInstance> {
let mut vm_parameters = new_vm_parameters()?;
vm_parameters.debug_mode = true;
if prefer_staged {
@@ -64,28 +64,23 @@
&self,
instance_name: &str,
vm_parameters: VmParameters,
- ) -> Result<Arc<CompOsInstance>> {
+ ) -> Result<CompOsInstance> {
let mut state = self.state.lock().unwrap();
state.mark_starting()?;
// Don't hold the lock while we start the instance to avoid blocking other callers.
drop(state);
let instance_starter = InstanceStarter::new(instance_name, vm_parameters);
- let instance = self.try_start_instance(instance_starter);
+ let instance = instance_starter.start_new_instance(&*self.service);
let mut state = self.state.lock().unwrap();
if let Ok(ref instance) = instance {
- state.mark_started(instance)?;
+ state.mark_started(instance.get_instance_tracker())?;
} else {
state.mark_stopped();
}
instance
}
-
- fn try_start_instance(&self, instance_starter: InstanceStarter) -> Result<Arc<CompOsInstance>> {
- let compos_instance = instance_starter.start_new_instance(&*self.service)?;
- Ok(Arc::new(compos_instance))
- }
}
fn new_vm_parameters() -> Result<VmParameters> {
@@ -110,14 +105,14 @@
// Ensures we only run one instance at a time.
// Valid states:
-// Starting: is_starting is true, running_instance is None.
-// Started: is_starting is false, running_instance is Some(x) and there is a strong ref to x.
-// Stopped: is_starting is false and running_instance is None or a weak ref to a dropped instance.
+// Starting: is_starting is true, instance_tracker is None.
+// Started: is_starting is false, instance_tracker is Some(x) and there is a strong ref to x.
+// Stopped: is_starting is false and instance_tracker is None or a weak ref to a dropped instance.
// The panic calls here should never happen, unless the code above in InstanceManager is buggy.
// In particular nothing the client does should be able to trigger them.
#[derive(Default)]
struct State {
- running_instance: Option<Weak<CompOsInstance>>,
+ instance_tracker: Option<Weak<()>>,
is_starting: bool,
}
@@ -127,34 +122,34 @@
if self.is_starting {
bail!("An instance is already starting");
}
- if let Some(weak) = &self.running_instance {
+ if let Some(weak) = &self.instance_tracker {
if weak.strong_count() != 0 {
bail!("An instance is already running");
}
}
- self.running_instance = None;
+ self.instance_tracker = None;
self.is_starting = true;
Ok(())
}
// Move from Starting to Stopped.
fn mark_stopped(&mut self) {
- if !self.is_starting || self.running_instance.is_some() {
+ if !self.is_starting || self.instance_tracker.is_some() {
panic!("Tried to mark stopped when not starting");
}
self.is_starting = false;
}
// Move from Starting to Started.
- fn mark_started(&mut self, instance: &Arc<CompOsInstance>) -> Result<()> {
+ fn mark_started(&mut self, instance_tracker: &Arc<()>) -> Result<()> {
if !self.is_starting {
panic!("Tried to mark started when not starting")
}
- if self.running_instance.is_some() {
+ if self.instance_tracker.is_some() {
panic!("Attempted to mark started when already started");
}
self.is_starting = false;
- self.running_instance = Some(Arc::downgrade(instance));
+ self.instance_tracker = Some(Arc::downgrade(instance_tracker));
Ok(())
}
}
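
The instance_tracker plumbing above relies on a small Arc/Weak idiom: the instance owns an Arc<()>, the manager keeps only a Weak<()>, and strong_count() tells the manager whether the instance is still alive without keeping it alive. A self-contained sketch of just that idiom, using hypothetical Instance/Manager types rather than the CompOS ones:

use std::sync::{Arc, Weak};

struct Instance {
    tracker: Arc<()>, // dropped together with the instance
}

struct Manager {
    tracker: Option<Weak<()>>,
}

impl Manager {
    fn is_running(&self) -> bool {
        // A live instance still holds the only strong reference.
        self.tracker.as_ref().map_or(false, |weak| weak.strong_count() != 0)
    }
}

fn main() {
    let instance = Instance { tracker: Arc::new(()) };
    let manager = Manager { tracker: Some(Arc::downgrade(&instance.tracker)) };
    assert!(manager.is_running());
    drop(instance); // dropping the instance drops its Arc<()> ...
    assert!(!manager.is_running()); // ... so the Weak now sees zero strong references
    println!("tracker correctly reports the instance as stopped");
}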
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index 6e6253e..111c719 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -29,6 +29,7 @@
use log::info;
use std::fs;
use std::path::{Path, PathBuf};
+use std::sync::Arc;
pub struct CompOsInstance {
service: Strong<dyn ICompOsService>,
@@ -36,12 +37,28 @@
vm_instance: ComposClient,
#[allow(dead_code)] // Keeps composd process alive
lazy_service_guard: LazyServiceGuard,
+    // Keep this alive for as long as we are, so a Weak reference to it can track our lifetime
+ instance_tracker: Arc<()>,
}
impl CompOsInstance {
pub fn get_service(&self) -> Strong<dyn ICompOsService> {
self.service.clone()
}
+
+ /// Returns an Arc that this instance holds a strong reference to as long as it exists. This
+ /// can be used to determine when the instance has been dropped.
+ pub fn get_instance_tracker(&self) -> &Arc<()> {
+ &self.instance_tracker
+ }
+
+ /// Attempt to shut down the VM cleanly, giving time for any relevant logs to be written.
+ pub fn shutdown(self) -> LazyServiceGuard {
+ self.vm_instance.shutdown(self.service);
+ // Return the guard to the caller, since we might be terminated at any point after it is
+ // dropped, and there might still be things to do.
+ self.lazy_service_guard
+ }
}
pub struct InstanceStarter {
@@ -114,7 +131,12 @@
)
.context("Starting VM")?;
let service = vm_instance.connect_service().context("Connecting to CompOS")?;
- Ok(CompOsInstance { vm_instance, service, lazy_service_guard: Default::default() })
+ Ok(CompOsInstance {
+ vm_instance,
+ service,
+ lazy_service_guard: Default::default(),
+ instance_tracker: Default::default(),
+ })
}
fn create_instance_image(
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
index 51e866f..8fd574c 100644
--- a/compos/composd/src/odrefresh_task.rs
+++ b/compos/composd/src/odrefresh_task.rs
@@ -49,7 +49,9 @@
impl ICompilationTask for OdrefreshTask {
fn cancel(&self) -> BinderResult<()> {
let task = self.take();
- // Drop the VM, which should end compilation - and cause our thread to exit
+ // Drop the VM, which should end compilation - and cause our thread to exit.
+ // Note that we don't do a graceful shutdown here; we've been asked to give up our resources
+        // ASAP, and the VM has not failed, so we don't need to ensure VM logs are written.
drop(task);
Ok(())
}
@@ -58,7 +60,7 @@
struct RunningTask {
callback: Strong<dyn ICompilationTaskCallback>,
#[allow(dead_code)] // Keeps the CompOS VM alive
- comp_os: Arc<CompOsInstance>,
+ comp_os: CompOsInstance,
}
impl OdrefreshTask {
@@ -70,7 +72,7 @@
}
pub fn start(
- comp_os: Arc<CompOsInstance>,
+ comp_os: CompOsInstance,
compilation_mode: CompilationMode,
target_dir_name: String,
callback: &Strong<dyn ICompilationTaskCallback>,
@@ -95,27 +97,30 @@
let task = self.take();
// We don't do the callback if cancel has already happened.
- if let Some(task) = task {
+ if let Some(RunningTask { callback, comp_os }) = task {
+ // Make sure we keep our service alive until we have called the callback.
+ let lazy_service_guard = comp_os.shutdown();
+
let result = match exit_code {
Ok(ExitCode::CompilationSuccess) => {
info!("CompilationSuccess");
- task.callback.onSuccess()
+ callback.onSuccess()
}
Ok(exit_code) => {
let message = format!("Unexpected odrefresh result: {:?}", exit_code);
error!("{}", message);
- task.callback
- .onFailure(FailureReason::UnexpectedCompilationResult, &message)
+ callback.onFailure(FailureReason::UnexpectedCompilationResult, &message)
}
Err(e) => {
let message = format!("Running odrefresh failed: {:?}", e);
error!("{}", message);
- task.callback.onFailure(FailureReason::CompilationFailed, &message)
+ callback.onFailure(FailureReason::CompilationFailed, &message)
}
};
if let Err(e) = result {
warn!("Failed to deliver callback: {:?}", e);
}
+ drop(lazy_service_guard);
}
});
}
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index 9fa68d6..5d58221 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -20,7 +20,7 @@
use anyhow::{bail, Context, Result};
use binder_common::new_binder_exception;
-use log::error;
+use log::{error, info};
use rustutils::system_properties;
use std::default::Default;
use std::fs::read_dir;
@@ -153,8 +153,8 @@
}
fn quit(&self) -> BinderResult<()> {
- // TODO(b/236581575) Consider shutting down the binder server a bit more gracefully.
// When our process exits, Microdroid will shut down the VM.
+ info!("Received quit request, exiting");
std::process::exit(0);
}
}
diff --git a/compos/verify/verify.rs b/compos/verify/verify.rs
index e6848c7..224cde7 100644
--- a/compos/verify/verify.rs
+++ b/compos/verify/verify.rs
@@ -110,9 +110,7 @@
let service = vm_instance.connect_service()?;
let public_key = service.getPublicKey().context("Getting public key");
- // Shut down the VM cleanly, giving time for any relevant logs to be written
- let _ = service.quit(); // If this fails, the VM is probably dying anyway
- vm_instance.wait_for_shutdown();
+ vm_instance.shutdown(service);
if !compos_verify_native::verify(&public_key?, &signature, &info) {
bail!("Signature verification failed");
diff --git a/libs/binder_common/rpc_client.rs b/libs/binder_common/rpc_client.rs
index 1aabe84..33fd732 100644
--- a/libs/binder_common/rpc_client.rs
+++ b/libs/binder_common/rpc_client.rs
@@ -17,10 +17,11 @@
//! Helpers for implementing an RPC Binder client.
use binder::unstable_api::{new_spibinder, AIBinder};
-use binder::{StatusCode, Strong};
+use binder::{FromIBinder, StatusCode, Strong};
+use std::os::{raw, unix::io::RawFd};
/// Connects to a binder RPC server.
-pub fn connect_rpc_binder<T: binder::FromIBinder + ?Sized>(
+pub fn connect_rpc_binder<T: FromIBinder + ?Sized>(
cid: u32,
port: u32,
) -> Result<Strong<T>, StatusCode> {
@@ -35,3 +36,40 @@
Err(StatusCode::BAD_VALUE)
}
}
+
+type RequestFd<'a> = &'a mut dyn FnMut() -> Option<RawFd>;
+
+/// Connects to a Binder RPC server, using the given callback to get (and take ownership of) file
+/// descriptors already connected to it.
+pub fn connect_preconnected_rpc_binder<T: FromIBinder + ?Sized>(
+ mut request_fd: impl FnMut() -> Option<RawFd>,
+) -> Result<Strong<T>, StatusCode> {
+ // Double reference the factory because trait objects aren't FFI safe.
+ let mut request_fd_ref: RequestFd = &mut request_fd;
+ let param = &mut request_fd_ref as *mut RequestFd as *mut raw::c_void;
+
+ // SAFETY: AIBinder returned by RpcPreconnectedClient has correct reference count, and the
+ // ownership can be safely taken by new_spibinder. RpcPreconnectedClient does not take ownership
+ // of param, only passing it to request_fd_wrapper.
+ let ibinder = unsafe {
+ new_spibinder(binder_rpc_unstable_bindgen::RpcPreconnectedClient(
+ Some(request_fd_wrapper),
+ param,
+ ) as *mut AIBinder)
+ };
+
+ if let Some(ibinder) = ibinder {
+ <T>::try_from(ibinder)
+ } else {
+ Err(StatusCode::BAD_VALUE)
+ }
+}
+
+unsafe extern "C" fn request_fd_wrapper(param: *mut raw::c_void) -> raw::c_int {
+    // SAFETY: This is only ever called by RpcPreconnectedClient, within the lifetime of the
+    // RequestFd reference passed via param, with param being a properly aligned non-null
+    // pointer to an initialized instance.
+ let request_fd_ptr = param as *mut RequestFd;
+ let request_fd = request_fd_ptr.as_mut().unwrap();
+ request_fd().unwrap_or(-1)
+}
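
The "double reference" comment above is the key trick here and in run_rpc_server_with_factory below: a &mut dyn FnMut is a fat pointer and not FFI safe, but a reference to that reference is a thin pointer that fits in a *mut c_void. A self-contained sketch of the trick with no real FFI involved (fake_c_caller merely stands in for a C entry point such as RpcPreconnectedClient):

use std::os::raw::{c_int, c_void};

type RequestFd<'a> = &'a mut dyn FnMut() -> Option<i32>;

// Stands in for a C entry point that invokes the callback with the opaque param.
fn fake_c_caller(cb: unsafe extern "C" fn(*mut c_void) -> c_int, param: *mut c_void) -> c_int {
    unsafe { cb(param) }
}

unsafe extern "C" fn request_fd_wrapper(param: *mut c_void) -> c_int {
    // SAFETY (within this sketch): param was built below from a live &mut RequestFd.
    let request_fd = (param as *mut RequestFd).as_mut().unwrap();
    request_fd().unwrap_or(-1)
}

fn main() {
    let mut next_fd = Some(42);
    let mut request_fd = || next_fd.take();
    // The trait object reference is a fat pointer; referencing it again gives a thin
    // pointer that can be smuggled through *mut c_void.
    let mut request_fd_ref: RequestFd = &mut request_fd;
    let param = &mut request_fd_ref as *mut RequestFd as *mut c_void;
    assert_eq!(fake_c_caller(request_fd_wrapper, param), 42);
    assert_eq!(fake_c_caller(request_fd_wrapper, param), -1); // the callback has run dry
}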
diff --git a/libs/binder_common/rpc_server.rs b/libs/binder_common/rpc_server.rs
index 5c9d2a0..4261358 100644
--- a/libs/binder_common/rpc_server.rs
+++ b/libs/binder_common/rpc_server.rs
@@ -18,14 +18,17 @@
use binder::unstable_api::AsNative;
use binder::SpIBinder;
-use std::os::raw;
+use std::{os::raw, ptr::null_mut};
-/// Run a binder RPC server, serving the supplied binder service implementation on the given vsock
+/// Runs a binder RPC server, serving the supplied binder service implementation on the given vsock
/// port.
-/// If and when the server is ready for connections (it is listening on the port) on_ready
-/// is called to allow appropriate action to be taken - e.g. to notify clients they
-/// may now attempt to connect.
+///
+/// If and when the server is ready for connections (it is listening on the port), `on_ready` is
+/// called to allow appropriate action to be taken - e.g. to notify clients that they may now
+/// attempt to connect.
+///
/// The current thread is joined to the binder thread pool to handle incoming messages.
+///
/// Returns true if the server has shut down normally, false if it failed in some way.
pub fn run_rpc_server<F>(service: SpIBinder, port: u32, on_ready: F) -> bool
where
@@ -79,3 +82,46 @@
}
}
}
+
+type RpcServerFactoryRef<'a> = &'a mut (dyn FnMut(u32) -> Option<SpIBinder> + Send + Sync);
+
+/// Runs a binder RPC server, using the given factory function to construct a binder service
+/// implementation for each connection.
+///
+/// The current thread is joined to the binder thread pool to handle incoming messages.
+///
+/// Returns true if the server has shut down normally, false if it failed in some way.
+pub fn run_rpc_server_with_factory(
+ port: u32,
+ mut factory: impl FnMut(u32) -> Option<SpIBinder> + Send + Sync,
+) -> bool {
+ // Double reference the factory because trait objects aren't FFI safe.
+ // NB: The type annotation is necessary to ensure that we have a `dyn` rather than an `impl`.
+ let mut factory_ref: RpcServerFactoryRef = &mut factory;
+ let context = &mut factory_ref as *mut RpcServerFactoryRef as *mut raw::c_void;
+
+ // SAFETY: `factory_wrapper` is only ever called by `RunRpcServerWithFactory`, with context
+ // taking the pointer value above (so a properly aligned non-null pointer to an initialized
+ // `RpcServerFactoryRef`), within the lifetime of `factory_ref` (i.e. no more calls will be made
+ // after `RunRpcServerWithFactory` returns).
+ unsafe {
+ binder_rpc_unstable_bindgen::RunRpcServerWithFactory(Some(factory_wrapper), context, port)
+ }
+}
+
+unsafe extern "C" fn factory_wrapper(
+ cid: u32,
+ context: *mut raw::c_void,
+) -> *mut binder_rpc_unstable_bindgen::AIBinder {
+ // SAFETY: `context` was created from an `&mut RpcServerFactoryRef` by
+ // `run_rpc_server_with_factory`, and we are still within the lifetime of the value it is
+ // pointing to.
+ let factory_ptr = context as *mut RpcServerFactoryRef;
+ let factory = factory_ptr.as_mut().unwrap();
+
+ if let Some(mut service) = factory(cid) {
+ service.as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder
+ } else {
+ null_mut()
+ }
+}
diff --git a/rialto/tests/test.rs b/rialto/tests/test.rs
index 6cd3f2f..b6ccd9e 100644
--- a/rialto/tests/test.rs
+++ b/rialto/tests/test.rs
@@ -21,13 +21,14 @@
},
binder::{ParcelFileDescriptor, ProcessState},
};
-use anyhow::{Context, Error};
+use anyhow::{anyhow, Context, Error};
use log::info;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::os::unix::io::FromRawFd;
use std::panic;
use std::thread;
+use std::time::Duration;
use vmclient::{DeathReason, VmInstance};
const RIALTO_PATH: &str = "/data/local/tmp/rialto_test/arm64/rialto.bin";
@@ -71,7 +72,9 @@
vm.start().context("Failed to start VM")?;
// Wait for VM to finish, and check that it shut down cleanly.
- let death_reason = vm.wait_for_death();
+ let death_reason = vm
+ .wait_for_death_with_timeout(Duration::from_secs(10))
+ .ok_or_else(|| anyhow!("Timed out waiting for VM exit"))?;
assert_eq!(death_reason, DeathReason::Shutdown);
Ok(())
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index 7ee2d39..90aac1e 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -122,37 +122,34 @@
final int trialCount = 10;
- double sum = 0;
- double squareSum = 0;
- double min = Double.MAX_VALUE;
- double max = Double.MIN_VALUE;
+ List<Double> bootTimeMetrics = new ArrayList<>();
+ List<Double> bootloaderTimeMetrics = new ArrayList<>();
+ List<Double> kernelBootTimeMetrics = new ArrayList<>();
+ List<Double> userspaceBootTimeMetrics = new ArrayList<>();
+
for (int i = 0; i < trialCount; i++) {
VirtualMachineConfig.Builder builder =
mInner.newVmConfigBuilder("assets/vm_config.json");
+
+            // To grab boot events from the log, set the debug level to FULL
VirtualMachineConfig normalConfig =
- builder.debugLevel(DebugLevel.NONE).memoryMib(256).build();
+ builder.debugLevel(DebugLevel.FULL).memoryMib(256).build();
mInner.forceCreateNewVirtualMachine("test_vm_boot_time", normalConfig);
BootResult result = tryBootVm(TAG, "test_vm_boot_time");
assertThat(result.payloadStarted).isTrue();
- double elapsedMilliseconds = result.elapsedNanoTime / 1000000.0;
-
- sum += elapsedMilliseconds;
- squareSum += elapsedMilliseconds * elapsedMilliseconds;
- if (min > elapsedMilliseconds) min = elapsedMilliseconds;
- if (max < elapsedMilliseconds) max = elapsedMilliseconds;
+ final Double nanoToMilli = 1000000.0;
+ bootTimeMetrics.add(result.endToEndNanoTime / nanoToMilli);
+ bootloaderTimeMetrics.add(result.getBootloaderElapsedNanoTime() / nanoToMilli);
+ kernelBootTimeMetrics.add(result.getKernelElapsedNanoTime() / nanoToMilli);
+ userspaceBootTimeMetrics.add(result.getUserspaceElapsedNanoTime() / nanoToMilli);
}
- Bundle bundle = new Bundle();
- double average = sum / trialCount;
- double variance = squareSum / trialCount - average * average;
- double stdev = Math.sqrt(variance);
- bundle.putDouble("avf_perf/microdroid/boot_time_average_ms", average);
- bundle.putDouble("avf_perf/microdroid/boot_time_min_ms", min);
- bundle.putDouble("avf_perf/microdroid/boot_time_max_ms", max);
- bundle.putDouble("avf_perf/microdroid/boot_time_stdev_ms", stdev);
- mInstrumentation.sendStatus(0, bundle);
+ reportMetrics(bootTimeMetrics, "avf_perf/microdroid/boot_time_", "_ms");
+ reportMetrics(bootloaderTimeMetrics, "avf_perf/microdroid/bootloader_time_", "_ms");
+ reportMetrics(kernelBootTimeMetrics, "avf_perf/microdroid/kernel_boot_time_", "_ms");
+ reportMetrics(userspaceBootTimeMetrics, "avf_perf/microdroid/userspace_boot_time_", "_ms");
}
@Test
@@ -199,30 +196,36 @@
VirtioBlkVmEventListener listener = new VirtioBlkVmEventListener(readRates, isRand);
listener.runToFinish(TAG, vm);
}
- reportMetrics(readRates, isRand);
- }
- private void reportMetrics(List<Double> readRates, boolean isRand) {
- double sum = 0;
- for (double rate : readRates) {
- sum += rate;
- }
- double mean = sum / readRates.size();
- double sqSum = 0;
- for (double rate : readRates) {
- sqSum += (rate - mean) * (rate - mean);
- }
- double stdDev = Math.sqrt(sqSum / (readRates.size() - 1));
-
- Bundle bundle = new Bundle();
String metricNamePrefix =
"avf_perf/virtio-blk/"
+ (mProtectedVm ? "protected-vm/" : "unprotected-vm/")
+ (isRand ? "rand_read_" : "seq_read_");
String unit = "_mb_per_sec";
+ reportMetrics(readRates, metricNamePrefix, unit);
+ }
- bundle.putDouble(metricNamePrefix + "mean" + unit, mean);
- bundle.putDouble(metricNamePrefix + "std" + unit, stdDev);
+ private void reportMetrics(List<Double> data, String prefix, String suffix) {
+ double sum = 0;
+ double min = Double.MAX_VALUE;
+ double max = Double.MIN_VALUE;
+ for (double d : data) {
+ sum += d;
+ if (min > d) min = d;
+ if (max < d) max = d;
+ }
+ double avg = sum / data.size();
+ double sqSum = 0;
+ for (double d : data) {
+ sqSum += (d - avg) * (d - avg);
+ }
+ double stdDev = Math.sqrt(sqSum / (data.size() - 1));
+
+ Bundle bundle = new Bundle();
+ bundle.putDouble(prefix + "min" + suffix, min);
+ bundle.putDouble(prefix + "max" + suffix, max);
+ bundle.putDouble(prefix + "average" + suffix, avg);
+ bundle.putDouble(prefix + "stdev" + suffix, stdDev);
mInstrumentation.sendStatus(0, bundle);
}
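
For reference, reportMetrics computes min, max, mean and the sample standard deviation (dividing by n - 1). A small sketch of the same arithmetic, written in Rust purely for illustration since the test itself is Java:

fn summarize(data: &[f64]) -> (f64, f64, f64, f64) {
    let min = data.iter().cloned().fold(f64::MAX, f64::min);
    let max = data.iter().cloned().fold(f64::MIN, f64::max);
    let avg = data.iter().sum::<f64>() / data.len() as f64;
    let sq_sum: f64 = data.iter().map(|d| (d - avg) * (d - avg)).sum();
    let stdev = (sq_sum / (data.len() - 1) as f64).sqrt();
    (min, max, avg, stdev)
}

fn main() {
    let boot_times_ms = [812.0, 790.5, 805.25];
    let (min, max, avg, stdev) = summarize(&boot_times_ms);
    println!("min={min} max={max} average={avg} stdev={stdev}");
}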
diff --git a/tests/helper/src/java/com/android/microdroid/test/MicrodroidDeviceTestBase.java b/tests/helper/src/java/com/android/microdroid/test/MicrodroidDeviceTestBase.java
index 1f57634..84e189a 100644
--- a/tests/helper/src/java/com/android/microdroid/test/MicrodroidDeviceTestBase.java
+++ b/tests/helper/src/java/com/android/microdroid/test/MicrodroidDeviceTestBase.java
@@ -39,30 +39,13 @@
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.util.OptionalLong;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
public abstract class MicrodroidDeviceTestBase {
- /** Copy output from the VM to logcat. This is helpful when things go wrong. */
- protected static void logVmOutput(String tag, InputStream vmOutputStream, String name) {
- new Thread(
- () -> {
- try {
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(vmOutputStream));
- String line;
- while ((line = reader.readLine()) != null
- && !Thread.interrupted()) {
- Log.i(tag, name + ": " + line);
- }
- } catch (Exception e) {
- Log.w(tag, name, e);
- }
- }).start();
- }
-
public static boolean isCuttlefish() {
return VirtualizationTestHelper.isCuttlefish(SystemProperties.get("ro.product.name"));
}
@@ -138,16 +121,84 @@
protected abstract static class VmEventListener implements VirtualMachineCallback {
private ExecutorService mExecutorService = Executors.newSingleThreadExecutor();
+ private OptionalLong mVcpuStartedNanoTime = OptionalLong.empty();
+ private OptionalLong mKernelStartedNanoTime = OptionalLong.empty();
+ private OptionalLong mInitStartedNanoTime = OptionalLong.empty();
+ private OptionalLong mPayloadStartedNanoTime = OptionalLong.empty();
+
+ private void processBootEvents(String log) {
+ if (!mVcpuStartedNanoTime.isPresent()) {
+ mVcpuStartedNanoTime = OptionalLong.of(System.nanoTime());
+ }
+ if (log.contains("Starting kernel") && !mKernelStartedNanoTime.isPresent()) {
+ mKernelStartedNanoTime = OptionalLong.of(System.nanoTime());
+ }
+ if (log.contains("Run /init as init process") && !mInitStartedNanoTime.isPresent()) {
+ mInitStartedNanoTime = OptionalLong.of(System.nanoTime());
+ }
+ if (log.contains("microdroid_manager") && log.contains("executing main task")
+ && !mPayloadStartedNanoTime.isPresent()) {
+ mPayloadStartedNanoTime = OptionalLong.of(System.nanoTime());
+ }
+ }
+
+ private void logVmOutputAndMonitorBootEvents(String tag,
+ InputStream vmOutputStream,
+ String name,
+ boolean monitorEvents) {
+ new Thread(
+ () -> {
+ try {
+ BufferedReader reader =
+ new BufferedReader(new InputStreamReader(vmOutputStream));
+ String line;
+ while ((line = reader.readLine()) != null
+ && !Thread.interrupted()) {
+ if (monitorEvents) processBootEvents(line);
+ Log.i(tag, name + ": " + line);
+ }
+ } catch (Exception e) {
+ Log.w(tag, name, e);
+ }
+ }).start();
+ }
+
+ private void logVmOutputAndMonitorBootEvents(String tag,
+ InputStream vmOutputStream,
+ String name) {
+ logVmOutputAndMonitorBootEvents(tag, vmOutputStream, name, true);
+ }
+
+ /** Copy output from the VM to logcat. This is helpful when things go wrong. */
+ protected void logVmOutput(String tag, InputStream vmOutputStream, String name) {
+ logVmOutputAndMonitorBootEvents(tag, vmOutputStream, name, false);
+ }
public void runToFinish(String logTag, VirtualMachine vm)
throws VirtualMachineException, InterruptedException {
vm.setCallback(mExecutorService, this);
vm.run();
- logVmOutput(logTag, vm.getConsoleOutputStream(), "Console");
+ logVmOutputAndMonitorBootEvents(logTag, vm.getConsoleOutputStream(), "Console");
logVmOutput(logTag, vm.getLogOutputStream(), "Log");
mExecutorService.awaitTermination(300, TimeUnit.SECONDS);
}
+ public OptionalLong getVcpuStartedNanoTime() {
+ return mVcpuStartedNanoTime;
+ }
+
+ public OptionalLong getKernelStartedNanoTime() {
+ return mKernelStartedNanoTime;
+ }
+
+ public OptionalLong getInitStartedNanoTime() {
+ return mInitStartedNanoTime;
+ }
+
+ public OptionalLong getPayloadStartedNanoTime() {
+ return mPayloadStartedNanoTime;
+ }
+
protected void forceStop(VirtualMachine vm) {
try {
vm.clearCallback();
@@ -183,12 +234,55 @@
public static class BootResult {
public final boolean payloadStarted;
public final int deathReason;
- public final long elapsedNanoTime;
+ public final long endToEndNanoTime;
- BootResult(boolean payloadStarted, int deathReason, long elapsedNanoTime) {
+ public final OptionalLong vcpuStartedNanoTime;
+ public final OptionalLong kernelStartedNanoTime;
+ public final OptionalLong initStartedNanoTime;
+ public final OptionalLong payloadStartedNanoTime;
+
+ BootResult(boolean payloadStarted,
+ int deathReason,
+ long endToEndNanoTime,
+ OptionalLong vcpuStartedNanoTime,
+ OptionalLong kernelStartedNanoTime,
+ OptionalLong initStartedNanoTime,
+ OptionalLong payloadStartedNanoTime) {
this.payloadStarted = payloadStarted;
this.deathReason = deathReason;
- this.elapsedNanoTime = elapsedNanoTime;
+ this.endToEndNanoTime = endToEndNanoTime;
+ this.vcpuStartedNanoTime = vcpuStartedNanoTime;
+ this.kernelStartedNanoTime = kernelStartedNanoTime;
+ this.initStartedNanoTime = initStartedNanoTime;
+ this.payloadStartedNanoTime = payloadStartedNanoTime;
+ }
+
+ private long getVcpuStartedNanoTime() {
+ return vcpuStartedNanoTime.getAsLong();
+ }
+
+ private long getKernelStartedNanoTime() {
+ return kernelStartedNanoTime.getAsLong();
+ }
+
+ private long getInitStartedNanoTime() {
+ return initStartedNanoTime.getAsLong();
+ }
+
+ private long getPayloadStartedNanoTime() {
+ return payloadStartedNanoTime.getAsLong();
+ }
+
+ public long getBootloaderElapsedNanoTime() {
+ return getKernelStartedNanoTime() - getVcpuStartedNanoTime();
+ }
+
+ public long getKernelElapsedNanoTime() {
+ return getInitStartedNanoTime() - getKernelStartedNanoTime();
+ }
+
+ public long getUserspaceElapsedNanoTime() {
+ return getPayloadStartedNanoTime() - getInitStartedNanoTime();
}
}
@@ -218,6 +312,10 @@
return new BootResult(
payloadStarted.getNow(false),
deathReason.getNow(DeathReason.INFRASTRUCTURE_ERROR),
- endTime.getNow(beginTime) - beginTime);
+ endTime.getNow(beginTime) - beginTime,
+ listener.getVcpuStartedNanoTime(),
+ listener.getKernelStartedNanoTime(),
+ listener.getInitStartedNanoTime(),
+ listener.getPayloadStartedNanoTime());
}
}
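
The boot-stage timing above works by stamping the first console line as vCPU start and then looking for fixed markers ("Starting kernel", "Run /init as init process", microdroid_manager's "executing main task"); stage durations are differences of consecutive timestamps. A sketch of that scheme, in Rust for illustration only and with made-up sample lines:

use std::time::Instant;

#[derive(Default)]
struct BootTimestamps {
    vcpu: Option<Instant>,
    kernel: Option<Instant>,
    init: Option<Instant>,
    payload: Option<Instant>,
}

impl BootTimestamps {
    fn process_line(&mut self, line: &str) {
        let now = Instant::now();
        self.vcpu.get_or_insert(now); // any console output means the vCPU has started
        if line.contains("Starting kernel") {
            self.kernel.get_or_insert(now);
        }
        if line.contains("Run /init as init process") {
            self.init.get_or_insert(now);
        }
        if line.contains("microdroid_manager") && line.contains("executing main task") {
            self.payload.get_or_insert(now);
        }
    }
}

fn main() {
    let mut t = BootTimestamps::default();
    // Made-up console lines; only the marker substrings matter.
    for line in [
        "pvmfw: boot stage",
        "Starting kernel at 0x80000000",
        "Run /init as init process",
        "microdroid_manager: executing main task",
    ] {
        t.process_line(line);
    }
    let bootloader = t.kernel.unwrap() - t.vcpu.unwrap();
    let kernel = t.init.unwrap() - t.kernel.unwrap();
    let userspace = t.payload.unwrap() - t.init.unwrap();
    println!("bootloader={bootloader:?} kernel={kernel:?} userspace={userspace:?}");
}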
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index 26a9780..5ce19bd 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -23,8 +23,6 @@
":test.com.android.virt.pem",
":test2.com.android.virt.pem",
":test-payload-metadata",
- ":com.android.adbd{.apex}",
- ":com.android.os.statsd{.apex}",
],
data_native_bins: [
"sepolicy-analyze",
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/MicrodroidHostTestCaseBase.java
index 0712323..2992a43 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/MicrodroidHostTestCaseBase.java
@@ -50,6 +50,7 @@
protected static final String TEST_ROOT = "/data/local/tmp/virt/";
protected static final String VIRT_APEX = "/apex/com.android.virt/";
protected static final String LOG_PATH = TEST_ROOT + "log.txt";
+ protected static final String CONSOLE_PATH = TEST_ROOT + "console.txt";
private static final int TEST_VM_ADB_PORT = 8000;
private static final String MICRODROID_SERIAL = "localhost:" + TEST_VM_ADB_PORT;
private static final String INSTANCE_IMG = "instance.img";
@@ -258,6 +259,17 @@
memoryMib, numCpus, cpuAffinity);
}
+ private static void forwardFileToLog(CommandRunner android, String path, String tag)
+ throws DeviceNotAvailableException {
+ android.runWithTimeout(
+ MICRODROID_MAX_LIFETIME_MINUTES * 60 * 1000,
+ "logwrapper",
+ "sh",
+ "-c",
+ "\"$'tail -f -n +0 " + path
+ + " | sed \\'s/^/" + tag + ": /g\\''\""); // add tags in front of lines
+ }
+
public static String startMicrodroid(
ITestDevice androidDevice,
IBuildInfo buildInfo,
@@ -290,6 +302,7 @@
final String instanceImg = TEST_ROOT + INSTANCE_IMG;
final String logPath = LOG_PATH;
+ final String consolePath = CONSOLE_PATH;
final String debugFlag = debug ? "--debug full" : "";
// Run the VM
@@ -298,6 +311,7 @@
"run-app",
"--daemonize",
"--log " + logPath,
+ "--console " + consolePath,
"--mem " + memoryMib,
numCpus.isPresent() ? "--cpus " + numCpus.get() : "",
cpuAffinity.isPresent() ? "--cpu-affinity " + cpuAffinity.get() : "",
@@ -314,22 +328,25 @@
}
String ret = android.run(args.toArray(new String[0]));
- // Redirect log.txt to logd using logwrapper
- ExecutorService executor = Executors.newFixedThreadPool(1);
+ // Redirect log.txt and console.txt to logd using logwrapper
+        // Keep redirecting for as long as the expected maximum test time. When an adb
+        // command times out, it may trigger the device recovery process, which
+        // disconnects adb, which terminates any live adb commands. See an example at
+ // b/194974010#comment25.
+ ExecutorService executor = Executors.newFixedThreadPool(2);
executor.execute(
() -> {
try {
- // Keep redirecting as long as the expecting maximum test time. When an adb
- // command times out, it may trigger the device recovery process, which
- // disconnect adb, which terminates any live adb commands. See an example at
- // b/194974010#comment25.
- android.runWithTimeout(
- MICRODROID_MAX_LIFETIME_MINUTES * 60 * 1000,
- "logwrapper",
- "tail",
- "-f",
- "-n +0",
- logPath);
+ forwardFileToLog(android, logPath, "MicrodroidLog");
+ } catch (Exception e) {
+ // Consume
+ }
+ });
+
+ executor.execute(
+ () -> {
+ try {
+ forwardFileToLog(android, consolePath, "MicrodroidConsole");
} catch (Exception e) {
// Consume
}
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidTestCase.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidTestCase.java
index 6b8003c..722ec74 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidTestCase.java
@@ -40,6 +40,7 @@
import com.android.tradefed.util.CommandResult;
import com.android.tradefed.util.FileUtil;
import com.android.tradefed.util.RunUtil;
+import com.android.tradefed.util.xml.AbstractXmlParser;
import org.json.JSONArray;
import org.json.JSONException;
@@ -50,7 +51,10 @@
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
+import org.xml.sax.Attributes;
+import org.xml.sax.helpers.DefaultHandler;
+import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
@@ -181,9 +185,55 @@
}
}
+ static class ActiveApexInfo {
+ public String name;
+ public String path;
+ ActiveApexInfo(String name, String path) {
+ this.name = name;
+ this.path = path;
+ }
+ }
+
+ static class ActiveApexInfoList {
+ private List<ActiveApexInfo> mList;
+ ActiveApexInfoList(List<ActiveApexInfo> list) {
+ this.mList = list;
+ }
+ ActiveApexInfo get(String apexName) {
+ for (ActiveApexInfo info: mList) {
+ if (info.name.equals(apexName)) {
+ return info;
+ }
+ }
+ return null;
+ }
+ }
+
+ private ActiveApexInfoList getActiveApexInfoList() throws Exception {
+ String apexInfoListXml = getDevice().pullFileContents("/apex/apex-info-list.xml");
+ List<ActiveApexInfo> list = new ArrayList<>();
+ new AbstractXmlParser() {
+ @Override
+ protected DefaultHandler createXmlHandler() {
+ return new DefaultHandler() {
+ @Override
+ public void startElement(String uri, String localName, String qName,
+ Attributes attributes) {
+ if (localName.equals("apex-info")
+ && attributes.getValue("isActive").equals("true")) {
+ list.add(new ActiveApexInfo(attributes.getValue("moduleName"),
+ attributes.getValue("modulePath")));
+ }
+ }
+ };
+ }
+ }.parse(new ByteArrayInputStream(apexInfoListXml.getBytes()));
+ return new ActiveApexInfoList(list);
+ }
+
private String runMicrodroidWithResignedImages(File key, Map<String, File> keyOverrides,
boolean isProtected, boolean daemonize, String consolePath)
- throws DeviceNotAvailableException, IOException, JSONException {
+ throws Exception {
CommandRunner android = new CommandRunner(getDevice());
File virtApexDir = FileUtil.createTempDir("virt_apex");
@@ -213,11 +263,10 @@
final String payloadMetadataPath = TEST_ROOT + "payload-metadata.img";
getDevice().pushFile(findTestFile("test-payload-metadata.img"), payloadMetadataPath);
- // push APEXes required for the VM.
- final String statsdApexPath = TEST_ROOT + "com.android.os.statsd.apex";
- final String adbdApexPath = TEST_ROOT + "com.android.adbd.apex";
- getDevice().pushFile(findTestFile("com.android.os.statsd.apex"), statsdApexPath);
- getDevice().pushFile(findTestFile("com.android.adbd.apex"), adbdApexPath);
+ // get paths to the two APEXes required for the VM.
+ ActiveApexInfoList list = getActiveApexInfoList();
+ final String statsdApexPath = list.get("com.android.os.statsd").path;
+ final String adbdApexPath = list.get("com.android.adbd").path;
// Since Java APP can't start a VM with a custom image, here, we start a VM using `vm run`
// command with a VM Raw config which is equiv. to what virtualizationservice creates with
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 0c9496a..0a5436b 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -26,7 +26,6 @@
"libandroid_logger",
"libanyhow",
"libbinder_common",
- "libbinder_rpc_unstable_bindgen",
"libbinder_rs",
"libcommand_fds",
"libdisk",
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index e2e76d5..cc8d8a3 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -19,7 +19,6 @@
use crate::payload::add_microdroid_images;
use crate::{Cid, FIRST_GUEST_CID, SYSPROP_LAST_CID};
use crate::selinux::{SeContext, getfilecon};
-use ::binder::unstable_api::AsNative;
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
DeathReason::DeathReason,
@@ -36,8 +35,8 @@
VirtualMachineState::VirtualMachineState,
};
use android_system_virtualizationservice::binder::{
- self, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor, Status, StatusCode, Strong,
- ThreadState,
+ self, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor, SpIBinder, Status,
+ StatusCode, Strong, ThreadState,
};
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::{
IVirtualMachineService::{
@@ -46,7 +45,7 @@
},
};
use anyhow::{anyhow, bail, Context, Result};
-use binder_common::{lazy_service::LazyServiceGuard, new_binder_exception};
+use binder_common::{lazy_service::LazyServiceGuard, new_binder_exception, rpc_server::run_rpc_server_with_factory};
use disk::QcowFile;
use idsig::{HashAlgorithm, V4Signature};
use log::{debug, error, info, warn, trace};
@@ -59,10 +58,8 @@
use std::fs::{create_dir, File, OpenOptions};
use std::io::{Error, ErrorKind, Write, Read};
use std::num::NonZeroU32;
-use std::os::raw;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::path::{Path, PathBuf};
-use std::ptr::null_mut;
use std::sync::{Arc, Mutex, Weak};
use tombstoned_client::{TombstonedConnection, DebuggerdDumpType};
use vmconfig::VmConfig;
@@ -330,28 +327,16 @@
// binder server for vm
// reference to state (not the state itself) is copied
- let mut state = service.state.clone();
+ let state = service.state.clone();
std::thread::spawn(move || {
- let state_ptr = &mut state as *mut _ as *mut raw::c_void;
-
- debug!("virtual machine service is starting as an RPC service.");
- // SAFETY: factory function is only ever called by RunRpcServerWithFactory, within the
- // lifetime of the state, with context taking the pointer value above (so a properly
- // aligned non-null pointer to an initialized instance).
- let retval = unsafe {
- binder_rpc_unstable_bindgen::RunRpcServerWithFactory(
- Some(VirtualMachineService::factory),
- state_ptr,
- VM_BINDER_SERVICE_PORT as u32,
- )
- };
- if retval {
+ debug!("VirtualMachineService is starting as an RPC service.");
+ if run_rpc_server_with_factory(VM_BINDER_SERVICE_PORT as u32, |cid| {
+ VirtualMachineService::factory(cid, &state)
+ }) {
debug!("RPC server has shut down gracefully");
} else {
- bail!("Premature termination of RPC server");
+ panic!("Premature termination of RPC server");
}
-
- Ok(retval)
});
service
}
@@ -1143,20 +1128,14 @@
}
impl VirtualMachineService {
- // SAFETY: Service ownership is held by state, and the binder objects are threadsafe.
- pub unsafe extern "C" fn factory(
- cid: Cid,
- context: *mut raw::c_void,
- ) -> *mut binder_rpc_unstable_bindgen::AIBinder {
- let state_ptr = context as *mut Arc<Mutex<State>>;
- let state = state_ptr.as_ref().unwrap();
+ fn factory(cid: Cid, state: &Arc<Mutex<State>>) -> Option<SpIBinder> {
if let Some(vm) = state.lock().unwrap().get_vm(cid) {
let mut vm_service = vm.vm_service.lock().unwrap();
let service = vm_service.get_or_insert_with(|| Self::new_binder(state.clone(), cid));
- service.as_binder().as_native_mut() as *mut binder_rpc_unstable_bindgen::AIBinder
+ Some(service.as_binder())
} else {
error!("connection from cid={} is not from a guest VM", cid);
- null_mut()
+ None
}
}
diff --git a/vmclient/Android.bp b/vmclient/Android.bp
index 8ad5adf..c219198 100644
--- a/vmclient/Android.bp
+++ b/vmclient/Android.bp
@@ -9,7 +9,7 @@
edition: "2021",
rustlibs: [
"android.system.virtualizationservice-rust",
- "libbinder_rpc_unstable_bindgen",
+ "libbinder_common",
"libbinder_rs",
"liblog_rust",
"libthiserror",
diff --git a/vmclient/src/errors.rs b/vmclient/src/errors.rs
index 43db7f9..231f81f 100644
--- a/vmclient/src/errors.rs
+++ b/vmclient/src/errors.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use super::DeathReason;
-use android_system_virtualizationservice::binder::StatusCode;
use thiserror::Error;
/// An error while waiting for a VM to do something.
@@ -32,14 +31,3 @@
#[error("VM payload finished.")]
Finished,
}
-
-/// An error connecting to a VM RPC Binder service.
-#[derive(Clone, Debug, Eq, Error, PartialEq)]
-pub enum ConnectServiceError {
- /// The RPC binder connection failed.
- #[error("Vsock connection to RPC binder failed.")]
- ConnectionFailed,
- /// The AIDL service type didn't match.
- #[error("Service type didn't match ({0}).")]
- WrongServiceType(StatusCode),
-}
diff --git a/vmclient/src/lib.rs b/vmclient/src/lib.rs
index 9b5b8dd..b3bb635 100644
--- a/vmclient/src/lib.rs
+++ b/vmclient/src/lib.rs
@@ -16,12 +16,11 @@
mod death_reason;
mod errors;
-mod rpc_binder;
mod sync;
pub use crate::death_reason::DeathReason;
-pub use crate::errors::{ConnectServiceError, VmWaitError};
-use crate::{rpc_binder::VsockFactory, sync::Monitor};
+pub use crate::errors::VmWaitError;
+use crate::sync::Monitor;
use android_system_virtualizationservice::{
aidl::android::system::virtualizationservice::{
DeathReason::DeathReason as AidlDeathReason,
@@ -36,10 +35,12 @@
ParcelFileDescriptor, Result as BinderResult, StatusCode, Strong,
},
};
+use binder_common::rpc_client::connect_preconnected_rpc_binder;
use log::warn;
use std::{
fmt::{self, Debug, Formatter},
fs::File,
+ os::unix::io::IntoRawFd,
sync::Arc,
time::Duration,
};
@@ -145,12 +146,19 @@
pub fn connect_service<T: FromIBinder + ?Sized>(
&self,
port: u32,
- ) -> Result<Strong<T>, ConnectServiceError> {
- let mut vsock_factory = VsockFactory::new(&*self.vm, port);
-
- let ibinder = vsock_factory.connect_rpc_client()?;
-
- FromIBinder::try_from(ibinder).map_err(ConnectServiceError::WrongServiceType)
+ ) -> Result<Strong<T>, StatusCode> {
+ connect_preconnected_rpc_binder(|| {
+ match self.vm.connectVsock(port as i32) {
+ Ok(vsock) => {
+ // Ownership of the fd is transferred to binder
+ Some(vsock.into_raw_fd())
+ }
+ Err(e) => {
+ warn!("Vsock connection failed: {}", e);
+ None
+ }
+ }
+ })
}
/// Get ramdump
diff --git a/vmclient/src/rpc_binder.rs b/vmclient/src/rpc_binder.rs
deleted file mode 100644
index 7c2992b..0000000
--- a/vmclient/src/rpc_binder.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2022, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::errors::ConnectServiceError;
-use android_system_virtualizationservice::{
- aidl::android::system::virtualizationservice::IVirtualMachine::IVirtualMachine,
-};
-use binder::unstable_api::{new_spibinder, AIBinder};
-use log::warn;
-use std::os::{raw, unix::io::IntoRawFd};
-
-pub struct VsockFactory<'a> {
- vm: &'a dyn IVirtualMachine,
- port: u32,
-}
-
-impl<'a> VsockFactory<'a> {
- pub fn new(vm: &'a dyn IVirtualMachine, port: u32) -> Self {
- Self { vm, port }
- }
-
- pub fn connect_rpc_client(&mut self) -> Result<binder::SpIBinder, ConnectServiceError> {
- let param = self.as_void_ptr();
-
- unsafe {
- // SAFETY: AIBinder returned by RpcPreconnectedClient has correct reference count, and
- // the ownership can be safely taken by new_spibinder.
- // RpcPreconnectedClient does not take ownership of param, only passing it to
- // request_fd.
- let binder =
- binder_rpc_unstable_bindgen::RpcPreconnectedClient(Some(Self::request_fd), param)
- as *mut AIBinder;
- new_spibinder(binder).ok_or(ConnectServiceError::ConnectionFailed)
- }
- }
-
- fn as_void_ptr(&mut self) -> *mut raw::c_void {
- self as *mut _ as *mut raw::c_void
- }
-
- fn new_vsock_fd(&self) -> i32 {
- match self.vm.connectVsock(self.port as i32) {
- Ok(vsock) => {
- // Ownership of the fd is transferred to binder
- vsock.into_raw_fd()
- }
- Err(e) => {
- warn!("Vsock connection failed: {}", e);
- -1
- }
- }
- }
-
- unsafe extern "C" fn request_fd(param: *mut raw::c_void) -> raw::c_int {
- // SAFETY: This is only ever called by RpcPreconnectedClient, within the lifetime of the
- // VsockFactory, with param taking the value returned by as_void_ptr (so a properly aligned
- // non-null pointer to an initialized instance).
- let vsock_factory = param as *mut Self;
- vsock_factory.as_ref().unwrap().new_vsock_fd()
- }
-}