Move VM callback to vmclient
Instead of having clients register a callback directly with
virtualizationservice, implement a Rust-level callback interface in
vmclient. This saves an extra binder call on each notification, removes
a bunch of boilerplate code, and lets us provide a slightly better
interface (e.g. we can use the Rust DeathReason enum, as elsewhere in
vmclient, for better logging).
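
For context, the vmclient trait this change implements against looks
roughly like the sketch below. The method names and signatures are
taken from the Callback impl further down in this change; the empty
default bodies are an assumption about how vmclient defines the trait,
not the authoritative definition.

    // Illustrative sketch only: signatures mirror the Callback impl in
    // compos_client.rs; default empty bodies are assumed.
    use std::fs::File;
    use vmclient::DeathReason;

    pub trait VmCallback {
        fn on_payload_started(&self, _cid: i32, _stream: Option<&File>) {}
        fn on_payload_ready(&self, _cid: i32) {}
        fn on_payload_finished(&self, _cid: i32, _exit_code: i32) {}
        fn on_error(&self, _cid: i32, _error_code: i32, _message: &str) {}
        fn on_died(&self, _cid: i32, _death_reason: DeathReason) {}
    }

A client just passes a boxed implementation of this trait to
VmInstance::create(), as the compos_client.rs hunk below shows.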
I also replaced all our uses of <some_interface>::binder::{...} with
direct access to binder::{...}. That makes it clearer what depends on
the interface itself and what is just generic binder code. Ideally this
would have been a separate change, but I only realised that after doing
bits of both.
Test: composd_cmd test-compile, observe logs (on both success & failure)
Test: atest -b (to make sure all our tests build)
Test: Presubmits
Change-Id: Iceda8d7b8f8008f9d7a2c51106c2794f09bb378e
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index 0773652..23a1eb9 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -11,6 +11,7 @@
"android.system.virtualizationservice-rust",
"compos_aidl_interface-rust",
"libanyhow",
+ "libbinder_rs",
"liblazy_static",
"liblog_rust",
"libnested_virt",
diff --git a/compos/common/binder.rs b/compos/common/binder.rs
index 59726c0..d3550f7 100644
--- a/compos/common/binder.rs
+++ b/compos/common/binder.rs
@@ -16,7 +16,7 @@
//! Helper for converting Error types to what Binder expects
-use android_system_virtualizationservice::binder::{Result as BinderResult, Status};
+use binder::{Result as BinderResult, Status};
use log::warn;
use std::fmt::Debug;
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index cd1ece4..770f489 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -19,16 +19,12 @@
use crate::timeouts::TIMEOUTS;
use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT, DEFAULT_VM_CONFIG_PATH};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
- DeathReason::DeathReason,
- IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
IVirtualizationService::IVirtualizationService,
VirtualMachineAppConfig::{DebugLevel::DebugLevel, VirtualMachineAppConfig},
VirtualMachineConfig::VirtualMachineConfig,
};
-use android_system_virtualizationservice::binder::{
- BinderFeatures, Interface, ParcelFileDescriptor, Result as BinderResult, Strong,
-};
use anyhow::{bail, Context, Result};
+use binder::{ParcelFileDescriptor, Strong};
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
use log::{info, warn};
use rustutils::system_properties;
@@ -37,7 +33,7 @@
use std::num::NonZeroU32;
use std::path::{Path, PathBuf};
use std::thread;
-use vmclient::{VmInstance, VmWaitError};
+use vmclient::{DeathReason, VmInstance, VmWaitError};
/// This owns an instance of the CompOS VM.
pub struct ComposClient(VmInstance);
@@ -119,13 +115,10 @@
taskProfiles: parameters.task_profiles.clone(),
});
- let instance = VmInstance::create(service, &config, console_fd, log_fd)
+ let callback = Box::new(Callback {});
+ let instance = VmInstance::create(service, &config, console_fd, log_fd, Some(callback))
.context("Failed to create VM")?;
- let callback =
- BnVirtualMachineCallback::new_binder(VmCallback(), BinderFeatures::default());
- instance.vm.registerCallback(&callback)?;
-
instance.start()?;
let ready = instance.wait_until_ready(TIMEOUTS.vm_max_time_to_ready);
@@ -163,7 +156,7 @@
fn wait_for_shutdown(self) {
let death_reason = self.0.wait_for_death_with_timeout(TIMEOUTS.vm_max_time_to_exit);
match death_reason {
- Some(vmclient::DeathReason::Shutdown) => info!("VM has exited normally"),
+ Some(DeathReason::Shutdown) => info!("VM has exited normally"),
Some(reason) => warn!("VM died with reason {:?}", reason),
None => warn!("VM failed to exit, dropping"),
}
@@ -228,53 +221,36 @@
bail!("No VM support available")
}
-struct VmCallback();
-
-impl Interface for VmCallback {}
-
-impl IVirtualMachineCallback for VmCallback {
- fn onDied(&self, cid: i32, reason: DeathReason) -> BinderResult<()> {
- log::warn!("VM died, cid = {}, reason = {:?}", cid, reason);
- Ok(())
- }
-
- fn onPayloadStarted(
- &self,
- cid: i32,
- stream: Option<&ParcelFileDescriptor>,
- ) -> BinderResult<()> {
- if let Some(pfd) = stream {
- if let Err(e) = start_logging(pfd) {
+struct Callback {}
+impl vmclient::VmCallback for Callback {
+ fn on_payload_started(&self, cid: i32, stream: Option<&File>) {
+ if let Some(file) = stream {
+ if let Err(e) = start_logging(file) {
warn!("Can't log vm output: {}", e);
};
}
log::info!("VM payload started, cid = {}", cid);
- Ok(())
}
- fn onPayloadReady(&self, cid: i32) -> BinderResult<()> {
+ fn on_payload_ready(&self, cid: i32) {
log::info!("VM payload ready, cid = {}", cid);
- Ok(())
}
- fn onPayloadFinished(&self, cid: i32, exit_code: i32) -> BinderResult<()> {
+ fn on_payload_finished(&self, cid: i32, exit_code: i32) {
log::warn!("VM payload finished, cid = {}, exit code = {}", cid, exit_code);
- Ok(())
}
- fn onError(&self, cid: i32, error_code: i32, message: &str) -> BinderResult<()> {
- log::warn!("VM error, cid = {}, error code = {}, message = {}", cid, error_code, message,);
- Ok(())
+ fn on_error(&self, cid: i32, error_code: i32, message: &str) {
+ log::warn!("VM error, cid = {}, error code = {}, message = {}", cid, error_code, message);
}
- fn onRamdump(&self, _cid: i32, _ramdump: &ParcelFileDescriptor) -> BinderResult<()> {
- // TODO(b/238295267) send this to tombstone?
- Ok(())
+ fn on_died(&self, cid: i32, death_reason: DeathReason) {
+ log::warn!("VM died, cid = {}, reason = {:?}", cid, death_reason);
}
}
-fn start_logging(pfd: &ParcelFileDescriptor) -> Result<()> {
- let reader = BufReader::new(pfd.as_ref().try_clone().context("Cloning fd failed")?);
+fn start_logging(file: &File) -> Result<()> {
+ let reader = BufReader::new(file.try_clone().context("Cloning file failed")?);
thread::spawn(move || {
for line in reader.lines() {
match line {