Factor out Rust client library for VirtualizationService.
This reduces code duplication and will also be useful for Rust tests.
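
For context, a minimal sketch of how a caller might use the new library,
based on the API added below (the run() wrapper, the None console/log
handles and the 10 second timeout are illustrative only, not part of this
change):

  use std::time::Duration;

  use android_system_virtualizationservice::aidl::android::system::virtualizationservice::VirtualMachineConfig::VirtualMachineConfig;
  use vmclient::VmInstance;

  fn run(config: &VirtualMachineConfig) -> anyhow::Result<()> {
      // Connect to (and if necessary start) VirtualizationService.
      let service = vmclient::connect()?;
      // Create the VM without console/log files, then boot it.
      let vm = VmInstance::create(&*service, config, None, None)?;
      vm.start()?;
      // Block until the payload is ready, the VM dies, or we time out.
      vm.wait_until_ready(Duration::from_secs(10))?;
      println!("VM is ready, CID {}", vm.cid());
      Ok(())
  }
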
Test: ComposHostTestCases compos_key_tests
Change-Id: I13c41d3b2bbe506495b723e7739f3181cb033f0f
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index 39e7c0a..0377474 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -17,6 +17,7 @@
"liblog_rust",
"libnum_traits",
"librustutils",
+ "libvmclient",
],
proc_macros: ["libnum_derive"],
shared_libs: [
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index 839280c..15f74cd 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -27,8 +27,7 @@
VirtualMachineConfig::VirtualMachineConfig,
};
use android_system_virtualizationservice::binder::{
- wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface, ParcelFileDescriptor,
- Result as BinderResult, Strong,
+ BinderFeatures, Interface, ParcelFileDescriptor, Result as BinderResult, Strong,
};
use anyhow::{anyhow, bail, Context, Result};
use binder::{
@@ -44,15 +43,11 @@
use std::os::raw;
use std::os::unix::io::IntoRawFd;
use std::path::{Path, PathBuf};
-use std::sync::{Arc, Condvar, Mutex};
use std::thread;
+use vmclient::VmInstance;
/// This owns an instance of the CompOS VM.
-pub struct VmInstance {
- #[allow(dead_code)] // Keeps the VM alive even if we don't touch it
- vm: Strong<dyn IVirtualMachine>,
- cid: i32,
-}
+pub struct ComposClient(VmInstance);
/// Parameters to be used when creating a virtual machine instance.
#[derive(Default, Debug, Clone)]
@@ -74,14 +69,7 @@
pub never_log: bool,
}
-impl VmInstance {
- /// Return a new connection to the Virtualization Service binder interface. This will start the
- /// service if necessary.
- pub fn connect_to_virtualization_service() -> Result<Strong<dyn IVirtualizationService>> {
- wait_for_interface::<dyn IVirtualizationService>("android.system.virtualizationservice")
- .context("Failed to find VirtualizationService")
- }
-
+impl ComposClient {
/// Start a new CompOS VM instance using the specified instance image file and parameters.
pub fn start(
service: &dyn IVirtualizationService,
@@ -89,7 +77,7 @@
idsig: &Path,
idsig_manifest_apk: &Path,
parameters: &VmParameters,
- ) -> Result<VmInstance> {
+ ) -> Result<Self> {
let protected_vm = want_protected_vm()?;
let instance_fd = ParcelFileDescriptor::new(instance_image);
@@ -121,8 +109,6 @@
.context("Failed to create console log file")?;
let log_fd = File::create(data_dir.join("vm.log"))
.context("Failed to create system log file")?;
- let console_fd = ParcelFileDescriptor::new(console_fd);
- let log_fd = ParcelFileDescriptor::new(log_fd);
info!("Running in debug level {:?}", debug_level);
(Some(console_fd), Some(log_fd))
};
@@ -142,31 +128,18 @@
taskProfiles: parameters.task_profiles.clone(),
});
- let vm = service
- .createVm(&config, console_fd.as_ref(), log_fd.as_ref())
+ let instance = VmInstance::create(service, &config, console_fd, log_fd)
.context("Failed to create VM")?;
- let vm_state = Arc::new(VmStateMonitor::default());
- let vm_state_clone = Arc::clone(&vm_state);
- let mut death_recipient = DeathRecipient::new(move || {
- vm_state_clone.set_died();
- log::error!("VirtualizationService died");
- });
- // Note that dropping death_recipient cancels this, so we can't use a temporary here.
- vm.as_binder().link_to_death(&mut death_recipient)?;
+ let callback =
+ BnVirtualMachineCallback::new_binder(VmCallback(), BinderFeatures::default());
+ instance.vm.registerCallback(&callback)?;
- let vm_state_clone = Arc::clone(&vm_state);
- let callback = BnVirtualMachineCallback::new_binder(
- VmCallback(vm_state_clone),
- BinderFeatures::default(),
- );
- vm.registerCallback(&callback)?;
+ instance.start()?;
- vm.start()?;
+ instance.wait_until_ready(timeouts()?.vm_max_time_to_ready)?;
- let cid = vm_state.wait_until_ready()?;
-
- Ok(VmInstance { vm, cid })
+ Ok(Self(instance))
}
fn locate_config_apk(apex_dir: &Path) -> Result<PathBuf> {
@@ -186,7 +159,7 @@
/// Create and return an RPC Binder connection to the Comp OS service in the VM.
pub fn get_service(&self) -> Result<Strong<dyn ICompOsService>> {
- let mut vsock_factory = VsockFactory::new(&*self.vm);
+ let mut vsock_factory = VsockFactory::new(&*self.0.vm);
let ibinder = vsock_factory
.connect_rpc_client()
@@ -194,12 +167,6 @@
FromIBinder::try_from(ibinder).context("Connecting to CompOS service")
}
-
- /// Return the CID of the VM.
- pub fn cid(&self) -> i32 {
- // TODO: Do we actually need/use this?
- self.cid
- }
}
fn prepare_idsig(
@@ -295,67 +262,12 @@
}
}
-#[derive(Debug, Default)]
-struct VmState {
- has_died: bool,
- cid: Option<i32>,
-}
-
-#[derive(Debug)]
-struct VmStateMonitor {
- mutex: Mutex<VmState>,
- state_ready: Condvar,
-}
-
-impl Default for VmStateMonitor {
- fn default() -> Self {
- Self { mutex: Mutex::new(Default::default()), state_ready: Condvar::new() }
- }
-}
-
-impl VmStateMonitor {
- fn set_died(&self) {
- let mut state = self.mutex.lock().unwrap();
- state.has_died = true;
- state.cid = None;
- drop(state); // Unlock the mutex prior to notifying
- self.state_ready.notify_all();
- }
-
- fn set_ready(&self, cid: i32) {
- let mut state = self.mutex.lock().unwrap();
- if state.has_died {
- return;
- }
- state.cid = Some(cid);
- drop(state); // Unlock the mutex prior to notifying
- self.state_ready.notify_all();
- }
-
- fn wait_until_ready(&self) -> Result<i32> {
- let (state, result) = self
- .state_ready
- .wait_timeout_while(
- self.mutex.lock().unwrap(),
- timeouts()?.vm_max_time_to_ready,
- |state| state.cid.is_none() && !state.has_died,
- )
- .unwrap();
- if result.timed_out() {
- bail!("Timed out waiting for VM")
- }
- state.cid.ok_or_else(|| anyhow!("VM died"))
- }
-}
-
-#[derive(Debug)]
-struct VmCallback(Arc<VmStateMonitor>);
+struct VmCallback();
impl Interface for VmCallback {}
impl IVirtualMachineCallback for VmCallback {
fn onDied(&self, cid: i32, reason: DeathReason) -> BinderResult<()> {
- self.0.set_died();
log::warn!("VM died, cid = {}, reason = {:?}", cid, reason);
Ok(())
}
@@ -375,21 +287,16 @@
}
fn onPayloadReady(&self, cid: i32) -> BinderResult<()> {
- self.0.set_ready(cid);
log::info!("VM payload ready, cid = {}", cid);
Ok(())
}
fn onPayloadFinished(&self, cid: i32, exit_code: i32) -> BinderResult<()> {
- // This should probably never happen in our case, but if it does it means our VM is no
- // longer running
- self.0.set_died();
log::warn!("VM payload finished, cid = {}, exit code = {}", cid, exit_code);
Ok(())
}
fn onError(&self, cid: i32, error_code: i32, message: &str) -> BinderResult<()> {
- self.0.set_died();
log::warn!("VM error, cid = {}, error code = {}, message = {}", cid, error_code, message,);
Ok(())
}
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index 55a3107..3a6119f 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -24,6 +24,7 @@
"liblog_rust",
"librustutils",
"libshared_child",
+ "libvmclient",
],
apex_available: [
"com.android.compos",
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
index d1b711d..ebcd689 100644
--- a/compos/composd/src/composd_main.rs
+++ b/compos/composd/src/composd_main.rs
@@ -27,7 +27,6 @@
use crate::instance_manager::InstanceManager;
use android_system_composd::binder::{register_lazy_service, ProcessState};
use anyhow::{Context, Result};
-use compos_common::compos_client::VmInstance;
use log::{error, info};
use std::panic;
use std::sync::Arc;
@@ -46,7 +45,8 @@
ProcessState::start_thread_pool();
- let virtualization_service = VmInstance::connect_to_virtualization_service()?;
+ let virtualization_service =
+ vmclient::connect().context("Failed to find VirtualizationService")?;
let instance_manager = Arc::new(InstanceManager::new(virtualization_service));
let composd_service = service::new_binder(instance_manager);
register_lazy_service("android.system.composd", composd_service.as_binder())
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index f899497..340e8b7 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -24,7 +24,7 @@
use binder_common::lazy_service::LazyServiceGuard;
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
use compos_aidl_interface::binder::{ParcelFileDescriptor, Strong};
-use compos_common::compos_client::{VmInstance, VmParameters};
+use compos_common::compos_client::{ComposClient, VmParameters};
use compos_common::{COMPOS_DATA_ROOT, IDSIG_FILE, IDSIG_MANIFEST_APK_FILE, INSTANCE_IMAGE_FILE};
use log::info;
use std::fs;
@@ -33,7 +33,7 @@
pub struct CompOsInstance {
service: Strong<dyn ICompOsService>,
#[allow(dead_code)] // Keeps VirtualizationService & the VM alive
- vm_instance: VmInstance,
+ vm_instance: ComposClient,
#[allow(dead_code)] // Keeps composd process alive
lazy_service_guard: LazyServiceGuard,
}
@@ -105,7 +105,7 @@
.write(true)
.open(&self.instance_image)
.context("Failed to open instance image")?;
- let vm_instance = VmInstance::start(
+ let vm_instance = ComposClient::start(
virtualization_service,
instance_image,
&self.idsig,
diff --git a/compos/verify/Android.bp b/compos/verify/Android.bp
index d6875d1..5c74e4f 100644
--- a/compos/verify/Android.bp
+++ b/compos/verify/Android.bp
@@ -15,6 +15,7 @@
"libcompos_common",
"libcompos_verify_native_rust",
"liblog_rust",
+ "libvmclient",
],
prefer_rlib: true,
apex_available: [
diff --git a/compos/verify/verify.rs b/compos/verify/verify.rs
index 14ce798..7a22cfd 100644
--- a/compos/verify/verify.rs
+++ b/compos/verify/verify.rs
@@ -20,7 +20,7 @@
use android_logger::LogId;
use anyhow::{bail, Context, Result};
use compos_aidl_interface::binder::ProcessState;
-use compos_common::compos_client::{VmInstance, VmParameters};
+use compos_common::compos_client::{ComposClient, VmParameters};
use compos_common::odrefresh::{
CURRENT_ARTIFACTS_SUBDIR, ODREFRESH_OUTPUT_ROOT_DIR, PENDING_ARTIFACTS_SUBDIR,
TEST_ARTIFACTS_SUBDIR,
@@ -98,8 +98,8 @@
// We need to start the thread pool to be able to receive Binder callbacks
ProcessState::start_thread_pool();
- let virtualization_service = VmInstance::connect_to_virtualization_service()?;
- let vm_instance = VmInstance::start(
+ let virtualization_service = vmclient::connect()?;
+ let vm_instance = ComposClient::start(
&*virtualization_service,
instance_image,
&idsig,
diff --git a/vm/Android.bp b/vm/Android.bp
index d1d53d0..2b83ca7 100644
--- a/vm/Android.bp
+++ b/vm/Android.bp
@@ -20,6 +20,7 @@
"libserde",
"libstructopt",
"libvmconfig",
+ "libvmclient",
"libzip",
],
apex_available: [
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 705e38f..8450b41 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -17,13 +17,12 @@
mod create_idsig;
mod create_partition;
mod run;
-mod sync;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
VirtualMachineAppConfig::DebugLevel::DebugLevel,
};
-use android_system_virtualizationservice::binder::{wait_for_interface, ProcessState, Strong};
+use android_system_virtualizationservice::binder::ProcessState;
use anyhow::{Context, Error};
use create_idsig::command_create_idsig;
use create_partition::command_create_partition;
@@ -33,9 +32,6 @@
use structopt::clap::AppSettings;
use structopt::StructOpt;
-const VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER: &str =
- "android.system.virtualizationservice";
-
#[derive(Debug)]
struct Idsigs(Vec<PathBuf>);
@@ -191,9 +187,7 @@
// We need to start the thread pool for Binder to work properly, especially link_to_death.
ProcessState::start_thread_pool();
- let service: Strong<dyn IVirtualizationService> =
- wait_for_interface(VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER)
- .context("Failed to find VirtualizationService")?;
+ let service = vmclient::connect().context("Failed to find VirtualizationService")?;
match opt {
Opt::RunApp {
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 2ae2c95..ca71665 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -15,19 +15,18 @@
//! Command to run a VM.
use crate::create_partition::command_create_partition;
-use crate::sync::AtomicFlag;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
- DeathReason::DeathReason, IVirtualMachine::IVirtualMachine,
- IVirtualMachineCallback::BnVirtualMachineCallback,
- IVirtualMachineCallback::IVirtualMachineCallback,
- IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
+ DeathReason::DeathReason,
+ IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
+ IVirtualizationService::IVirtualizationService,
+ PartitionType::PartitionType,
VirtualMachineAppConfig::DebugLevel::DebugLevel,
- VirtualMachineAppConfig::VirtualMachineAppConfig, VirtualMachineConfig::VirtualMachineConfig,
+ VirtualMachineAppConfig::VirtualMachineAppConfig,
+ VirtualMachineConfig::VirtualMachineConfig,
VirtualMachineState::VirtualMachineState,
};
use android_system_virtualizationservice::binder::{
- BinderFeatures, DeathRecipient, IBinder, Interface, ParcelFileDescriptor,
- Result as BinderResult,
+ BinderFeatures, Interface, ParcelFileDescriptor, Result as BinderResult,
};
use anyhow::{bail, Context, Error};
use microdroid_payload_config::VmPayloadConfig;
@@ -35,6 +34,7 @@
use std::io::{self, BufRead, BufReader};
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::path::{Path, PathBuf};
+use vmclient::VmInstance;
use vmconfig::{open_parcel_file, VmConfig};
use zip::ZipArchive;
@@ -173,78 +173,53 @@
log_path: Option<&Path>,
) -> Result<(), Error> {
let console = if let Some(console_path) = console_path {
- Some(ParcelFileDescriptor::new(
+ Some(
File::create(console_path)
.with_context(|| format!("Failed to open console file {:?}", console_path))?,
- ))
+ )
} else if daemonize {
None
} else {
- Some(ParcelFileDescriptor::new(duplicate_stdout()?))
+ Some(duplicate_stdout()?)
};
let log = if let Some(log_path) = log_path {
- Some(ParcelFileDescriptor::new(
+ Some(
File::create(log_path)
.with_context(|| format!("Failed to open log file {:?}", log_path))?,
- ))
+ )
} else if daemonize {
None
} else {
- Some(ParcelFileDescriptor::new(duplicate_stdout()?))
+ Some(duplicate_stdout()?)
};
- let vm =
- service.createVm(config, console.as_ref(), log.as_ref()).context("Failed to create VM")?;
+ let vm = VmInstance::create(service, config, console, log).context("Failed to create VM")?;
+ let callback =
+ BnVirtualMachineCallback::new_binder(VirtualMachineCallback {}, BinderFeatures::default());
+ vm.vm.registerCallback(&callback)?;
+ vm.start().context("Failed to start VM")?;
- let cid = vm.getCid().context("Failed to get CID")?;
println!(
"Created VM from {} with CID {}, state is {}.",
config_path,
- cid,
- state_to_str(vm.getState()?)
+ vm.cid(),
+ state_to_str(vm.state()?)
);
- vm.start()?;
- println!("Started VM, state now {}.", state_to_str(vm.getState()?));
if daemonize {
// Pass the VM reference back to VirtualizationService and have it hold it in the
// background.
- service.debugHoldVmRef(&vm).context("Failed to pass VM to VirtualizationService")
+ service.debugHoldVmRef(&vm.vm).context("Failed to pass VM to VirtualizationService")?;
} else {
// Wait until the VM or VirtualizationService dies. If we just returned immediately then the
// IVirtualMachine Binder object would be dropped and the VM would be killed.
- wait_for_vm(vm.as_ref())
+ let death_reason = vm.wait_for_death();
+ println!("{}", death_reason);
}
-}
-/// Wait until the given VM or the VirtualizationService itself dies.
-fn wait_for_vm(vm: &dyn IVirtualMachine) -> Result<(), Error> {
- let dead = AtomicFlag::default();
- let callback = BnVirtualMachineCallback::new_binder(
- VirtualMachineCallback { dead: dead.clone() },
- BinderFeatures::default(),
- );
- vm.registerCallback(&callback)?;
- let death_recipient = wait_for_death(&mut vm.as_binder(), dead.clone())?;
- dead.wait();
- // Ensure that death_recipient isn't dropped before we wait on the flag, as it is removed
- // from the Binder when it's dropped.
- drop(death_recipient);
Ok(())
}
-/// Raise the given flag when the given Binder object dies.
-///
-/// If the returned DeathRecipient is dropped then this will no longer do anything.
-fn wait_for_death(binder: &mut impl IBinder, dead: AtomicFlag) -> Result<DeathRecipient, Error> {
- let mut death_recipient = DeathRecipient::new(move || {
- eprintln!("VirtualizationService unexpectedly died");
- dead.raise();
- });
- binder.link_to_death(&mut death_recipient)?;
- Ok(death_recipient)
-}
-
fn parse_extra_apk_list(apk: &Path, config_path: &str) -> Result<Vec<String>, Error> {
let mut archive = ZipArchive::new(File::open(apk)?)?;
let config_file = archive.by_name(config_path)?;
@@ -253,9 +228,7 @@
}
#[derive(Debug)]
-struct VirtualMachineCallback {
- dead: AtomicFlag,
-}
+struct VirtualMachineCallback {}
impl Interface for VirtualMachineCallback {}
@@ -295,31 +268,7 @@
Ok(())
}
- fn onDied(&self, _cid: i32, reason: DeathReason) -> BinderResult<()> {
- self.dead.raise();
-
- match reason {
- DeathReason::INFRASTRUCTURE_ERROR => println!("Error waiting for VM to finish."),
- DeathReason::KILLED => println!("VM was killed."),
- DeathReason::UNKNOWN => println!("VM died for an unknown reason."),
- DeathReason::SHUTDOWN => println!("VM shutdown cleanly."),
- DeathReason::ERROR => println!("Error starting VM."),
- DeathReason::REBOOT => println!("VM tried to reboot, possibly due to a kernel panic."),
- DeathReason::CRASH => println!("VM crashed."),
- DeathReason::PVM_FIRMWARE_PUBLIC_KEY_MISMATCH => println!(
- "pVM firmware failed to verify the VM because the public key doesn't match."
- ),
- DeathReason::PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED => {
- println!("pVM firmware failed to verify the VM because the instance image changed.")
- }
- DeathReason::BOOTLOADER_PUBLIC_KEY_MISMATCH => {
- println!("Bootloader failed to verify the VM because the public key doesn't match.")
- }
- DeathReason::BOOTLOADER_INSTANCE_IMAGE_CHANGED => {
- println!("Bootloader failed to verify the VM because the instance image changed.")
- }
- _ => println!("VM died for an unrecognised reason."),
- }
+ fn onDied(&self, _cid: i32, _reason: DeathReason) -> BinderResult<()> {
Ok(())
}
}
diff --git a/vm/src/sync.rs b/vm/src/sync.rs
deleted file mode 100644
index 82839b3..0000000
--- a/vm/src/sync.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Synchronisation utilities.
-
-use std::sync::{Arc, Condvar, Mutex};
-
-/// A flag which one thread can use to notify other threads when a condition becomes true. This is
-/// something like a single-use binary semaphore.
-#[derive(Clone, Debug)]
-pub struct AtomicFlag {
- state: Arc<(Mutex<bool>, Condvar)>,
-}
-
-impl Default for AtomicFlag {
- #[allow(clippy::mutex_atomic)]
- fn default() -> Self {
- Self { state: Arc::new((Mutex::new(false), Condvar::new())) }
- }
-}
-
-#[allow(clippy::mutex_atomic)]
-impl AtomicFlag {
- /// Wait until the flag is set.
- pub fn wait(&self) {
- let _flag = self.state.1.wait_while(self.state.0.lock().unwrap(), |flag| !*flag).unwrap();
- }
-
- /// Set the flag, and notify all waiting threads.
- pub fn raise(&self) {
- let mut flag = self.state.0.lock().unwrap();
- *flag = true;
- self.state.1.notify_all();
- }
-}
diff --git a/vmclient/Android.bp b/vmclient/Android.bp
new file mode 100644
index 0000000..3310ec6
--- /dev/null
+++ b/vmclient/Android.bp
@@ -0,0 +1,19 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+ name: "libvmclient",
+ crate_name: "vmclient",
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ rustlibs: [
+ "android.system.virtualizationservice-rust",
+ "liblog_rust",
+ "libthiserror",
+ ],
+ apex_available: [
+ "com.android.compos",
+ "com.android.virt",
+ ],
+}
diff --git a/vmclient/src/death_reason.rs b/vmclient/src/death_reason.rs
new file mode 100644
index 0000000..657eaa2
--- /dev/null
+++ b/vmclient/src/death_reason.rs
@@ -0,0 +1,103 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{self, Debug, Display, Formatter};
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+    DeathReason::DeathReason as AidlDeathReason,
+};
+
+/// The reason why a VM died.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum DeathReason {
+ /// VirtualizationService died.
+ VirtualizationServiceDied,
+ /// There was an error waiting for the VM.
+ InfrastructureError,
+ /// The VM was killed.
+ Killed,
+ /// The VM died for an unknown reason.
+ Unknown,
+ /// The VM requested to shut down.
+ Shutdown,
+ /// crosvm had an error starting the VM.
+ Error,
+ /// The VM requested to reboot, possibly as the result of a kernel panic.
+ Reboot,
+ /// The VM or crosvm crashed.
+ Crash,
+ /// The pVM firmware failed to verify the VM because the public key doesn't match.
+ PvmFirmwarePublicKeyMismatch,
+ /// The pVM firmware failed to verify the VM because the instance image changed.
+ PvmFirmwareInstanceImageChanged,
+ /// The bootloader failed to verify the VM because the public key doesn't match.
+ BootloaderPublicKeyMismatch,
+ /// The bootloader failed to verify the VM because the instance image changed.
+ BootloaderInstanceImageChanged,
+ /// VirtualizationService sent a death reason which was not recognised by the client library.
+ Unrecognised(AidlDeathReason),
+}
+
+impl From<AidlDeathReason> for DeathReason {
+ fn from(reason: AidlDeathReason) -> Self {
+ match reason {
+ AidlDeathReason::INFRASTRUCTURE_ERROR => Self::InfrastructureError,
+ AidlDeathReason::KILLED => Self::Killed,
+ AidlDeathReason::UNKNOWN => Self::Unknown,
+ AidlDeathReason::SHUTDOWN => Self::Shutdown,
+ AidlDeathReason::ERROR => Self::Error,
+ AidlDeathReason::REBOOT => Self::Reboot,
+ AidlDeathReason::CRASH => Self::Crash,
+ AidlDeathReason::PVM_FIRMWARE_PUBLIC_KEY_MISMATCH => Self::PvmFirmwarePublicKeyMismatch,
+ AidlDeathReason::PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED => {
+ Self::PvmFirmwareInstanceImageChanged
+ }
+ AidlDeathReason::BOOTLOADER_PUBLIC_KEY_MISMATCH => Self::BootloaderPublicKeyMismatch,
+ AidlDeathReason::BOOTLOADER_INSTANCE_IMAGE_CHANGED => {
+ Self::BootloaderInstanceImageChanged
+ }
+ _ => Self::Unrecognised(reason),
+ }
+ }
+}
+
+impl Display for DeathReason {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ let s = match self {
+ Self::VirtualizationServiceDied => "VirtualizationService died.",
+ Self::InfrastructureError => "Error waiting for VM to finish.",
+ Self::Killed => "VM was killed.",
+ Self::Unknown => "VM died for an unknown reason.",
+ Self::Shutdown => "VM shutdown cleanly.",
+ Self::Error => "Error starting VM.",
+ Self::Reboot => "VM tried to reboot, possibly due to a kernel panic.",
+ Self::Crash => "VM crashed.",
+ Self::PvmFirmwarePublicKeyMismatch => {
+ "pVM firmware failed to verify the VM because the public key doesn't match."
+ }
+ Self::PvmFirmwareInstanceImageChanged => {
+ "pVM firmware failed to verify the VM because the instance image changed."
+ }
+ Self::BootloaderPublicKeyMismatch => {
+ "Bootloader failed to verify the VM because the public key doesn't match."
+ }
+ Self::BootloaderInstanceImageChanged => {
+ "Bootloader failed to verify the VM because the instance image changed."
+ }
+ Self::Unrecognised(reason) => {
+ return write!(f, "Unrecognised death reason {:?}.", reason);
+ }
+ };
+ f.write_str(s)
+ }
+}
diff --git a/vmclient/src/errors.rs b/vmclient/src/errors.rs
new file mode 100644
index 0000000..b9de868
--- /dev/null
+++ b/vmclient/src/errors.rs
@@ -0,0 +1,33 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::DeathReason;
+use thiserror::Error;
+
+/// An error while waiting for a VM to do something.
+#[derive(Clone, Debug, Error)]
+pub enum VmWaitError {
+ /// Timed out waiting for the VM.
+ #[error("Timed out waiting for VM.")]
+ TimedOut,
+ /// The VM died before it was ready.
+ #[error("VM died. ({reason})")]
+ Died {
+ /// The reason why the VM died.
+ reason: DeathReason,
+ },
+ /// The VM payload finished before becoming ready.
+ #[error("VM payload finished.")]
+ Finished,
+}
diff --git a/vmclient/src/lib.rs b/vmclient/src/lib.rs
new file mode 100644
index 0000000..888092f
--- /dev/null
+++ b/vmclient/src/lib.rs
@@ -0,0 +1,214 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Client library for VirtualizationService.
+
+mod death_reason;
+mod errors;
+mod sync;
+
+pub use crate::death_reason::DeathReason;
+pub use crate::errors::VmWaitError;
+use crate::sync::Monitor;
+use android_system_virtualizationservice::{
+ aidl::android::system::virtualizationservice::{
+ DeathReason::DeathReason as AidlDeathReason,
+ IVirtualMachine::IVirtualMachine,
+ IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
+ IVirtualizationService::IVirtualizationService,
+ VirtualMachineConfig::VirtualMachineConfig,
+ VirtualMachineState::VirtualMachineState,
+ },
+ binder::{
+ wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface,
+ ParcelFileDescriptor, Result as BinderResult, StatusCode, Strong,
+ },
+};
+use log::warn;
+use std::{
+ fmt::{self, Debug, Formatter},
+ fs::File,
+ sync::Arc,
+ time::Duration,
+};
+
+const VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER: &str =
+ "android.system.virtualizationservice";
+
+/// Connects to the VirtualizationService AIDL service.
+pub fn connect() -> Result<Strong<dyn IVirtualizationService>, StatusCode> {
+ wait_for_interface(VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER)
+}
+
+/// A virtual machine which has been started by the VirtualizationService.
+pub struct VmInstance {
+ /// The `IVirtualMachine` Binder object representing the VM.
+ pub vm: Strong<dyn IVirtualMachine>,
+ cid: i32,
+ state: Arc<Monitor<VmState>>,
+ // Ensure that the DeathRecipient isn't dropped while someone might call wait_for_death, as it
+ // is removed from the Binder when it's dropped.
+ _death_recipient: DeathRecipient,
+}
+
+impl VmInstance {
+ /// Creates (but doesn't start) a new VM with the given configuration.
+ pub fn create(
+ service: &dyn IVirtualizationService,
+ config: &VirtualMachineConfig,
+ console: Option<File>,
+ log: Option<File>,
+ ) -> BinderResult<Self> {
+ let console = console.map(ParcelFileDescriptor::new);
+ let log = log.map(ParcelFileDescriptor::new);
+
+ let vm = service.createVm(config, console.as_ref(), log.as_ref())?;
+
+ let cid = vm.getCid()?;
+
+ // Register callback before starting VM, in case it dies immediately.
+ let state = Arc::new(Monitor::new(VmState::default()));
+ let callback = BnVirtualMachineCallback::new_binder(
+ VirtualMachineCallback { state: state.clone() },
+ BinderFeatures::default(),
+ );
+ vm.registerCallback(&callback)?;
+ let death_recipient = wait_for_binder_death(&mut vm.as_binder(), state.clone())?;
+
+ Ok(Self { vm, cid, state, _death_recipient: death_recipient })
+ }
+
+ /// Starts the VM.
+ pub fn start(&self) -> BinderResult<()> {
+ self.vm.start()
+ }
+
+ /// Returns the CID used for vsock connections to the VM.
+ pub fn cid(&self) -> i32 {
+ self.cid
+ }
+
+ /// Returns the current lifecycle state of the VM.
+ pub fn state(&self) -> BinderResult<VirtualMachineState> {
+ self.vm.getState()
+ }
+
+ /// Blocks until the VM or the VirtualizationService itself dies, and then returns the reason
+ /// why it died.
+ pub fn wait_for_death(&self) -> DeathReason {
+ self.state.wait_while(|state| state.death_reason.is_none()).unwrap().death_reason.unwrap()
+ }
+
+ /// Waits until the VM reports that it is ready.
+ ///
+ /// Returns an error if the VM dies first, or the `timeout` elapses before the VM is ready.
+ pub fn wait_until_ready(&self, timeout: Duration) -> Result<(), VmWaitError> {
+ let (state, timeout_result) = self
+ .state
+ .wait_timeout_while(timeout, |state| {
+ state.reported_state < VirtualMachineState::READY && state.death_reason.is_none()
+ })
+ .unwrap();
+ if timeout_result.timed_out() {
+ Err(VmWaitError::TimedOut)
+ } else if let Some(reason) = state.death_reason {
+ Err(VmWaitError::Died { reason })
+ } else if state.reported_state != VirtualMachineState::READY {
+ Err(VmWaitError::Finished)
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl Debug for VmInstance {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.debug_struct("VmInstance").field("cid", &self.cid).field("state", &self.state).finish()
+ }
+}
+
+/// Notify the VmState when the given Binder object dies.
+///
+/// If the returned DeathRecipient is dropped then this will no longer do anything.
+fn wait_for_binder_death(
+ binder: &mut impl IBinder,
+ state: Arc<Monitor<VmState>>,
+) -> BinderResult<DeathRecipient> {
+ let mut death_recipient = DeathRecipient::new(move || {
+ warn!("VirtualizationService unexpectedly died");
+ state.notify_death(DeathReason::VirtualizationServiceDied);
+ });
+ binder.link_to_death(&mut death_recipient)?;
+ Ok(death_recipient)
+}
+
+#[derive(Debug, Default)]
+struct VmState {
+ death_reason: Option<DeathReason>,
+ reported_state: VirtualMachineState,
+}
+
+impl Monitor<VmState> {
+ fn notify_death(&self, reason: DeathReason) {
+ let state = &mut *self.state.lock().unwrap();
+ // In case this method is called more than once, ignore subsequent calls.
+ if state.death_reason.is_none() {
+ state.death_reason.replace(reason);
+ self.cv.notify_all();
+ }
+ }
+
+ fn notify_state(&self, state: VirtualMachineState) {
+ self.state.lock().unwrap().reported_state = state;
+ self.cv.notify_all();
+ }
+}
+
+#[derive(Debug)]
+struct VirtualMachineCallback {
+ state: Arc<Monitor<VmState>>,
+}
+
+impl Interface for VirtualMachineCallback {}
+
+impl IVirtualMachineCallback for VirtualMachineCallback {
+ fn onPayloadStarted(
+ &self,
+ _cid: i32,
+ _stream: Option<&ParcelFileDescriptor>,
+ ) -> BinderResult<()> {
+ self.state.notify_state(VirtualMachineState::STARTED);
+ Ok(())
+ }
+
+ fn onPayloadReady(&self, _cid: i32) -> BinderResult<()> {
+ self.state.notify_state(VirtualMachineState::READY);
+ Ok(())
+ }
+
+ fn onPayloadFinished(&self, _cid: i32, _exit_code: i32) -> BinderResult<()> {
+ self.state.notify_state(VirtualMachineState::FINISHED);
+ Ok(())
+ }
+
+ fn onError(&self, _cid: i32, _error_code: i32, _message: &str) -> BinderResult<()> {
+ self.state.notify_state(VirtualMachineState::FINISHED);
+ Ok(())
+ }
+
+ fn onDied(&self, _cid: i32, reason: AidlDeathReason) -> BinderResult<()> {
+ self.state.notify_death(reason.into());
+ Ok(())
+ }
+}
diff --git a/vmclient/src/sync.rs b/vmclient/src/sync.rs
new file mode 100644
index 0000000..a265f60
--- /dev/null
+++ b/vmclient/src/sync.rs
@@ -0,0 +1,59 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{
+ sync::{Condvar, LockResult, Mutex, MutexGuard, PoisonError, WaitTimeoutResult},
+ time::Duration,
+};
+
+/// A mutex with an associated condition variable.
+#[derive(Debug)]
+pub struct Monitor<T> {
+ pub state: Mutex<T>,
+ pub cv: Condvar,
+}
+
+impl<T> Monitor<T> {
+ /// Creates a new mutex wrapping the given value, and a new condition variable to go with it.
+ pub fn new(state: T) -> Self {
+ Self { state: Mutex::new(state), cv: Condvar::default() }
+ }
+
+ /// Waits on the condition variable while the given condition holds true on the contents of the
+ /// mutex.
+ ///
+ /// Blocks until the condition variable is notified and the function returns false.
+ pub fn wait_while(&self, condition: impl FnMut(&mut T) -> bool) -> LockResult<MutexGuard<T>> {
+ self.cv.wait_while(self.state.lock()?, condition)
+ }
+
+ /// Waits on the condition variable while the given condition holds true on the contents of the
+ /// mutex, with a timeout.
+ ///
+ /// Blocks until the condition variable is notified and the function returns false, or the
+ /// timeout elapses.
+ pub fn wait_timeout_while(
+ &self,
+ timeout: Duration,
+ condition: impl FnMut(&mut T) -> bool,
+ ) -> Result<(MutexGuard<T>, WaitTimeoutResult), PoisonError<MutexGuard<T>>> {
+ self.cv
+ .wait_timeout_while(self.state.lock()?, timeout, condition)
+ .map_err(convert_poison_error)
+ }
+}
+
+fn convert_poison_error<T>(err: PoisonError<(T, WaitTimeoutResult)>) -> PoisonError<T> {
+ PoisonError::new(err.into_inner().0)
+}