Add VM ID database and maintenance functionality
Convert `VirtualizationServiceInternal` to support both the
`IVirtualizationServiceInternal` interface and (optionally) the
`IVirtualizationMaintenance` interface.
Support for the latter is backed by state held in a `maintenance::State`
item, which holds both:
- A reference to the device's Secretkeeper instance
- An SQLite database mapping each VM ID to its owning (user_id, app_id)
  pair.
The database is implemented in a new `maintenance::vmdb` submodule; the
schema it creates is shown below.
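
For reference, the schema created by VmIdDb::init_tables() (reproduced from
the new submodule below) is a single table keyed by the 64-byte VM ID, plus
indices on user_id and on (user_id, app_id):

  CREATE TABLE IF NOT EXISTS main.vmids (
      vm_id   BLOB PRIMARY KEY,
      user_id INTEGER,
      app_id  INTEGER
  ) WITHOUT ROWID;

Because the table is declared WITHOUT ROWID, rows are clustered on vm_id, so
lookups and deletions by VM ID go straight to the row without a separate
rowid index.
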
Bug: 294177871
Test: virtualizationservice_test
Change-Id: I0c2f482252bc97dfdb75dd2e3a43883ab0eb3a77
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 5dd1e0f..fc7fcd2 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -5,7 +5,10 @@
rust_defaults {
name: "virtualizationservice_defaults",
crate_name: "virtualizationservice",
- defaults: ["avf_build_flags_rust"],
+ defaults: [
+ "avf_build_flags_rust",
+ "secretkeeper_use_latest_hal_aidl_rust",
+ ],
edition: "2021",
srcs: ["src/main.rs"],
// Only build on targets which crosvm builds on.
@@ -36,11 +39,13 @@
"libhypervisor_props",
"liblazy_static",
"liblibc",
+ "liblibsqlite3_sys",
"liblog_rust",
"libnix",
"libopenssl",
"librand",
"librkpd_client",
+ "librusqlite",
"librustutils",
"libstatslog_virtualization_rust",
"libtombstoned_client_rust",
@@ -67,7 +72,10 @@
rust_test {
name: "virtualizationservice_test",
- defaults: ["virtualizationservice_defaults"],
+ defaults: [
+ "authgraph_use_latest_hal_aidl_rust",
+ "virtualizationservice_defaults",
+ ],
test_suites: ["general-tests"],
data: [
":test_rkp_cert_chain",
diff --git a/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl b/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
index 161673a..76d7309 100644
--- a/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationmaintenance/IVirtualizationMaintenance.aidl
@@ -17,9 +17,20 @@
package android.system.virtualizationmaintenance;
interface IVirtualizationMaintenance {
+ /**
+ * Notification that an app has been permanently removed, to allow related global state to
+ * be removed.
+ *
+ * @param userId The Android user ID for whom the notification applies.
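+ * @param appId The app ID of the removed app.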
+ */
void appRemoved(int userId, int appId);
+ /**
+ * Notification that a user has been removed, to allow related global state to be removed.
+ *
+ * @param userId The Android user ID of the user.
+ */
void userRemoved(int userId);
- // TODO: Something for daily reconciliation
+ // TODO(b/294177871): Something for daily reconciliation
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
index fc36190..8af881b 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
@@ -96,4 +96,11 @@
* Allocate an instance_id to the (newly created) VM.
*/
byte[64] allocateInstanceId();
+
+ /**
+ * Notification that state associated with a VM should be removed.
+ *
+ * @param instanceId The ID for the VM.
+ */
+ void removeVmInstance(in byte[64] instanceId);
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 3bc7caf..c0024f1 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -15,11 +15,13 @@
//! Implementation of the AIDL interface of the VirtualizationService.
use crate::atom::{forward_vm_booted_atom, forward_vm_creation_atom, forward_vm_exited_atom};
+use crate::maintenance;
use crate::remote_provisioning;
use crate::rkpvm::{generate_ecdsa_p256_key_pair, request_attestation};
use crate::{get_calling_pid, get_calling_uid, REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME};
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_system_virtualizationcommon::aidl::android::system::virtualizationcommon;
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice;
use android_system_virtualizationservice_internal as android_vs_internal;
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice;
@@ -49,6 +51,7 @@
use std::sync::{Arc, Mutex, Weak};
use tombstoned_client::{DebuggerdDumpType, TombstonedConnection};
use virtualizationcommon::Certificate::Certificate;
+use virtualizationmaintenance::IVirtualizationMaintenance::IVirtualizationMaintenance;
use virtualizationservice::{
AssignableDevice::AssignableDevice, VirtualMachineDebugInfo::VirtualMachineDebugInfo,
};
@@ -60,9 +63,7 @@
IGlobalVmContext::{BnGlobalVmContext, IGlobalVmContext},
IVfioHandler::VfioDev::VfioDev,
IVfioHandler::{BpVfioHandler, IVfioHandler},
- IVirtualizationServiceInternal::{
- BnVirtualizationServiceInternal, IVirtualizationServiceInternal,
- },
+ IVirtualizationServiceInternal::IVirtualizationServiceInternal,
};
use virtualmachineservice::IVirtualMachineService::VM_TOMBSTONES_SERVICE_PORT;
use vsock::{VsockListener, VsockStream};
@@ -160,14 +161,15 @@
/// Singleton service for allocating globally-unique VM resources, such as the CID, and running
/// singleton servers, like tombstone receiver.
-#[derive(Debug, Default)]
+#[derive(Clone)]
pub struct VirtualizationServiceInternal {
state: Arc<Mutex<GlobalState>>,
}
impl VirtualizationServiceInternal {
- pub fn init() -> Strong<dyn IVirtualizationServiceInternal> {
- let service = VirtualizationServiceInternal::default();
+ pub fn init() -> VirtualizationServiceInternal {
+ let service =
+ VirtualizationServiceInternal { state: Arc::new(Mutex::new(GlobalState::new())) };
std::thread::spawn(|| {
if let Err(e) = handle_stream_connection_tombstoned() {
@@ -175,7 +177,7 @@
}
});
- BnVirtualizationServiceInternal::new_binder(service, BinderFeatures::default())
+ service
}
}
@@ -390,6 +392,41 @@
info!("Allocated a VM's instance_id: {:?}, for uid: {:?}", hex::encode(id), uid);
Ok(id)
}
+
+ fn removeVmInstance(&self, instance_id: &[u8; 64]) -> binder::Result<()> {
+ let state = &mut *self.state.lock().unwrap();
+ if let Some(sk_state) = &mut state.sk_state {
+ info!("removeVmInstance(): delete secret");
+ sk_state.delete_ids(&[*instance_id]);
+ } else {
+ info!("ignoring removeVmInstance() as no ISecretkeeper");
+ }
+ Ok(())
+ }
+}
+
+impl IVirtualizationMaintenance for VirtualizationServiceInternal {
+ fn appRemoved(&self, user_id: i32, app_id: i32) -> binder::Result<()> {
+ let state = &mut *self.state.lock().unwrap();
+ if let Some(sk_state) = &mut state.sk_state {
+ info!("packageRemoved(user_id={user_id}, app_id={app_id})");
+ sk_state.delete_ids_for_app(user_id, app_id).or_service_specific_exception(-1)?;
+ } else {
+ info!("ignoring packageRemoved(user_id={user_id}, app_id={app_id})");
+ }
+ Ok(())
+ }
+
+ fn userRemoved(&self, user_id: i32) -> binder::Result<()> {
+ let state = &mut *self.state.lock().unwrap();
+ if let Some(sk_state) = &mut state.sk_state {
+ info!("userRemoved({user_id})");
+ sk_state.delete_ids_for_user(user_id).or_service_specific_exception(-1)?;
+ } else {
+ info!("ignoring userRemoved(user_id={user_id})");
+ }
+ Ok(())
+ }
}
// KEEP IN SYNC WITH assignable_devices.xsd
@@ -474,7 +511,6 @@
/// The mutable state of the VirtualizationServiceInternal. There should only be one instance
/// of this struct.
-#[derive(Debug, Default)]
struct GlobalState {
/// VM contexts currently allocated to running VMs. A CID is never recycled as long
/// as there is a strong reference held by a GlobalVmContext.
@@ -482,9 +518,20 @@
/// Cached read-only FD of VM DTBO file. Also serves as a lock for creating the file.
dtbo_file: Mutex<Option<File>>,
+
+ /// State relating to secrets held by the (optional) Secretkeeper instance on behalf of VMs.
+ sk_state: Option<maintenance::State>,
}
impl GlobalState {
+ fn new() -> Self {
+ Self {
+ held_contexts: HashMap::new(),
+ dtbo_file: Mutex::new(None),
+ sk_state: maintenance::State::new(),
+ }
+ }
+
/// Get the next available CID, or an error if we have run out. The last CID used is stored in
/// a system property so that restart of virtualizationservice doesn't reuse CID while the host
/// Android is up.
@@ -729,7 +776,6 @@
#[cfg(test)]
mod tests {
use super::*;
- use std::fs;
const TEST_RKP_CERT_CHAIN_PATH: &str = "testdata/rkp_cert_chain.der";
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index 97bb38f..bcea1bc 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -20,10 +20,16 @@
mod remote_provisioning;
mod rkpvm;
-use crate::aidl::{remove_temporary_dir, VirtualizationServiceInternal, TEMPORARY_DIRECTORY};
+use crate::aidl::{remove_temporary_dir, TEMPORARY_DIRECTORY, VirtualizationServiceInternal};
use android_logger::{Config, FilterBuilder};
+use android_system_virtualizationservice_internal::aidl::android::system::{
+ virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal
+};
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance::{
+ IVirtualizationMaintenance::BnVirtualizationMaintenance
+};
use anyhow::{bail, Context, Error, Result};
-use binder::{register_lazy_service, ProcessState, ThreadState};
+use binder::{register_lazy_service, BinderFeatures, ProcessState, ThreadState};
use log::{error, info, LevelFilter};
use std::fs::{create_dir, read_dir};
use std::os::unix::raw::{pid_t, uid_t};
@@ -69,16 +75,25 @@
create_dir(common_dir_path).context("Failed to create common directory")?;
ProcessState::start_thread_pool();
- register(INTERNAL_SERVICE_NAME, VirtualizationServiceInternal::init())?;
+
+ // One instance of `VirtualizationServiceInternal` implements both the internal interface
+ // and (optionally) the maintenance interface.
+ let service = VirtualizationServiceInternal::init();
+ let internal_service =
+ BnVirtualizationServiceInternal::new_binder(service.clone(), BinderFeatures::default());
+ register(INTERNAL_SERVICE_NAME, internal_service)?;
if cfg!(remote_attestation) {
// The IRemotelyProvisionedComponent service is only supposed to be triggered by rkpd for
// RKP VM attestation.
- register(REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME, remote_provisioning::new_binder())?;
+ let remote_provisioning_service = remote_provisioning::new_binder();
+ register(REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME, remote_provisioning_service)?;
}
if cfg!(llpvm_changes) {
- register(MAINTENANCE_SERVICE_NAME, maintenance::new_binder())?;
+ let maintenance_service =
+ BnVirtualizationMaintenance::new_binder(service.clone(), BinderFeatures::default());
+ register(MAINTENANCE_SERVICE_NAME, maintenance_service)?;
}
ProcessState::join_thread_pool();
diff --git a/virtualizationservice/src/maintenance.rs b/virtualizationservice/src/maintenance.rs
index 191d39a..7fc2f37 100644
--- a/virtualizationservice/src/maintenance.rs
+++ b/virtualizationservice/src/maintenance.rs
@@ -12,33 +12,245 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance;
-use anyhow::anyhow;
-use binder::{BinderFeatures, ExceptionCode, Interface, IntoBinderResult, Strong};
-use virtualizationmaintenance::IVirtualizationMaintenance::{
- BnVirtualizationMaintenance, IVirtualizationMaintenance,
+use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::{
+ ISecretkeeper::ISecretkeeper, SecretId::SecretId,
};
+use anyhow::Result;
+use log::{error, info, warn};
-pub(crate) fn new_binder() -> Strong<dyn IVirtualizationMaintenance> {
- BnVirtualizationMaintenance::new_binder(
- VirtualizationMaintenanceService {},
- BinderFeatures::default(),
- )
+mod vmdb;
+use vmdb::{VmId, VmIdDb};
+
+/// Interface name for the Secretkeeper HAL.
+const SECRETKEEPER_SERVICE: &str = "android.hardware.security.secretkeeper.ISecretkeeper/default";
+
+/// Directory in which to write persistent state.
+const PERSISTENT_DIRECTORY: &str = "/data/misc/apexdata/com.android.virt";
+
+/// Maximum number of VM IDs to delete at once. This must be smaller than the maximum number
+/// of SQLite parameters (999) and small enough that an ISecretkeeper::deleteIds parcel fits
+/// within the maximum AIDL message size.
+const DELETE_MAX_BATCH_SIZE: usize = 100;
+
+/// State related to VM secrets.
+pub struct State {
+ /// Connection to the Secretkeeper HAL instance.
+ sk: binder::Strong<dyn ISecretkeeper>,
+ /// Database of VM IDs and their owning (user_id, app_id) pairs.
+ vm_id_db: VmIdDb,
+ /// Maximum number of VM IDs passed to Secretkeeper in a single deleteIds call.
+ batch_size: usize,
}
-pub struct VirtualizationMaintenanceService;
-
-impl Interface for VirtualizationMaintenanceService {}
-
-#[allow(non_snake_case)]
-impl IVirtualizationMaintenance for VirtualizationMaintenanceService {
- fn appRemoved(&self, _user_id: i32, _app_id: i32) -> binder::Result<()> {
- Err(anyhow!("appRemoved not supported"))
- .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION)
+impl State {
+ pub fn new() -> Option<Self> {
+ let sk = match Self::find_sk() {
+ Some(sk) => sk,
+ None => {
+ warn!("failed to find a Secretkeeper instance; skipping secret management");
+ return None;
+ }
+ };
+ let (vm_id_db, created) = match VmIdDb::new(PERSISTENT_DIRECTORY) {
+ Ok(v) => v,
+ Err(e) => {
+ error!("skipping secret management, failed to connect to database: {e:?}");
+ return None;
+ }
+ };
+ if created {
+ // If the database did not previously exist, then this appears to be the first run of
+ // `virtualizationservice` since device setup or factory reset. In case of the latter,
+ // delete any secrets that may be left over from before reset, thus ensuring that the
+ // local database state matches that of the TA (i.e. empty).
+ warn!("no existing VM ID DB; clearing any previous secrets to match fresh DB");
+ if let Err(e) = sk.deleteAll() {
+ error!("failed to delete previous secrets, dropping database: {e:?}");
+ vm_id_db.delete_db_file(PERSISTENT_DIRECTORY);
+ return None;
+ }
+ } else {
+ info!("re-using existing VM ID DB");
+ }
+ Some(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })
}
- fn userRemoved(&self, _user_id: i32) -> binder::Result<()> {
- Err(anyhow!("userRemoved not supported"))
- .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION)
+ fn find_sk() -> Option<binder::Strong<dyn ISecretkeeper>> {
+ if let Ok(true) = binder::is_declared(SECRETKEEPER_SERVICE) {
+ match binder::get_interface(SECRETKEEPER_SERVICE) {
+ Ok(sk) => Some(sk),
+ Err(e) => {
+ error!("failed to connect to {SECRETKEEPER_SERVICE}: {e:?}");
+ None
+ }
+ }
+ } else {
+ info!("instance {SECRETKEEPER_SERVICE} not declared");
+ None
+ }
+ }
+
+ /// Delete the VM IDs associated with Android user ID `user_id`.
+ pub fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
+ let vm_ids = self.vm_id_db.vm_ids_for_user(user_id)?;
+ info!(
+ "delete_ids_for_user(user_id={user_id}) triggers deletion of {} secrets",
+ vm_ids.len()
+ );
+ self.delete_ids(&vm_ids);
+ Ok(())
+ }
+
+ /// Delete the VM IDs associated with `(user_id, app_id)`.
+ pub fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
+ let vm_ids = self.vm_id_db.vm_ids_for_app(user_id, app_id)?;
+ info!(
+ "delete_ids_for_app(user_id={user_id}, app_id={app_id}) removes {} secrets",
+ vm_ids.len()
+ );
+ self.delete_ids(&vm_ids);
+ Ok(())
+ }
+
+ /// Delete the provided VM IDs from both Secretkeeper and the database.
+ pub fn delete_ids(&mut self, mut vm_ids: &[VmId]) {
+ while !vm_ids.is_empty() {
+ let len = std::cmp::min(vm_ids.len(), self.batch_size);
+ let batch = &vm_ids[..len];
+ self.delete_ids_batch(batch);
+ vm_ids = &vm_ids[len..];
+ }
+ }
+
+ /// Delete a batch of VM IDs from both Secretkeeper and the database. The batch is assumed
+ /// to be smaller than both:
+ /// - the corresponding limit for number of database parameters
+ /// - the corresponding limit for maximum size of a single AIDL message for `ISecretkeeper`.
+ fn delete_ids_batch(&mut self, vm_ids: &[VmId]) {
+ let secret_ids: Vec<SecretId> = vm_ids.iter().map(|id| SecretId { id: *id }).collect();
+ if let Err(e) = self.sk.deleteIds(&secret_ids) {
+ error!("failed to delete all secrets from Secretkeeper: {e:?}");
+ }
+ if let Err(e) = self.vm_id_db.delete_vm_ids(vm_ids) {
+ error!("failed to remove secret IDs from database: {e:?}");
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::sync::{Arc, Mutex};
+ use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph::{
+ IAuthGraphKeyExchange::IAuthGraphKeyExchange,
+ };
+ use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::{
+ ISecretkeeper::BnSecretkeeper
+ };
+
+ /// Fake implementation of Secretkeeper that keeps a history of what operations were invoked.
+ #[derive(Default)]
+ struct FakeSk {
+ history: Arc<Mutex<Vec<SkOp>>>,
+ }
+
+ #[derive(Clone, PartialEq, Eq, Debug)]
+ enum SkOp {
+ Management,
+ DeleteIds(Vec<VmId>),
+ DeleteAll,
+ }
+
+ impl ISecretkeeper for FakeSk {
+ fn processSecretManagementRequest(&self, _req: &[u8]) -> binder::Result<Vec<u8>> {
+ self.history.lock().unwrap().push(SkOp::Management);
+ Ok(vec![])
+ }
+
+ fn getAuthGraphKe(&self) -> binder::Result<binder::Strong<dyn IAuthGraphKeyExchange>> {
+ unimplemented!()
+ }
+
+ fn deleteIds(&self, ids: &[SecretId]) -> binder::Result<()> {
+ self.history.lock().unwrap().push(SkOp::DeleteIds(ids.iter().map(|s| s.id).collect()));
+ Ok(())
+ }
+
+ fn deleteAll(&self) -> binder::Result<()> {
+ self.history.lock().unwrap().push(SkOp::DeleteAll);
+ Ok(())
+ }
+ }
+ impl binder::Interface for FakeSk {}
+
+ fn new_test_state(history: Arc<Mutex<Vec<SkOp>>>, batch_size: usize) -> State {
+ let vm_id_db = vmdb::new_test_db();
+ let sk = FakeSk { history };
+ let sk = BnSecretkeeper::new_binder(sk, binder::BinderFeatures::default());
+ State { sk, vm_id_db, batch_size }
+ }
+
+ const VM_ID1: VmId = [1u8; 64];
+ const VM_ID2: VmId = [2u8; 64];
+ const VM_ID3: VmId = [3u8; 64];
+ const VM_ID4: VmId = [4u8; 64];
+ const VM_ID5: VmId = [5u8; 64];
+
+ #[test]
+ fn test_sk_state_batching() {
+ let history = Arc::new(Mutex::new(Vec::new()));
+ let mut sk_state = new_test_state(history.clone(), 2);
+ sk_state.delete_ids(&[VM_ID1, VM_ID2, VM_ID3, VM_ID4, VM_ID5]);
+ let got = (*history.lock().unwrap()).clone();
+ assert_eq!(
+ got,
+ vec![
+ SkOp::DeleteIds(vec![VM_ID1, VM_ID2]),
+ SkOp::DeleteIds(vec![VM_ID3, VM_ID4]),
+ SkOp::DeleteIds(vec![VM_ID5]),
+ ]
+ );
+ }
+
+ #[test]
+ fn test_sk_state_no_batching() {
+ let history = Arc::new(Mutex::new(Vec::new()));
+ let mut sk_state = new_test_state(history.clone(), 6);
+ sk_state.delete_ids(&[VM_ID1, VM_ID2, VM_ID3, VM_ID4, VM_ID5]);
+ let got = (*history.lock().unwrap()).clone();
+ assert_eq!(got, vec![SkOp::DeleteIds(vec![VM_ID1, VM_ID2, VM_ID3, VM_ID4, VM_ID5])]);
+ }
+
+ #[test]
+ fn test_sk_state() {
+ const USER1: i32 = 1;
+ const USER2: i32 = 2;
+ const USER3: i32 = 3;
+ const APP_A: i32 = 50;
+ const APP_B: i32 = 60;
+ const APP_C: i32 = 70;
+
+ let history = Arc::new(Mutex::new(Vec::new()));
+ let mut sk_state = new_test_state(history.clone(), 2);
+
+ sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID4, USER3, APP_A).unwrap();
+ sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+ assert_eq!((*history.lock().unwrap()).clone(), vec![]);
+
+ sk_state.delete_ids_for_app(USER2, APP_B).unwrap();
+ assert_eq!((*history.lock().unwrap()).clone(), vec![SkOp::DeleteIds(vec![VM_ID3])]);
+
+ sk_state.delete_ids_for_user(USER3).unwrap();
+ assert_eq!(
+ (*history.lock().unwrap()).clone(),
+ vec![SkOp::DeleteIds(vec![VM_ID3]), SkOp::DeleteIds(vec![VM_ID4, VM_ID5]),]
+ );
+
+ assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
+ let empty: Vec<VmId> = Vec::new();
+ assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
}
}
diff --git a/virtualizationservice/src/maintenance/vmdb.rs b/virtualizationservice/src/maintenance/vmdb.rs
new file mode 100644
index 0000000..bdff034
--- /dev/null
+++ b/virtualizationservice/src/maintenance/vmdb.rs
@@ -0,0 +1,265 @@
+// Copyright 2024, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Database of VM IDs.
+
+use anyhow::{Context, Result};
+use log::{debug, error, info, warn};
+use rusqlite::{params, params_from_iter, Connection, OpenFlags, Rows};
+use std::path::PathBuf;
+
+/// Subdirectory to hold the database.
+const DB_DIR: &str = "vmdb";
+
+/// Name of the file that holds the database.
+const DB_FILENAME: &str = "vmids.sqlite";
+
+/// Maximum number of host parameters in a single SQL statement.
+/// (Default value of `SQLITE_LIMIT_VARIABLE_NUMBER` for <= 3.32.0)
+const MAX_VARIABLES: usize = 999;
+
+/// Identifier for a VM and its corresponding secret.
+pub type VmId = [u8; 64];
+
+/// Representation of an on-disk database of VM IDs.
+pub struct VmIdDb {
+ conn: Connection,
+}
+
+impl VmIdDb {
+ /// Connect to the VM ID database file held in the given directory, creating it if necessary.
+ /// The second return value indicates whether a new database file was created.
+ ///
+ /// This function assumes no other threads/processes are attempting to connect concurrently.
+ pub fn new(db_dir: &str) -> Result<(Self, bool)> {
+ let mut db_path = PathBuf::from(db_dir);
+ db_path.push(DB_DIR);
+ if !db_path.exists() {
+ std::fs::create_dir(&db_path).context(format!("failed to create {db_path:?}"))?;
+ info!("created persistent db dir {db_path:?}");
+ }
+
+ db_path.push(DB_FILENAME);
+ let (flags, created) = if db_path.exists() {
+ debug!("connecting to existing database {db_path:?}");
+ (
+ OpenFlags::SQLITE_OPEN_READ_WRITE
+ | OpenFlags::SQLITE_OPEN_URI
+ | OpenFlags::SQLITE_OPEN_NO_MUTEX,
+ false,
+ )
+ } else {
+ info!("creating fresh database {db_path:?}");
+ (
+ OpenFlags::SQLITE_OPEN_READ_WRITE
+ | OpenFlags::SQLITE_OPEN_CREATE
+ | OpenFlags::SQLITE_OPEN_URI
+ | OpenFlags::SQLITE_OPEN_NO_MUTEX,
+ true,
+ )
+ };
+ let mut result = Self {
+ conn: Connection::open_with_flags(db_path, flags)
+ .context(format!("failed to open/create DB with {flags:?}"))?,
+ };
+
+ if created {
+ result.init_tables().context("failed to create tables")?;
+ }
+ Ok((result, created))
+ }
+
+ /// Delete the associated database file.
+ pub fn delete_db_file(self, db_dir: &str) {
+ let mut db_path = PathBuf::from(db_dir);
+ db_path.push(DB_DIR);
+ db_path.push(DB_FILENAME);
+
+ // Drop the connection before removing the backing file.
+ drop(self);
+ warn!("removing database file {db_path:?}");
+ if let Err(e) = std::fs::remove_file(&db_path) {
+ error!("failed to remove database file {db_path:?}: {e:?}");
+ }
+ }
+
+ /// Create the database table and indices.
+ fn init_tables(&mut self) -> Result<()> {
+ self.conn
+ .execute(
+ "CREATE TABLE IF NOT EXISTS main.vmids (
+ vm_id BLOB PRIMARY KEY,
+ user_id INTEGER,
+ app_id INTEGER
+ ) WITHOUT ROWID;",
+ (),
+ )
+ .context("failed to create table")?;
+ self.conn
+ .execute("CREATE INDEX IF NOT EXISTS main.vmids_user_index ON vmids(user_id);", [])
+ .context("Failed to create user index")?;
+ self.conn
+ .execute(
+ "CREATE INDEX IF NOT EXISTS main.vmids_app_index ON vmids(user_id, app_id);",
+ [],
+ )
+ .context("Failed to create app index")?;
+ Ok(())
+ }
+
+ /// Add the given VM ID into the database.
+ #[allow(dead_code)] // TODO(b/294177871): connect this up
+ pub fn add_vm_id(&mut self, vm_id: &VmId, user_id: i32, app_id: i32) -> Result<()> {
+ let _rows = self
+ .conn
+ .execute(
+ "REPLACE INTO main.vmids (vm_id, user_id, app_id) VALUES (?1, ?2, ?3);",
+ params![vm_id, &user_id, &app_id],
+ )
+ .context("failed to add VM ID")?;
+ Ok(())
+ }
+
+ /// Remove the given VM IDs from the database. The collection of IDs is assumed to be smaller
+ /// than the maximum number of SQLite parameters.
+ pub fn delete_vm_ids(&mut self, vm_ids: &[VmId]) -> Result<()> {
+ assert!(vm_ids.len() < MAX_VARIABLES);
+ let mut vars = "?,".repeat(vm_ids.len());
+ vars.pop(); // remove trailing comma
+ let sql = format!("DELETE FROM main.vmids WHERE vm_id IN ({});", vars);
+ let mut stmt = self.conn.prepare(&sql).context("failed to prepare DELETE stmt")?;
+ let _rows = stmt.execute(params_from_iter(vm_ids)).context("failed to delete VM IDs")?;
+ Ok(())
+ }
+
+ /// Return the VM IDs associated with Android user ID `user_id`.
+ pub fn vm_ids_for_user(&mut self, user_id: i32) -> Result<Vec<VmId>> {
+ let mut stmt = self
+ .conn
+ .prepare("SELECT vm_id FROM main.vmids WHERE user_id = ?;")
+ .context("failed to prepare SELECT stmt")?;
+ let rows = stmt.query(params![user_id]).context("query failed")?;
+ Self::vm_ids_from_rows(rows)
+ }
+
+ /// Return the VM IDs associated with `(user_id, app_id)`.
+ pub fn vm_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<Vec<VmId>> {
+ let mut stmt = self
+ .conn
+ .prepare("SELECT vm_id FROM main.vmids WHERE user_id = ? AND app_id = ?;")
+ .context("failed to prepare SELECT stmt")?;
+ let rows = stmt.query(params![user_id, app_id]).context("query failed")?;
+ Self::vm_ids_from_rows(rows)
+ }
+
+ /// Retrieve a collection of VM IDs from database rows.
+ fn vm_ids_from_rows(mut rows: Rows) -> Result<Vec<VmId>> {
+ let mut vm_ids: Vec<VmId> = Vec::new();
+ while let Some(row) = rows.next().context("failed row unpack")? {
+ match row.get(0) {
+ Ok(vm_id) => vm_ids.push(vm_id),
+ Err(e) => log::error!("failed to parse row: {e:?}"),
+ }
+ }
+
+ Ok(vm_ids)
+ }
+}
+
+#[cfg(test)]
+pub fn new_test_db() -> VmIdDb {
+ let mut db = VmIdDb { conn: Connection::open_in_memory().unwrap() };
+ db.init_tables().unwrap();
+ db
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ const VM_ID1: VmId = [1u8; 64];
+ const VM_ID2: VmId = [2u8; 64];
+ const VM_ID3: VmId = [3u8; 64];
+ const VM_ID4: VmId = [4u8; 64];
+ const VM_ID5: VmId = [5u8; 64];
+ const USER1: i32 = 1;
+ const USER2: i32 = 2;
+ const USER3: i32 = 3;
+ const USER_UNKNOWN: i32 = 4;
+ const APP_A: i32 = 50;
+ const APP_B: i32 = 60;
+ const APP_C: i32 = 70;
+ const APP_UNKNOWN: i32 = 99;
+
+ #[test]
+ fn test_add_remove() {
+ let mut db = new_test_db();
+ db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID3, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID4, USER2, APP_B).unwrap();
+ db.add_vm_id(&VM_ID5, USER3, APP_A).unwrap();
+ db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+ let empty: Vec<VmId> = Vec::new();
+
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(vec![VM_ID4], db.vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(vec![VM_ID5], db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(empty, db.vm_ids_for_user(USER_UNKNOWN).unwrap());
+ assert_eq!(empty, db.vm_ids_for_app(USER1, APP_UNKNOWN).unwrap());
+
+ db.delete_vm_ids(&[VM_ID2, VM_ID3]).unwrap();
+
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_app(USER1, APP_A).unwrap());
+
+ // OK to delete things that don't exist.
+ db.delete_vm_ids(&[VM_ID2, VM_ID3]).unwrap();
+
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1], db.vm_ids_for_app(USER1, APP_A).unwrap());
+
+ db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID3, USER1, APP_A).unwrap();
+
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(vec![VM_ID4], db.vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(vec![VM_ID5], db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(empty, db.vm_ids_for_user(USER_UNKNOWN).unwrap());
+ assert_eq!(empty, db.vm_ids_for_app(USER1, APP_UNKNOWN).unwrap());
+ }
+
+ #[test]
+ fn test_invalid_vm_id() {
+ let mut db = new_test_db();
+ db.add_vm_id(&VM_ID3, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+
+ // Note that results are returned in `vm_id` order, because the table is `WITHOUT ROWID`.
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+
+ // Manually insert a row with a VM ID that's the wrong size.
+ db.conn
+ .execute(
+ "REPLACE INTO main.vmids (vm_id, user_id, app_id) VALUES (?1, ?2, ?3);",
+ params![&[99u8; 60], &USER1, APP_A],
+ )
+ .unwrap();
+
+ // Invalid row is skipped and remainder returned.
+ assert_eq!(vec![VM_ID1, VM_ID2, VM_ID3], db.vm_ids_for_user(USER1).unwrap());
+ }
+}