Merge "Define skeleton code of vmnic (Virtual Machine Network Interface Creator)" into main
diff --git a/README.md b/README.md
index 4905b56..7560a45 100644
--- a/README.md
+++ b/README.md
@@ -32,3 +32,4 @@
* [Debugging](docs/debug)
* [Using custom VM](docs/custom_vm.md)
* [Device assignment](docs/device_assignment.md)
+* [Huge Pages](docs/hugepages.md)
diff --git a/apex/empty-payload-apk/Android.bp b/apex/empty-payload-apk/Android.bp
index 01bf795..d890d9a 100644
--- a/apex/empty-payload-apk/Android.bp
+++ b/apex/empty-payload-apk/Android.bp
@@ -9,6 +9,7 @@
apex_available: ["com.android.virt"],
sdk_version: "system_current",
jni_uses_platform_apis: true,
+ use_embedded_native_libs: true,
min_sdk_version: "34",
target_sdk_version: "34",
compile_multilib: "first",
diff --git a/docs/custom_vm.md b/docs/custom_vm.md
index b218a5e..36f5998 100644
--- a/docs/custom_vm.md
+++ b/docs/custom_vm.md
@@ -34,12 +34,27 @@
As of today (April 2024), ChromiumOS is the only officially supported guest
payload. We will be adding more OSes in the future.
+#### Download from build server
+
+ - Step 1) Go to https://ci.chromium.org/ui/p/chromeos/builders/chromiumos/ferrochrome-public-main/
+ - Note: the `ferrochrome` target can be found with the builder search.
+ - Step 2) Click a build number.
+ - Step 3) Expand the steps and find `48. upload artifacts`.
+ - Step 4) Click `gs upload dir`. This opens the Cloud Storage directory containing the full set of build artifacts (e.g. [here](https://pantheon.corp.google.com/storage/browser/chromiumos-image-archive/ferrochrome-public/R126-15883.0.0) is the initial build of ferrochrome).
+ - Step 5) Download `image.zip`, which contains a working vmlinuz.
+ - Note: DO NOT download `vmlinuz.tar.xz` from the CI.
+ - Step 6) Uncompress `image.zip`, and boot with `chromiumos_test_image.bin` and `boot_images/vmlinuz`.
+ - Note: DO NOT use `vmlinuz.bin`.
+
+IMPORTANT: DO NOT pass `vmlinuz.bin` to crosvm. It doesn't pick up the correct `init` process (it runs `/init` instead of `/sbin/init`), and `cfg80211` keeps crashing (i.e. no network).
+
+
#### Build ChromiumOS for VM
First, check out source code from the ChromiumOS and Chromium projects.
* Checking out ChromiumOS: https://www.chromium.org/chromium-os/developer-library/guides/development/developer-guide/
-* Checking out Chromium: https://g3doc.corp.google.com/chrome/chromeos/system_services_team/dev_instructions/g3doc/setup_checkout.md?cl=headless
+* Checking out Chromium: https://g3doc.corp.google.com/chrome/chromeos/system_services_team/dev_instructions/g3doc/setup_checkout.md?cl=head
Important: When you are at the step “Set up gclient args” in the Chromium checkout instruction, configure .gclient as follows.
@@ -122,7 +137,7 @@
You need two outputs:
* ChromiumOS disk image: ~/chromiumos/src/build/images/ferrochrome/latest/chromiumos_test_image.bin
-* The kernel: ~/chromiumos/out/build/ferrochrome/boot/vmlinuz
+* The kernel: ~/chromiumos/src/build/images/ferrochrome/latest/boot_images/vmlinuz
### Create a guest VM configuration
diff --git a/docs/hugepages.md b/docs/hugepages.md
new file mode 100644
index 0000000..b379e9b
--- /dev/null
+++ b/docs/hugepages.md
@@ -0,0 +1,47 @@
+# Huge Pages
+
+From Android 15, the pKVM hypervisor supports transparent huge pages (THP).
+This is a Linux feature that allows the kernel to allocate, when possible, a
+huge page (typically 2 MiB on a system with 4 KiB pages). Because a huge page
+has the size of a stage-2 block, the hypervisor can leverage this allocation
+to install a single block mapping in the stage-2 page tables instead of 512
+individual contiguous single-page mappings.
+
+Using block mappings brings a significant performance improvement by reducing
+the number of stage-2 page faults as well as the TLB pressure. However, finding
+a huge page can be difficult on a system where memory is fragmented.
+
+By default, huge pages are disabled.
+
+## Enabling THP
+
+### 1. Sysfs configuration
+
+The sysfs configuration file that controls THP for AVF is
+
+```
+/sys/kernel/mm/transparent_hugepage/shmem_enabled
+```
+
+It defaults to `never`. It is recommended to set it to `advise` to benefit
+from the THP performance improvement.
+
+THPs can have an impact on the system depending on the chosen defragmentation
+policy. The policy is configured with the following sysfs file:
+
+```
+/sys/kernel/mm/transparent_hugepage/defrag
+```
+
+The recommended policy is `never`, as this has no impact on the rest of the
+system: THPs are used only if some are already available.
+
+More information can be found in the Linux
+[admin guide](https://docs.kernel.org/admin-guide/mm/transhuge.html).
+
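+For reference, here is a minimal host-side sketch of checking the active
+policies. This helper is illustrative and not part of AVF; it assumes the sysfs
+files above are readable and that the kernel reports the active value in
+brackets (e.g. `always within_size advise [never]`).
+
+```rust
+use std::fs;
+
+/// Returns the bracketed (active) policy from a THP sysfs file, e.g. "never".
+fn read_thp_policy(path: &str) -> Option<String> {
+    let contents = fs::read_to_string(path).ok()?;
+    contents
+        .split_whitespace()
+        .find(|w| w.starts_with('[') && w.ends_with(']'))
+        .map(|w| w.trim_matches(|c| c == '[' || c == ']').to_string())
+}
+
+fn main() {
+    let base = "/sys/kernel/mm/transparent_hugepage";
+    println!("shmem_enabled = {:?}", read_thp_policy(&format!("{base}/shmem_enabled")));
+    println!("defrag = {:?}", read_thp_policy(&format!("{base}/defrag")));
+}
+```
+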
+### 2. AVF configuration
+
+The guest VM configuration can request huge pages with the `vm_config.json`
+option `"hugepages": true`.
+
+Alternatively, the `vm` command accepts a `--hugepages` flag.
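+
+As a quick illustration of the JSON option, here is a minimal, self-contained
+sketch. It is not AVF code: it assumes the `serde` (with `derive`) and
+`serde_json` crates and uses a simplified stand-in for the real payload config
+struct. It shows that `"hugepages"` parses as a boolean and defaults to `false`
+when omitted.
+
+```rust
+use serde::Deserialize;
+
+/// Simplified stand-in for the payload config; the real struct has many more fields.
+#[derive(Debug, Deserialize)]
+struct VmPayloadConfig {
+    #[serde(default)]
+    hugepages: bool,
+}
+
+fn main() {
+    let on: VmPayloadConfig = serde_json::from_str(r#"{"hugepages": true}"#).unwrap();
+    let off: VmPayloadConfig = serde_json::from_str("{}").unwrap();
+    assert!(on.hugepages);
+    assert!(!off.hugepages);
+    println!("{on:?} {off:?}");
+}
+```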
diff --git a/microdroid/payload/config/src/lib.rs b/microdroid/payload/config/src/lib.rs
index d6f65bd..28c3c70 100644
--- a/microdroid/payload/config/src/lib.rs
+++ b/microdroid/payload/config/src/lib.rs
@@ -49,6 +49,13 @@
/// files with integrity checking, but not confidentiality.
#[serde(default)]
pub enable_authfs: bool,
+
+ /// Ask the kernel for transparent huge-pages (THP). This is only a hint and
+ /// the kernel will allocate THP-backed memory only if globally enabled by
+ /// the system and if any can be found. See
+ /// https://docs.kernel.org/admin-guide/mm/transhuge.html
+ #[serde(default)]
+ pub hugepages: bool,
}
/// OS config
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 7da9ea4..990d27a 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -629,6 +629,7 @@
prefer_staged: false,
export_tombstones: None,
enable_authfs: false,
+ hugepages: false,
})
}
_ => bail!("Failed to match config against a config type."),
diff --git a/service_vm/requests/src/rkp.rs b/service_vm/requests/src/rkp.rs
index cdbd60e..4f2262f 100644
--- a/service_vm/requests/src/rkp.rs
+++ b/service_vm/requests/src/rkp.rs
@@ -123,9 +123,12 @@
"model" => "avf",
"device" => "avf",
"product" => "avf",
+ "vb_state" => "avf",
"manufacturer" => "aosp-avf",
- "vbmeta_digest" => Value::Bytes(vec![0u8; 0]),
+ "vbmeta_digest" => Value::Bytes(vec![1u8; 0]),
+ "security_level" => "avf",
"boot_patch_level" => 20240202,
+ "bootloader_state" => "avf",
"system_patch_level" => 202402,
"vendor_patch_level" => 20240202,
})
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index e7e9ded..f424ce0 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -59,6 +59,7 @@
import org.json.JSONObject;
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
@@ -660,6 +661,7 @@
}
@Test
+ @Ignore("b/341087884") // TODO(b/341087884): fix & re-enable
public void testTombstonesAreGeneratedUponKernelCrash() throws Exception {
assumeFalse("Cuttlefish is not supported", isCuttlefish());
assumeFalse("Skipping test because ramdump is disabled on user build", isUserBuild());
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index f1509e2..a245e11 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -633,6 +633,7 @@
device_tree_overlay,
display_config,
input_device_options,
+ hugepages: config.hugePages,
};
let instance = Arc::new(
VmInstance::new(
@@ -918,6 +919,7 @@
vm_config.name.clone_from(&config.name);
vm_config.protectedVm = config.protectedVm;
vm_config.cpuTopology = config.cpuTopology;
+ vm_config.hugePages = config.hugePages || vm_payload_config.hugepages;
// Microdroid takes additional init ramdisk & (optionally) storage image
add_microdroid_system_images(config, instance_file, storage_image, os_name, &mut vm_config)?;
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index 040e552..b426051 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -121,6 +121,7 @@
pub device_tree_overlay: Option<File>,
pub display_config: Option<DisplayConfig>,
pub input_device_options: Vec<InputDeviceOption>,
+ pub hugepages: bool,
}
#[derive(Debug)]
@@ -1005,6 +1006,11 @@
});
}
}
+
+ if config.hugepages {
+ command.arg("--hugepages");
+ }
+
append_platform_devices(&mut command, &mut preserved_fds, &config)?;
debug!("Preserving FDs {:?}", preserved_fds);
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
index 417d5d3..9951bfd 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
@@ -128,4 +128,12 @@
/** Configuration parameters guarded by android.permission.USE_CUSTOM_VIRTUAL_MACHINE */
@nullable CustomConfig customConfig;
+
+ /**
+ * Ask the kernel for transparent huge-pages (THP). This is only a hint and
+ * the kernel will allocate THP-backed memory only if globally enabled by
+ * the system and if any can be found. See
+ * https://docs.kernel.org/admin-guide/mm/transhuge.html
+ */
+ boolean hugePages;
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
index 86e26da..cf9d25a 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
@@ -70,6 +70,14 @@
*/
int gdbPort = 0;
+ /**
+ * Ask the kernel for transparent huge-pages (THP). This is only a hint and
+ * the kernel will allocate THP-backed memory only if globally enabled by
+ * the system and if any can be found. See
+ * https://docs.kernel.org/admin-guide/mm/transhuge.html
+ */
+ boolean hugePages;
+
/** List of SysFS nodes of devices to be assigned */
String[] devices;
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index bcea1bc..8acfdd3 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -20,20 +20,18 @@
mod remote_provisioning;
mod rkpvm;
-use crate::aidl::{remove_temporary_dir, TEMPORARY_DIRECTORY, VirtualizationServiceInternal};
+use crate::aidl::{remove_temporary_dir, VirtualizationServiceInternal, TEMPORARY_DIRECTORY};
use android_logger::{Config, FilterBuilder};
-use android_system_virtualizationservice_internal::aidl::android::system::{
- virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal
-};
-use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance::{
- IVirtualizationMaintenance::BnVirtualizationMaintenance
-};
+use android_system_virtualizationmaintenance::aidl::android::system::virtualizationmaintenance;
+use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal;
use anyhow::{bail, Context, Error, Result};
use binder::{register_lazy_service, BinderFeatures, ProcessState, ThreadState};
use log::{error, info, LevelFilter};
use std::fs::{create_dir, read_dir};
use std::os::unix::raw::{pid_t, uid_t};
use std::path::Path;
+use virtualizationmaintenance::IVirtualizationMaintenance::BnVirtualizationMaintenance;
+use virtualizationservice_internal::IVirtualizationServiceInternal::BnVirtualizationServiceInternal;
const LOG_TAG: &str = "VirtualizationService";
pub(crate) const REMOTELY_PROVISIONED_COMPONENT_SERVICE_NAME: &str =
diff --git a/virtualizationservice/src/maintenance.rs b/virtualizationservice/src/maintenance.rs
index 8efc58d..4732e1f 100644
--- a/virtualizationservice/src/maintenance.rs
+++ b/virtualizationservice/src/maintenance.rs
@@ -24,11 +24,6 @@
mod vmdb;
use vmdb::{VmId, VmIdDb};
-/// Indicate whether an app ID belongs to a system core component.
-fn core_app_id(app_id: i32) -> bool {
- app_id < 10000
-}
-
/// Interface name for the Secretkeeper HAL.
const SECRETKEEPER_SERVICE: &str = "android.hardware.security.secretkeeper.ISecretkeeper/default";
@@ -45,6 +40,11 @@
/// State related to VM secrets.
pub struct State {
+ /// The real state, lazily created when we first need it.
+ inner: Option<InnerState>,
+}
+
+struct InnerState {
sk: binder::Strong<dyn ISecretkeeper>,
/// Database of VM IDs,
vm_id_db: VmIdDb,
@@ -53,20 +53,69 @@
impl State {
pub fn new() -> Option<Self> {
- let sk = match Self::find_sk() {
- Some(sk) => sk,
- None => {
- warn!("failed to find a Secretkeeper instance; skipping secret management");
- return None;
- }
+ if is_sk_present() {
+ // Don't instantiate the inner state yet, that will happen when it is needed.
+ Some(Self { inner: None })
+ } else {
+ // If the Secretkeeper HAL doesn't exist, there's never any point in trying to
+ // handle maintenance for it.
+ info!("Failed to find a Secretkeeper instance; skipping secret management");
+ None
+ }
+ }
+
+ /// Return the existing inner state, or create one if there isn't one.
+ /// This is done on demand because in early boot (before we need Secretkeeper) it may not
+ /// yet be available to connect to. See b/331417880.
+ fn get_inner(&mut self) -> Result<&mut InnerState> {
+ if self.inner.is_none() {
+ self.inner = Some(InnerState::new()?);
+ }
+ Ok(self.inner.as_mut().unwrap())
+ }
+
+ /// Record a new VM ID. If there is an existing owner (user_id, app_id) for the VM ID,
+ /// it will be replaced.
+ pub fn add_id(&mut self, vm_id: &VmId, user_id: u32, app_id: u32) -> Result<()> {
+ self.get_inner()?.add_id(vm_id, user_id, app_id)
+ }
+
+ /// Delete the VM IDs associated with Android user ID `user_id`.
+ pub fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
+ self.get_inner()?.delete_ids_for_user(user_id)
+ }
+
+ /// Delete the VM IDs associated with `(user_id, app_id)`.
+ pub fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
+ self.get_inner()?.delete_ids_for_app(user_id, app_id)
+ }
+
+ /// Delete the provided VM IDs from both Secretkeeper and the database.
+ pub fn delete_ids(&mut self, vm_ids: &[VmId]) {
+ let Ok(inner) = self.get_inner() else {
+ warn!("No Secretkeeper available, not deleting secrets");
+ return;
};
- let (vm_id_db, created) = match VmIdDb::new(PERSISTENT_DIRECTORY) {
- Ok(v) => v,
- Err(e) => {
- error!("skipping secret management, failed to connect to database: {e:?}");
- return None;
- }
- };
+
+ inner.delete_ids(vm_ids)
+ }
+
+ /// Perform reconciliation to allow for possibly missed notifications of user or app removal.
+ pub fn reconcile(
+ &mut self,
+ callback: &Strong<dyn IVirtualizationReconciliationCallback>,
+ ) -> Result<()> {
+ self.get_inner()?.reconcile(callback)
+ }
+}
+
+impl InnerState {
+ fn new() -> Result<Self> {
+ info!("Connecting to {SECRETKEEPER_SERVICE}");
+ let sk = binder::wait_for_interface::<dyn ISecretkeeper>(SECRETKEEPER_SERVICE)
+ .with_context(|| format!("Connecting to {SECRETKEEPER_SERVICE}"))?;
+ let (vm_id_db, created) = VmIdDb::new(PERSISTENT_DIRECTORY)
+ .context("Connecting to secret management database")?;
if created {
// If the database did not previously exist, then this appears to be the first run of
// `virtualizationservice` since device setup or factory reset. In case of the latter,
@@ -76,32 +125,15 @@
if let Err(e) = sk.deleteAll() {
error!("failed to delete previous secrets, dropping database: {e:?}");
vm_id_db.delete_db_file(PERSISTENT_DIRECTORY);
- return None;
+ return Err(e.into());
}
} else {
info!("re-using existing VM ID DB");
}
- Some(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })
+ Ok(Self { sk, vm_id_db, batch_size: DELETE_MAX_BATCH_SIZE })
}
- fn find_sk() -> Option<binder::Strong<dyn ISecretkeeper>> {
- if let Ok(true) = binder::is_declared(SECRETKEEPER_SERVICE) {
- match binder::get_interface(SECRETKEEPER_SERVICE) {
- Ok(sk) => Some(sk),
- Err(e) => {
- error!("failed to connect to {SECRETKEEPER_SERVICE}: {e:?}");
- None
- }
- }
- } else {
- info!("instance {SECRETKEEPER_SERVICE} not declared");
- None
- }
- }
-
- /// Record a new VM ID. If there is an existing owner (user_id, app_id) for the VM ID,
- /// it will be replaced.
- pub fn add_id(&mut self, vm_id: &VmId, user_id: u32, app_id: u32) -> Result<()> {
+ fn add_id(&mut self, vm_id: &VmId, user_id: u32, app_id: u32) -> Result<()> {
let user_id: i32 = user_id.try_into().context(format!("user_id {user_id} out of range"))?;
let app_id: i32 = app_id.try_into().context(format!("app_id {app_id} out of range"))?;
@@ -125,8 +157,7 @@
self.vm_id_db.add_vm_id(vm_id, user_id, app_id)
}
- /// Delete the VM IDs associated with Android user ID `user_id`.
- pub fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
+ fn delete_ids_for_user(&mut self, user_id: i32) -> Result<()> {
let vm_ids = self.vm_id_db.vm_ids_for_user(user_id)?;
info!(
"delete_ids_for_user(user_id={user_id}) triggers deletion of {} secrets",
@@ -136,8 +167,7 @@
Ok(())
}
- /// Delete the VM IDs associated with `(user_id, app_id)`.
- pub fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
+ fn delete_ids_for_app(&mut self, user_id: i32, app_id: i32) -> Result<()> {
let vm_ids = self.vm_id_db.vm_ids_for_app(user_id, app_id)?;
info!(
"delete_ids_for_app(user_id={user_id}, app_id={app_id}) removes {} secrets",
@@ -147,8 +177,7 @@
Ok(())
}
- /// Delete the provided VM IDs from both Secretkeeper and the database.
- pub fn delete_ids(&mut self, mut vm_ids: &[VmId]) {
+ fn delete_ids(&mut self, mut vm_ids: &[VmId]) {
while !vm_ids.is_empty() {
let len = std::cmp::min(vm_ids.len(), self.batch_size);
let batch = &vm_ids[..len];
@@ -171,8 +200,7 @@
}
}
- /// Perform reconciliation to allow for possibly missed notifications of user or app removal.
- pub fn reconcile(
+ fn reconcile(
&mut self,
callback: &Strong<dyn IVirtualizationReconciliationCallback>,
) -> Result<()> {
@@ -245,19 +273,24 @@
}
}
+/// Indicate whether an app ID belongs to a system core component.
+fn core_app_id(app_id: i32) -> bool {
+ app_id < 10000
+}
+
+fn is_sk_present() -> bool {
+ matches!(binder::is_declared(SECRETKEEPER_SERVICE), Ok(true))
+}
+
#[cfg(test)]
mod tests {
use super::*;
+ use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph;
+ use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper;
+ use authgraph::IAuthGraphKeyExchange::IAuthGraphKeyExchange;
+ use secretkeeper::ISecretkeeper::BnSecretkeeper;
use std::sync::{Arc, Mutex};
- use android_hardware_security_authgraph::aidl::android::hardware::security::authgraph::{
- IAuthGraphKeyExchange::IAuthGraphKeyExchange,
- };
- use android_hardware_security_secretkeeper::aidl::android::hardware::security::secretkeeper::{
- ISecretkeeper::BnSecretkeeper
- };
- use virtualizationmaintenance::IVirtualizationReconciliationCallback::{
- BnVirtualizationReconciliationCallback
- };
+ use virtualizationmaintenance::IVirtualizationReconciliationCallback::BnVirtualizationReconciliationCallback;
/// Fake implementation of Secretkeeper that keeps a history of what operations were invoked.
#[derive(Default)]
@@ -298,7 +331,12 @@
let vm_id_db = vmdb::new_test_db();
let sk = FakeSk { history };
let sk = BnSecretkeeper::new_binder(sk, binder::BinderFeatures::default());
- State { sk, vm_id_db, batch_size }
+ let inner = InnerState { sk, vm_id_db, batch_size };
+ State { inner: Some(inner) }
+ }
+
+ fn get_db(state: &mut State) -> &mut VmIdDb {
+ &mut state.inner.as_mut().unwrap().vm_id_db
}
struct Reconciliation {
@@ -360,11 +398,11 @@
let history = Arc::new(Mutex::new(Vec::new()));
let mut sk_state = new_test_state(history.clone(), 2);
- sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID4, USER3, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap(); // Overwrites APP_A
+ get_db(&mut sk_state).add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID4, USER3, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID5, USER3, APP_C).unwrap(); // Overwrites APP_A
assert_eq!((*history.lock().unwrap()).clone(), vec![]);
sk_state.delete_ids_for_app(USER2, APP_B).unwrap();
@@ -376,11 +414,14 @@
vec![SkOp::DeleteIds(vec![VM_ID3]), SkOp::DeleteIds(vec![VM_ID4, VM_ID5]),]
);
- assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
- assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2], get_db(&mut sk_state).vm_ids_for_user(USER1).unwrap());
+ assert_eq!(
+ vec![VM_ID1, VM_ID2],
+ get_db(&mut sk_state).vm_ids_for_app(USER1, APP_A).unwrap()
+ );
let empty: Vec<VmId> = Vec::new();
- assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
- assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_user(USER3).unwrap());
}
#[test]
@@ -388,16 +429,19 @@
let history = Arc::new(Mutex::new(Vec::new()));
let mut sk_state = new_test_state(history.clone(), 20);
- sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID4, USER2, CORE_APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID4, USER2, CORE_APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
- assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
- assert_eq!(vec![VM_ID1, VM_ID2], sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
- assert_eq!(vec![VM_ID3], sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
- assert_eq!(vec![VM_ID5], sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(vec![VM_ID1, VM_ID2], get_db(&mut sk_state).vm_ids_for_user(USER1).unwrap());
+ assert_eq!(
+ vec![VM_ID1, VM_ID2],
+ get_db(&mut sk_state).vm_ids_for_app(USER1, APP_A).unwrap()
+ );
+ assert_eq!(vec![VM_ID3], get_db(&mut sk_state).vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(vec![VM_ID5], get_db(&mut sk_state).vm_ids_for_user(USER3).unwrap());
// Perform a reconciliation and pretend that USER1 and [CORE_APP_A, APP_B] are gone.
let reconciliation =
@@ -409,12 +453,12 @@
sk_state.reconcile(&callback).unwrap();
let empty: Vec<VmId> = Vec::new();
- assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_user(USER1).unwrap());
- assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_user(USER1).unwrap());
+ assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_app(USER1, APP_A).unwrap());
// VM for core app stays even though it's reported as absent.
- assert_eq!(vec![VM_ID4], sk_state.vm_id_db.vm_ids_for_user(USER2).unwrap());
- assert_eq!(empty, sk_state.vm_id_db.vm_ids_for_app(USER2, APP_B).unwrap());
- assert_eq!(vec![VM_ID5], sk_state.vm_id_db.vm_ids_for_user(USER3).unwrap());
+ assert_eq!(vec![VM_ID4], get_db(&mut sk_state).vm_ids_for_user(USER2).unwrap());
+ assert_eq!(empty, get_db(&mut sk_state).vm_ids_for_app(USER2, APP_B).unwrap());
+ assert_eq!(vec![VM_ID5], get_db(&mut sk_state).vm_ids_for_user(USER3).unwrap());
}
#[test]
@@ -427,11 +471,11 @@
let mut vm_id = [0u8; 64];
vm_id[0..8].copy_from_slice(&(idx as u64).to_be_bytes());
sk_state.add_id(&vm_id, USER1 as u32, APP_A as u32).unwrap();
- assert_eq!(idx + 1, sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap());
+ assert_eq!(idx + 1, get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap());
}
assert_eq!(
MAX_VM_IDS_PER_APP,
- sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap()
+ get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap()
);
// Beyond the limit it's one in, one out.
@@ -441,12 +485,12 @@
sk_state.add_id(&vm_id, USER1 as u32, APP_A as u32).unwrap();
assert_eq!(
MAX_VM_IDS_PER_APP,
- sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap()
+ get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap()
);
}
assert_eq!(
MAX_VM_IDS_PER_APP,
- sk_state.vm_id_db.count_vm_ids_for_app(USER1, APP_A).unwrap()
+ get_db(&mut sk_state).count_vm_ids_for_app(USER1, APP_A).unwrap()
);
}
@@ -467,10 +511,10 @@
let history = Arc::new(Mutex::new(Vec::new()));
let mut sk_state = new_test_state(history.clone(), 20);
- sk_state.vm_id_db.add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
- sk_state.vm_id_db.add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID1, USER1, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID2, USER1, APP_A).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID3, USER2, APP_B).unwrap();
+ get_db(&mut sk_state).add_vm_id(&VM_ID5, USER3, APP_C).unwrap();
sk_state.delete_ids_for_user(USER1).unwrap();
sk_state.delete_ids_for_user(USER2).unwrap();
sk_state.delete_ids_for_user(USER3).unwrap();
diff --git a/virtualizationservice/vfio_handler/src/aidl.rs b/virtualizationservice/vfio_handler/src/aidl.rs
index c0967af..b527260 100644
--- a/virtualizationservice/vfio_handler/src/aidl.rs
+++ b/virtualizationservice/vfio_handler/src/aidl.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! Implementation of the AIDL interface of the VirtualizationService.
+//! Implementation of the AIDL interface of VfioHandler.
use anyhow::{anyhow, Context};
use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal::IBoundDevice::{IBoundDevice, BnBoundDevice};
diff --git a/vm/src/main.rs b/vm/src/main.rs
index b60f2db..d6ee3a5 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -53,6 +53,13 @@
/// Run VM in protected mode.
#[arg(short, long)]
protected: bool,
+
+ /// Ask the kernel for transparent huge-pages (THP). This is only a hint and
+ /// the kernel will allocate THP-backed memory only if globally enabled by
+ /// the system and if any can be found. See
+ /// https://docs.kernel.org/admin-guide/mm/transhuge.html
+ #[arg(long)]
+ hugepages: bool,
}
#[derive(Args, Default)]
diff --git a/vm/src/run.rs b/vm/src/run.rs
index f3a5987..5e797f8 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -177,6 +177,7 @@
cpuTopology: config.common.cpu_topology,
customConfig: Some(custom_config),
osName: os_name,
+ hugePages: config.common.hugepages,
});
run(
service.as_ref(),
@@ -257,6 +258,7 @@
vm_config.gdbPort = gdb.get() as i32;
}
vm_config.cpuTopology = config.common.cpu_topology;
+ vm_config.hugePages = config.common.hugepages;
run(
get_service()?.as_ref(),
&VirtualMachineConfig::RawConfig(vm_config),
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index 97d4649..229f533 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -79,6 +79,7 @@
// Implementation is available inside a Microdroid VM.
implementation_installable: false,
},
+ visibility: ["//visibility:public"],
}
// Just the headers. Mostly useful for clients that only want the
@@ -88,6 +89,7 @@
defaults: ["avf_build_flags_cc"],
apex_available: ["com.android.compos"],
export_include_dirs: ["include"],
+ visibility: ["//visibility:public"],
}
// Restricted headers for use by internal clients & associated tests.