Merge "Clean-up the device after running TerminalAppTests" into main
diff --git a/build/debian/fai_config/files/etc/systemd/system/forwarder_guest_launcher.service/AVF b/build/debian/fai_config/files/etc/systemd/system/forwarder_guest_launcher.service/AVF
index 4c1b2f5..f4c2a24 100644
--- a/build/debian/fai_config/files/etc/systemd/system/forwarder_guest_launcher.service/AVF
+++ b/build/debian/fai_config/files/etc/systemd/system/forwarder_guest_launcher.service/AVF
@@ -4,7 +4,7 @@
After=network.target
After=virtiofs_internal.service
[Service]
-ExecStart=/usr/local/bin/forwarder_guest_launcher --host 192.168.0.1 --grpc_port $(cat /mnt/internal/debian_service_port)
+ExecStart=/usr/bin/bash -c '/usr/local/bin/forwarder_guest_launcher --host 192.168.0.1 --grpc_port $(cat /mnt/internal/debian_service_port)'
Type=simple
Restart=on-failure
RestartSec=1
diff --git a/build/debian/fai_config/files/etc/systemd/system/ip_addr_reporter.service/AVF b/build/debian/fai_config/files/etc/systemd/system/ip_addr_reporter.service/AVF
index 81347a7..b9f3193 100644
--- a/build/debian/fai_config/files/etc/systemd/system/ip_addr_reporter.service/AVF
+++ b/build/debian/fai_config/files/etc/systemd/system/ip_addr_reporter.service/AVF
@@ -5,7 +5,7 @@
Requires=ttyd.service
After=virtiofs_internal.service
[Service]
-ExecStart=/usr/local/bin/ip_addr_reporter --grpc_port $(cat /mnt/internal/debian_service_port)
+ExecStart=/usr/bin/bash -c '/usr/local/bin/ip_addr_reporter --grpc_port $(cat /mnt/internal/debian_service_port)'
Type=simple
Restart=on-failure
User=root
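Note on the two service changes above: systemd's `ExecStart=` does not perform shell command substitution, so the literal string `$(cat /mnt/internal/debian_service_port)` was previously passed to each binary as an argument rather than being expanded to the port number. Wrapping the command in `bash -c '...'` makes a shell perform the expansion at service start. A minimal sketch of the pattern, with a hypothetical daemon and port file:

```ini
[Service]
# $(...) is expanded by the wrapper shell, not by systemd itself.
ExecStart=/usr/bin/bash -c '/usr/local/bin/my_daemon --port $(cat /run/my_daemon/port)'
```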
diff --git a/build/debian/vm_config.json.aarch64 b/build/debian/vm_config.json.aarch64
index 8e16093..2df0a05 100644
--- a/build/debian/vm_config.json.aarch64
+++ b/build/debian/vm_config.json.aarch64
@@ -12,7 +12,7 @@
"sharedPath": "/storage/emulated"
},
{
- "sharedPath": "/data/user/$USER_ID/$PACKAGE_NAME/files"
+ "sharedPath": "$APP_DATA_DIR/files"
}
],
"protected": false,
diff --git a/build/debian/vm_config.json.x86_64 b/build/debian/vm_config.json.x86_64
index 09e04b9..1719815 100644
--- a/build/debian/vm_config.json.x86_64
+++ b/build/debian/vm_config.json.x86_64
@@ -12,7 +12,7 @@
"sharedPath": "/storage/emulated"
},
{
- "sharedPath": "/data/user/$USER_ID/$PACKAGE_NAME/files"
+ "sharedPath": "$APP_DATA_DIR/files"
}
],
"kernel": "$PAYLOAD_DIR/vmlinuz",
diff --git a/docs/service_vm.md b/docs/service_vm.md
index 735c14d..eedc6fd 100644
--- a/docs/service_vm.md
+++ b/docs/service_vm.md
@@ -16,9 +16,9 @@
## Architecture
-[Rialto](../rialto) is used as the bare-metal kernel for the Service VM. It
+[Rialto][rialto] is used as the bare-metal kernel for the Service VM. It
shares some low-level setup, such as memory management and virtio device
-parsing, with pvmfw. The common setup code is grouped in [vmbase/](../libs/libvmbase).
+parsing, with pvmfw. The common setup code is grouped in [libvmbase/][libvmbase].
## Functionality
@@ -26,12 +26,21 @@
and provide responses for each request. The requests and responses are
serialized in CBOR format and transmitted over a virtio-vsock device.
-- [./comm](./comm) contains the definitions for the requests and responses.
-- [./requests](./requests) contains the library that processes the requests.
-- [./manager](./manager) manages the Service VM session, ensuring that only
- one Service VM is active at any given time. The
- [virtualizationservice](../android/virtualizationservice) process owns and manages
- the Service VM instance.
+- [libservice_vm_comm][libservice_vm_comm] contains the definitions for the
+ requests and responses.
+- [libservice_vm_requests][libservice_vm_requests] contains the library that
+ processes the requests.
+- [libservice_vm_manager][libservice_vm_manager] manages the Service VM
+ session, ensuring that only one Service VM is active at any given time. The
+ [virtualizationservice][virtualizationservice] process owns and manages the
+ Service VM instance.
+
+[rialto]: ../guest/rialto
+[libvmbase]: ../libs/libvmbase
+[libservice_vm_comm]: ../libs/libservice_vm_comm
+[libservice_vm_requests]: ../libs/libservice_vm_requests
+[libservice_vm_manager]: ../libs/libservice_vm_manager
+[virtualizationservice]: ../android/virtualizationservice
### RKP VM (Remote Key Provisioning Virtual Machine)
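As an aside to the Functionality hunk above: a minimal, self-contained sketch of the CBOR request/response round trip it describes, using the `ciborium` crate and hypothetical `Request`/`Response` stand-ins for the real definitions in libservice_vm_comm:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical stand-ins for the types defined in libservice_vm_comm.
#[derive(Serialize, Deserialize)]
enum Request {
    Reverse(Vec<u8>),
}

#[derive(Serialize, Deserialize)]
enum Response {
    Reverse(Vec<u8>),
}

fn main() {
    let request = Request::Reverse(b"hello".to_vec());

    // Serialize to CBOR, as would happen before the bytes cross virtio-vsock.
    let mut wire = Vec::new();
    ciborium::ser::into_writer(&request, &mut wire).unwrap();

    // The receiving end decodes the request and builds a response the same way.
    let Request::Reverse(data) = ciborium::de::from_reader(wire.as_slice()).unwrap();
    let response = Response::Reverse(data.into_iter().rev().collect());

    let mut reply = Vec::new();
    ciborium::ser::into_writer(&response, &mut reply).unwrap();
    assert!(!reply.is_empty());
}
```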
diff --git a/guest/pvmfw/src/bcc.rs b/guest/pvmfw/src/bcc.rs
index 7a13da7..5317ce9 100644
--- a/guest/pvmfw/src/bcc.rs
+++ b/guest/pvmfw/src/bcc.rs
@@ -109,10 +109,14 @@
Value::Array(v) if v.len() >= 2 => v,
_ => return Err(BccError::MalformedBcc("Invalid top level value")),
};
- // Decode all the entries to make sure they are well-formed.
- let entries: Vec<_> = bcc.into_iter().skip(1).map(BccEntry::new).collect();
+ // Decode all the DICE payloads to make sure they are well-formed.
+ let payloads = bcc
+ .into_iter()
+ .skip(1)
+ .map(|v| BccEntry::new(v).payload())
+ .collect::<Result<Vec<_>>>()?;
- let is_debug_mode = is_any_entry_debug_mode(entries.as_slice())?;
+ let is_debug_mode = is_any_payload_debug_mode(&payloads)?;
Ok(Self { is_debug_mode })
}
@@ -121,13 +125,13 @@
}
}
-fn is_any_entry_debug_mode(entries: &[BccEntry]) -> Result<bool> {
- // Check if any entry in the chain is marked as Debug mode, which means the device is not
+fn is_any_payload_debug_mode(payloads: &[BccPayload]) -> Result<bool> {
+ // Check if any payload in the chain is marked as Debug mode, which means the device is not
// secure. (Normal means it is a secure boot, for that stage at least; we ignore recovery
// & not configured /invalid values, since it's not clear what they would mean in this
// context.)
- for entry in entries {
- if entry.payload()?.is_debug_mode()? {
+ for payload in payloads {
+ if payload.is_debug_mode()? {
return Ok(true);
}
}
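The refactoring above decodes every DICE payload eagerly and collects into `Result<Vec<_>>`, so one malformed entry fails the whole constructor instead of deferring the error to the later debug-mode walk. A small standalone illustration of that collect-into-`Result` short-circuiting:

```rust
use std::num::ParseIntError;

// The first Err aborts the collection and becomes the function's result.
fn parse_all(items: &[&str]) -> Result<Vec<i32>, ParseIntError> {
    items.iter().map(|s| s.parse::<i32>()).collect()
}

fn main() {
    assert_eq!(parse_all(&["1", "2"]), Ok(vec![1, 2]));
    assert!(parse_all(&["1", "x"]).is_err());
}
```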
diff --git a/guest/pvmfw/src/entry.rs b/guest/pvmfw/src/entry.rs
index 945ad6a..64a03cc 100644
--- a/guest/pvmfw/src/entry.rs
+++ b/guest/pvmfw/src/entry.rs
@@ -15,25 +15,23 @@
//! Low-level entry and exit points of pvmfw.
use crate::config;
-use crate::fdt;
use crate::memory;
use core::arch::asm;
use core::mem::{drop, size_of};
-use core::num::NonZeroUsize;
use core::ops::Range;
use core::slice;
-use log::debug;
use log::error;
use log::info;
use log::warn;
use log::LevelFilter;
use vmbase::util::RangeExt as _;
use vmbase::{
+ arch::aarch64::min_dcache_line_size,
configure_heap, console_writeln,
- hyp::{get_mem_sharer, get_mmio_guard},
+ hyp::get_mmio_guard,
layout::{self, crosvm, UART_PAGE_ADDR},
main,
- memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
+ memory::{MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
power::reboot,
};
use zeroize::Zeroize;
@@ -94,112 +92,6 @@
// if we reach this point and return, vmbase::entry::rust_entry() will call power::shutdown().
}
-struct MemorySlices<'a> {
- fdt: &'a mut libfdt::Fdt,
- kernel: &'a [u8],
- ramdisk: Option<&'a [u8]>,
-}
-
-impl<'a> MemorySlices<'a> {
- fn new(
- fdt: usize,
- kernel: usize,
- kernel_size: usize,
- vm_dtbo: Option<&mut [u8]>,
- vm_ref_dt: Option<&[u8]>,
- ) -> Result<Self, RebootReason> {
- let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
- // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
- // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
- // overwrite with the template DT and apply the DTBO.
- let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
- error!("Failed to allocate the FDT range: {e}");
- RebootReason::InternalError
- })?;
-
- // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
- let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
-
- let info = fdt::sanitize_device_tree(fdt, vm_dtbo, vm_ref_dt)?;
- let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
- error!("Failed to load sanitized FDT: {e}");
- RebootReason::InvalidFdt
- })?;
- debug!("Fdt passed validation!");
-
- let memory_range = info.memory_range;
- debug!("Resizing MemoryTracker to range {memory_range:#x?}");
- MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
- error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
- RebootReason::InvalidFdt
- })?;
-
- if let Some(mem_sharer) = get_mem_sharer() {
- let granule = mem_sharer.granule().map_err(|e| {
- error!("Failed to get memory protection granule: {e}");
- RebootReason::InternalError
- })?;
- MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
- error!("Failed to initialize dynamically shared pool: {e}");
- RebootReason::InternalError
- })?;
- } else {
- let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
- error!("Pre-shared pool range not specified in swiotlb node");
- RebootReason::InvalidFdt
- })?;
-
- MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
- error!("Failed to initialize pre-shared pool {e}");
- RebootReason::InvalidFdt
- })?;
- }
-
- let kernel_range = if let Some(r) = info.kernel_range {
- MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
- error!("Failed to obtain the kernel range with DT range: {e}");
- RebootReason::InternalError
- })?
- } else if cfg!(feature = "legacy") {
- warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");
-
- let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
- error!("Invalid kernel size: {kernel_size:#x}");
- RebootReason::InvalidPayload
- })?;
-
- MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
- error!("Failed to obtain the kernel range with legacy range: {e}");
- RebootReason::InternalError
- })?
- } else {
- error!("Failed to locate the kernel from the DT");
- return Err(RebootReason::InvalidPayload);
- };
-
- let kernel = kernel_range.start as *const u8;
- // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
- let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };
-
- let ramdisk = if let Some(r) = info.initrd_range {
- debug!("Located ramdisk at {r:?}");
- let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
- error!("Failed to obtain the initrd range: {e}");
- RebootReason::InvalidRamdisk
- })?;
-
- // SAFETY: The region was validated by memory to be in main memory, mapped, and
- // not overlap.
- Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
- } else {
- info!("Couldn't locate the ramdisk from the device tree");
- None
- };
-
- Ok(Self { fdt, kernel, ramdisk })
- }
-}
-
/// Sets up the environment for main() and wraps its result for start().
///
/// Provide the abstractions necessary for start() to abort the pVM boot and for main() to run with
@@ -240,7 +132,7 @@
Some(memory::appended_payload_range()),
));
- let slices = MemorySlices::new(
+ let slices = memory::MemorySlices::new(
fdt,
payload,
payload_size,
diff --git a/guest/pvmfw/src/exceptions.rs b/guest/pvmfw/src/exceptions.rs
index d9f0891..c16e637 100644
--- a/guest/pvmfw/src/exceptions.rs
+++ b/guest/pvmfw/src/exceptions.rs
@@ -16,9 +16,9 @@
use vmbase::{
eprintln,
+ exceptions::{handle_permission_fault, handle_translation_fault},
exceptions::{ArmException, Esr, HandleExceptionError},
logger,
- memory::{handle_permission_fault, handle_translation_fault},
power::reboot,
read_sysreg,
};
diff --git a/guest/pvmfw/src/fdt.rs b/guest/pvmfw/src/fdt.rs
index f667d60..0381f3e 100644
--- a/guest/pvmfw/src/fdt.rs
+++ b/guest/pvmfw/src/fdt.rs
@@ -49,10 +49,12 @@
use vmbase::hyp;
use vmbase::layout::{crosvm::MEM_START, MAX_VIRT_ADDR};
use vmbase::memory::SIZE_4KB;
-use vmbase::util::flatten;
use vmbase::util::RangeExt as _;
use zerocopy::AsBytes as _;
+// SAFETY: The template DT is automatically generated through DTC, which should produce valid DTBs.
+const FDT_TEMPLATE: &Fdt = unsafe { Fdt::unchecked_from_slice(pvmfw_fdt_template::RAW) };
+
/// An enumeration of errors that can occur during the FDT validation.
#[derive(Clone, Debug)]
pub enum FdtValidationError {
@@ -726,7 +728,7 @@
node.setprop_inplace(
cstr!("ranges"),
- flatten(&[pci_info.ranges[0].to_cells(), pci_info.ranges[1].to_cells()]),
+ [pci_info.ranges[0].to_cells(), pci_info.ranges[1].to_cells()].as_flattened(),
)
}
@@ -923,7 +925,7 @@
let mut node =
fdt.root_mut().next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
- node.setprop_inplace(cstr!("reg"), flatten(&value))
+ node.setprop_inplace(cstr!("reg"), value.as_flattened())
}
fn patch_timer(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
@@ -1031,9 +1033,7 @@
let info = parse_device_tree(fdt, vm_dtbo.as_deref())?;
- // SAFETY: We trust that the template (hardcoded in our RO data) is a valid DT.
- let fdt_template = unsafe { Fdt::unchecked_from_slice(pvmfw_fdt_template::RAW) };
- fdt.clone_from(fdt_template).map_err(|e| {
+ fdt.clone_from(FDT_TEMPLATE).map_err(|e| {
error!("Failed to instantiate FDT from the template DT: {e}");
RebootReason::InvalidFdt
})?;
@@ -1327,7 +1327,7 @@
let addr: u64 = addr.try_into().unwrap();
let size: u64 = size.try_into().unwrap();
- node.setprop_inplace(cstr!("reg"), flatten(&[addr.to_be_bytes(), size.to_be_bytes()]))
+ node.setprop_inplace(cstr!("reg"), [addr.to_be_bytes(), size.to_be_bytes()].as_flattened())
}
fn empty_or_delete_prop(
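The hunks above swap vmbase's local `flatten()` helper for `as_flattened()`, which has been available on slices of arrays in the standard library since Rust 1.80. A self-contained illustration:

```rust
fn main() {
    // Two big-endian u64 cells viewed as one contiguous byte slice, as when
    // building an FDT property from an (address, size) pair.
    let cells: [[u8; 8]; 2] = [1u64.to_be_bytes(), 2u64.to_be_bytes()];
    let bytes: &[u8] = cells.as_flattened();
    assert_eq!(bytes.len(), 16);
    assert_eq!(bytes[7], 1);
}
```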
diff --git a/guest/pvmfw/src/memory.rs b/guest/pvmfw/src/memory.rs
index 8d12b57..f49d79b 100644
--- a/guest/pvmfw/src/memory.rs
+++ b/guest/pvmfw/src/memory.rs
@@ -14,15 +14,23 @@
//! Low-level allocation and tracking of main memory.
+use crate::entry::RebootReason;
+use crate::fdt;
use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::paging::VirtualAddress;
use aarch64_paging::MapError;
+use core::num::NonZeroUsize;
use core::ops::Range;
use core::result;
+use core::slice;
+use log::debug;
use log::error;
+use log::info;
+use log::warn;
use vmbase::{
- layout,
- memory::{PageTable, SIZE_2MB, SIZE_4KB},
+ hyp::get_mem_sharer,
+ layout::{self, crosvm},
+ memory::{PageTable, MEMORY, SIZE_2MB, SIZE_4KB},
util::align_up,
};
@@ -57,3 +65,109 @@
}
Ok(page_table)
}
+
+pub(crate) struct MemorySlices<'a> {
+ pub fdt: &'a mut libfdt::Fdt,
+ pub kernel: &'a [u8],
+ pub ramdisk: Option<&'a [u8]>,
+}
+
+impl<'a> MemorySlices<'a> {
+ pub fn new(
+ fdt: usize,
+ kernel: usize,
+ kernel_size: usize,
+ vm_dtbo: Option<&mut [u8]>,
+ vm_ref_dt: Option<&[u8]>,
+ ) -> Result<Self, RebootReason> {
+ let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
+ // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
+ // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
+ // overwrite with the template DT and apply the DTBO.
+ let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
+ error!("Failed to allocate the FDT range: {e}");
+ RebootReason::InternalError
+ })?;
+
+ // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
+ let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
+
+ let info = fdt::sanitize_device_tree(fdt, vm_dtbo, vm_ref_dt)?;
+ let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
+ error!("Failed to load sanitized FDT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ debug!("Fdt passed validation!");
+
+ let memory_range = info.memory_range;
+ debug!("Resizing MemoryTracker to range {memory_range:#x?}");
+ MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
+ error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule().map_err(|e| {
+ error!("Failed to get memory protection granule: {e}");
+ RebootReason::InternalError
+ })?;
+ MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
+ error!("Failed to initialize dynamically shared pool: {e}");
+ RebootReason::InternalError
+ })?;
+ } else {
+ let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
+ error!("Pre-shared pool range not specified in swiotlb node");
+ RebootReason::InvalidFdt
+ })?;
+
+ MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
+ error!("Failed to initialize pre-shared pool {e}");
+ RebootReason::InvalidFdt
+ })?;
+ }
+
+ let kernel_range = if let Some(r) = info.kernel_range {
+ MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
+ error!("Failed to obtain the kernel range with DT range: {e}");
+ RebootReason::InternalError
+ })?
+ } else if cfg!(feature = "legacy") {
+ warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");
+
+ let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
+ error!("Invalid kernel size: {kernel_size:#x}");
+ RebootReason::InvalidPayload
+ })?;
+
+ MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
+ error!("Failed to obtain the kernel range with legacy range: {e}");
+ RebootReason::InternalError
+ })?
+ } else {
+ error!("Failed to locate the kernel from the DT");
+ return Err(RebootReason::InvalidPayload);
+ };
+
+ let kernel = kernel_range.start as *const u8;
+ // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
+ let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };
+
+ let ramdisk = if let Some(r) = info.initrd_range {
+ debug!("Located ramdisk at {r:?}");
+ let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
+ error!("Failed to obtain the initrd range: {e}");
+ RebootReason::InvalidRamdisk
+ })?;
+
+ // SAFETY: The region was validated by memory to be in main memory, mapped, and
+ // not overlap.
+ Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
+ } else {
+ info!("Couldn't locate the ramdisk from the device tree");
+ None
+ };
+
+ Ok(Self { fdt, kernel, ramdisk })
+ }
+}
diff --git a/guest/rialto/src/exceptions.rs b/guest/rialto/src/exceptions.rs
index e87e0d3..8899796 100644
--- a/guest/rialto/src/exceptions.rs
+++ b/guest/rialto/src/exceptions.rs
@@ -16,9 +16,9 @@
use vmbase::{
eprintln,
+ exceptions::{handle_permission_fault, handle_translation_fault},
exceptions::{ArmException, Esr, HandleExceptionError},
logger,
- memory::{handle_permission_fault, handle_translation_fault},
power::reboot,
read_sysreg,
};
diff --git a/guest/rialto/src/main.rs b/guest/rialto/src/main.rs
index f09cbd2..ec9a76e 100644
--- a/guest/rialto/src/main.rs
+++ b/guest/rialto/src/main.rs
@@ -47,8 +47,8 @@
fdt::pci::PciInfo,
fdt::SwiotlbInfo,
generate_image_header,
- hyp::{get_mem_sharer, get_mmio_guard},
- layout::{self, crosvm, UART_PAGE_ADDR},
+ hyp::get_mem_sharer,
+ layout::{self, crosvm},
main,
memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_128KB},
power::reboot,
@@ -188,36 +188,14 @@
.ok_or(Error::MissingVirtIOSocketDevice)
}
-fn try_unshare_all_memory() -> Result<()> {
- info!("Starting unsharing memory...");
-
- // No logging after unmapping UART.
- if let Some(mmio_guard) = get_mmio_guard() {
- mmio_guard.unmap(UART_PAGE_ADDR)?;
- }
- // Unshares all memory and deactivates page table.
- drop(MEMORY.lock().take());
- Ok(())
-}
-
-fn unshare_all_memory() {
- if let Err(e) = try_unshare_all_memory() {
- error!("Failed to unshare the memory: {e}");
- }
-}
-
/// Entry point for Rialto.
pub fn main(fdt_addr: u64, _a1: u64, _a2: u64, _a3: u64) {
log::set_max_level(log::LevelFilter::Debug);
// SAFETY: `fdt_addr` is supposed to be a valid pointer and points to
// a valid `Fdt`.
- match unsafe { try_main(fdt_addr as usize) } {
- Ok(()) => unshare_all_memory(),
- Err(e) => {
- error!("Rialto failed with {e}");
- unshare_all_memory();
- reboot()
- }
+ if let Err(e) = unsafe { try_main(fdt_addr as usize) } {
+ error!("Rialto failed with {e}");
+ reboot()
}
}
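The explicit `unshare_all_memory()` paths removed above appear redundant because `MemoryTracker` implements `Drop` (visible in the shared.rs/tracker.rs move below): dropping the tracker disables DBM, flushes dirty pages, and unshares memory, presumably now triggered from common vmbase teardown. A toy sketch of that RAII pattern, with a hypothetical guard type:

```rust
struct SharedGuard;

impl Drop for SharedGuard {
    fn drop(&mut self) {
        // Stand-in for unsharing memory / deactivating the page table.
        println!("cleanup ran");
    }
}

fn try_main() -> Result<(), ()> {
    let _guard = SharedGuard;
    Err(()) // Early exits still run the guard's destructor.
}

fn main() {
    if let Err(()) = try_main() {
        println!("failed, but cleanup already happened");
    }
}
```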
diff --git a/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachine.java b/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachine.java
index c2f4c7d..5f634ef 100644
--- a/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachine.java
+++ b/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachine.java
@@ -43,7 +43,6 @@
import static java.util.Objects.requireNonNull;
import android.annotation.CallbackExecutor;
-import android.annotation.FlaggedApi;
import android.annotation.IntDef;
import android.annotation.IntRange;
import android.annotation.NonNull;
diff --git a/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineConfig.java b/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineConfig.java
index 0b7059a..3d1964d 100644
--- a/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -22,7 +22,6 @@
import static java.util.Objects.requireNonNull;
-import android.annotation.FlaggedApi;
import android.annotation.IntDef;
import android.annotation.IntRange;
import android.annotation.NonNull;
diff --git a/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineManager.java b/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineManager.java
index 8f5413e..ae34a1e 100644
--- a/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineManager.java
+++ b/libs/framework-virtualization/src/android/system/virtualmachine/VirtualMachineManager.java
@@ -18,7 +18,6 @@
import static java.util.Objects.requireNonNull;
-import android.annotation.FlaggedApi;
import android.annotation.IntDef;
import android.annotation.NonNull;
import android.annotation.Nullable;
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 8ea9cd9..5883567 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -624,7 +624,7 @@
/// # Safety
///
/// It is undefined to call this function on a slice that does not contain a valid device tree.
- pub unsafe fn unchecked_from_slice(fdt: &[u8]) -> &Self {
+ pub const unsafe fn unchecked_from_slice(fdt: &[u8]) -> &Self {
let self_ptr = fdt as *const _ as *const _;
// SAFETY: The pointer is non-null, dereferenceable, and points to allocated memory.
unsafe { &*self_ptr }
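Making `unchecked_from_slice` a `const fn` is what allows the new compile-time `FDT_TEMPLATE` constant in pvmfw's fdt.rs above. A minimal sketch of the underlying cast, assuming a transparent wrapper like libfdt's `Fdt`:

```rust
#[repr(transparent)]
struct Fdt {
    buffer: [u8],
}

impl Fdt {
    const unsafe fn unchecked_from_slice(fdt: &[u8]) -> &Self {
        let self_ptr = fdt as *const [u8] as *const Self;
        // SAFETY: deferred to the caller, as in the real function.
        unsafe { &*self_ptr }
    }
}

// The constructor can now run in a constant initializer.
static RAW: &[u8] = &[0xd0, 0x0d, 0xfe, 0xed];
static TEMPLATE: &Fdt = unsafe { Fdt::unchecked_from_slice(RAW) };

fn main() {
    assert_eq!(TEMPLATE.buffer.len(), 4);
}
```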
diff --git a/libs/libvmbase/Android.bp b/libs/libvmbase/Android.bp
index 206c4cb..c4e8385 100644
--- a/libs/libvmbase/Android.bp
+++ b/libs/libvmbase/Android.bp
@@ -79,6 +79,7 @@
rustlibs: [
"libaarch64_paging",
"libbuddy_system_allocator",
+ "libcfg_if",
"libcstr",
"liblibfdt_nostd",
"liblog_rust_nostd",
diff --git a/libs/libvmbase/src/arch.rs b/libs/libvmbase/src/arch.rs
index 992ab27..0348800 100644
--- a/libs/libvmbase/src/arch.rs
+++ b/libs/libvmbase/src/arch.rs
@@ -1,4 +1,4 @@
-// Copyright 2023, The Android Open Source Project
+// Copyright 2024, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,85 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! Wrappers of assembly calls.
+//! Low-level CPU-specific operations.
-/// Reads a value from a system register.
-#[macro_export]
-macro_rules! read_sysreg {
- ($sysreg:literal) => {{
- let mut r: usize;
- #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
- // SAFETY: Reading a system register does not affect memory.
- unsafe {
- core::arch::asm!(
- concat!("mrs {}, ", $sysreg),
- out(reg) r,
- options(nomem, nostack, preserves_flags),
- )
- }
- r
- }};
-}
-
-/// Writes a value to a system register.
-///
-/// # Safety
-///
-/// Callers must ensure that side effects of updating the system register are properly handled.
-#[macro_export]
-macro_rules! write_sysreg {
- ($sysreg:literal, $val:expr) => {{
- let value: usize = $val;
- core::arch::asm!(
- concat!("msr ", $sysreg, ", {}"),
- in(reg) value,
- options(nomem, nostack, preserves_flags),
- )
- }};
-}
-
-/// Executes an instruction synchronization barrier.
-#[macro_export]
-macro_rules! isb {
- () => {{
- #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
- // SAFETY: memory barriers do not affect Rust's memory model.
- unsafe {
- core::arch::asm!("isb", options(nomem, nostack, preserves_flags));
- }
- }};
-}
-
-/// Executes a data synchronization barrier.
-#[macro_export]
-macro_rules! dsb {
- ($option:literal) => {{
- #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
- // SAFETY: memory barriers do not affect Rust's memory model.
- unsafe {
- core::arch::asm!(concat!("dsb ", $option), options(nomem, nostack, preserves_flags));
- }
- }};
-}
-
-/// Invalidates cached leaf PTE entries by virtual address.
-#[macro_export]
-macro_rules! tlbi {
- ($option:literal, $asid:expr, $addr:expr) => {{
- let asid: usize = $asid;
- let addr: usize = $addr;
- #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
- // SAFETY: Invalidating the TLB doesn't affect Rust. When the address matches a
- // block entry larger than the page size, all translations for the block are invalidated.
- unsafe {
- core::arch::asm!(
- concat!("tlbi ", $option, ", {x}"),
- x = in(reg) (asid << 48) | (addr >> 12),
- options(nomem, nostack, preserves_flags)
- );
- }
- }};
-}
+#[cfg(target_arch = "aarch64")]
+pub mod aarch64;
/// Write with well-defined compiled behavior.
///
@@ -99,14 +24,32 @@
/// # Safety
///
/// `dst` must be valid for writes.
+#[inline]
pub unsafe fn write_volatile_u8(dst: *mut u8, src: u8) {
- // SAFETY: strb only modifies *dst, which must be valid for writes.
- unsafe {
- core::arch::asm!(
- "strb {value:w}, [{ptr}]",
- value = in(reg) src,
- ptr = in(reg) dst,
- options(preserves_flags),
- );
+ cfg_if::cfg_if! {
+ if #[cfg(target_arch = "aarch64")] {
+ // SAFETY: `dst` is valid for writes.
+ unsafe { aarch64::strb(dst, src) }
+ } else {
+ compile_error!("Unsupported target_arch")
+ }
+ }
+}
+
+/// Flush `size` bytes of data cache by virtual address.
+#[inline]
+pub(crate) fn flush_region(start: usize, size: usize) {
+ cfg_if::cfg_if! {
+ if #[cfg(target_arch = "aarch64")] {
+ let line_size = aarch64::min_dcache_line_size();
+ let end = start + size;
+ let start = crate::util::unchecked_align_down(start, line_size);
+
+ for line in (start..end).step_by(line_size) {
+ crate::dc!("cvau", line);
+ }
+ } else {
+ compile_error!("Unsupported target_arch")
+ }
}
}
diff --git a/libs/libvmbase/src/arch/aarch64.rs b/libs/libvmbase/src/arch/aarch64.rs
new file mode 100644
index 0000000..5006aca
--- /dev/null
+++ b/libs/libvmbase/src/arch/aarch64.rs
@@ -0,0 +1,143 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrappers of assembly calls.
+
+/// Reads a value from a system register.
+#[macro_export]
+macro_rules! read_sysreg {
+ ($sysreg:literal) => {{
+ let mut r: usize;
+ #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Reading a system register does not affect memory.
+ unsafe {
+ core::arch::asm!(
+ concat!("mrs {}, ", $sysreg),
+ out(reg) r,
+ options(nomem, nostack, preserves_flags),
+ )
+ }
+ r
+ }};
+}
+
+/// Writes a value to a system register.
+///
+/// # Safety
+///
+/// Callers must ensure that side effects of updating the system register are properly handled.
+#[macro_export]
+macro_rules! write_sysreg {
+ ($sysreg:literal, $val:expr) => {{
+ let value: usize = $val;
+ core::arch::asm!(
+ concat!("msr ", $sysreg, ", {}"),
+ in(reg) value,
+ options(nomem, nostack, preserves_flags),
+ )
+ }};
+}
+
+/// Executes an instruction synchronization barrier.
+#[macro_export]
+macro_rules! isb {
+ () => {{
+ #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: memory barriers do not affect Rust's memory model.
+ unsafe {
+ core::arch::asm!("isb", options(nomem, nostack, preserves_flags));
+ }
+ }};
+}
+
+/// Executes a data synchronization barrier.
+#[macro_export]
+macro_rules! dsb {
+ ($option:literal) => {{
+ #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: memory barriers do not affect Rust's memory model.
+ unsafe {
+ core::arch::asm!(concat!("dsb ", $option), options(nomem, nostack, preserves_flags));
+ }
+ }};
+}
+
+/// Executes a data cache operation.
+#[macro_export]
+macro_rules! dc {
+ ($option:literal, $addr:expr) => {{
+ let addr: usize = $addr;
+ #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Clearing cache lines shouldn't have Rust-visible side effects.
+ unsafe {
+ core::arch::asm!(
+ concat!("dc ", $option, ", {x}"),
+ x = in(reg) addr,
+ options(nomem, nostack, preserves_flags),
+ );
+ }
+ }};
+}
+
+/// Invalidates cached leaf PTE entries by virtual address.
+#[macro_export]
+macro_rules! tlbi {
+ ($option:literal, $asid:expr, $addr:expr) => {{
+ let asid: usize = $asid;
+ let addr: usize = $addr;
+ #[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Invalidating the TLB doesn't affect Rust. When the address matches a
+ // block entry larger than the page size, all translations for the block are invalidated.
+ unsafe {
+ core::arch::asm!(
+ concat!("tlbi ", $option, ", {x}"),
+ x = in(reg) (asid << 48) | (addr >> 12),
+ options(nomem, nostack, preserves_flags)
+ );
+ }
+ }};
+}
+
+/// STRB intrinsics.
+///
+/// See https://github.com/rust-lang/rust/issues/131894
+///
+/// # Safety
+///
+/// `dst` must be valid for writes.
+#[inline]
+pub unsafe fn strb(dst: *mut u8, src: u8) {
+ // SAFETY: strb only modifies *dst, which must be valid for writes.
+ unsafe {
+ core::arch::asm!(
+ "strb {value:w}, [{ptr}]",
+ value = in(reg) src,
+ ptr = in(reg) dst,
+ options(preserves_flags),
+ );
+ }
+}
+
+/// Returns the size in bytes of the smallest cache line of all the data caches and unified caches.
+#[inline]
+pub fn min_dcache_line_size() -> usize {
+ const DMINLINE_SHIFT: usize = 16;
+ const DMINLINE_MASK: usize = 0xf;
+ let ctr_el0 = read_sysreg!("ctr_el0");
+
+ // DminLine: log2 of the number of words in the smallest cache line of all the data caches.
+ let dminline = (ctr_el0 >> DMINLINE_SHIFT) & DMINLINE_MASK;
+
+    // DminLine counts 32-bit words, so scale by the word size to get bytes.
+    core::mem::size_of::<u32>() << dminline
+}
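As a worked example of the `DminLine` decoding in `min_dcache_line_size()` (hypothetical CTR_EL0 value; runnable on any host since it only does the arithmetic):

```rust
fn main() {
    // Suppose CTR_EL0.DminLine = 4: the smallest D-cache line holds
    // 2^4 = 16 32-bit words, i.e. 64 bytes.
    let ctr_el0: usize = 0x4 << 16;
    let dminline = (ctr_el0 >> 16) & 0xf;
    assert_eq!(core::mem::size_of::<u32>() << dminline, 64);
}
```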
diff --git a/libs/libvmbase/src/exceptions.rs b/libs/libvmbase/src/exceptions.rs
index 11fcd93..b04cb16 100644
--- a/libs/libvmbase/src/exceptions.rs
+++ b/libs/libvmbase/src/exceptions.rs
@@ -17,11 +17,12 @@
use crate::{
eprintln,
layout::UART_PAGE_ADDR,
- memory::{page_4kb_of, MemoryTrackerError},
+ memory::{page_4kb_of, MemoryTrackerError, MEMORY},
read_sysreg,
};
use aarch64_paging::paging::VirtualAddress;
use core::fmt;
+use core::result;
/// Represents an error that can occur while handling an exception.
#[derive(Debug)]
@@ -136,3 +137,19 @@
self.esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(self.far.0) == UART_PAGE_ADDR
}
}
+
+/// Handles a translation fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_mmio_fault(far)?)
+}
+
+/// Handles a permission fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_permission_fault(far)?)
+}
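A sketch of how a data-abort handler might route faults to the two helpers above. Everything here is a hypothetical, self-contained stand-in; the real `Esr` variants, `VirtualAddress`, and helper bodies live in vmbase and may differ:

```rust
struct VirtualAddress(usize);

enum Esr {
    DataAbortTranslationFault,
    DataAbortPermissionFault,
    Unknown,
}

enum HandleExceptionError {
    UnknownException,
}

fn handle_translation_fault(_far: VirtualAddress) -> Result<(), HandleExceptionError> {
    Ok(()) // Stand-in for the MEMORY-locking helper above.
}

fn handle_permission_fault(_far: VirtualAddress) -> Result<(), HandleExceptionError> {
    Ok(())
}

// Dispatch a decoded data abort to the matching helper.
fn handle_data_abort(esr: Esr, far: VirtualAddress) -> Result<(), HandleExceptionError> {
    match esr {
        Esr::DataAbortTranslationFault => handle_translation_fault(far),
        Esr::DataAbortPermissionFault => handle_permission_fault(far),
        Esr::Unknown => Err(HandleExceptionError::UnknownException),
    }
}

fn main() {
    assert!(handle_data_abort(Esr::DataAbortTranslationFault, VirtualAddress(0x9000_0000)).is_ok());
}
```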
diff --git a/libs/libvmbase/src/memory.rs b/libs/libvmbase/src/memory.rs
index 299d50f..e0ea207 100644
--- a/libs/libvmbase/src/memory.rs
+++ b/libs/libvmbase/src/memory.rs
@@ -18,16 +18,16 @@
mod error;
mod page_table;
mod shared;
+mod tracker;
mod util;
pub use error::MemoryTrackerError;
pub use page_table::PageTable;
-pub use shared::{
- handle_permission_fault, handle_translation_fault, MemoryRange, MemoryTracker, MEMORY,
-};
+pub use shared::MemoryRange;
+pub use tracker::{MemoryTracker, MEMORY};
pub use util::{
- flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, PAGE_SIZE, SIZE_128KB, SIZE_16KB,
- SIZE_2MB, SIZE_4KB, SIZE_4MB, SIZE_64KB,
+ flush, flushed_zeroize, page_4kb_of, PAGE_SIZE, SIZE_128KB, SIZE_16KB, SIZE_2MB, SIZE_4KB,
+ SIZE_4MB, SIZE_64KB,
};
pub(crate) use shared::{alloc_shared, dealloc_shared};
diff --git a/libs/libvmbase/src/memory/dbm.rs b/libs/libvmbase/src/memory/dbm.rs
index 108cd5d..de43403 100644
--- a/libs/libvmbase/src/memory/dbm.rs
+++ b/libs/libvmbase/src/memory/dbm.rs
@@ -15,7 +15,7 @@
//! Hardware management of the access flag and dirty state.
use super::page_table::PageTable;
-use super::util::flush_region;
+use crate::arch::flush_region;
use crate::{dsb, isb, read_sysreg, tlbi, write_sysreg};
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
diff --git a/libs/libvmbase/src/memory/shared.rs b/libs/libvmbase/src/memory/shared.rs
index d869b16..92dd09e 100644
--- a/libs/libvmbase/src/memory/shared.rs
+++ b/libs/libvmbase/src/memory/shared.rs
@@ -14,378 +14,40 @@
//! Shared memory management.
-use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
-use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::virt_to_phys;
-use crate::dsb;
-use crate::exceptions::HandleExceptionError;
use crate::hyp::{self, get_mem_sharer, get_mmio_guard};
use crate::layout;
use crate::util::unchecked_align_down;
-use crate::util::RangeExt as _;
-use aarch64_paging::paging::{
- Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, PAGE_SIZE,
-};
+use aarch64_paging::paging::{MemoryRegion as VaRange, VirtualAddress, PAGE_SIZE};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
-use alloc::boxed::Box;
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
-use core::mem::size_of;
-use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
-use log::{debug, error, trace};
+use log::trace;
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
-use tinyvec::ArrayVec;
-/// A global static variable representing the system memory tracker, protected by a spin mutex.
-pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
-
-static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
-static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);
+pub(crate) static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
+pub(crate) static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);
/// Memory range.
pub type MemoryRange = Range<usize>;
-fn get_va_range(range: &MemoryRange) -> VaRange {
- VaRange::new(range.start, range.end)
-}
-
type Result<T> = result::Result<T, MemoryTrackerError>;
-#[derive(Clone, Copy, Debug, Default, PartialEq)]
-enum MemoryType {
- #[default]
- ReadOnly,
- ReadWrite,
-}
-
-#[derive(Clone, Debug, Default)]
-struct MemoryRegion {
- range: MemoryRange,
- mem_type: MemoryType,
-}
-
-/// Tracks non-overlapping slices of main memory.
-pub struct MemoryTracker {
- total: MemoryRange,
- page_table: PageTable,
- regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
- mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
- mmio_range: MemoryRange,
- payload_range: Option<MemoryRange>,
- mmio_sharer: MmioSharer,
-}
-
-impl MemoryTracker {
- const CAPACITY: usize = 5;
- const MMIO_CAPACITY: usize = 5;
-
- /// Creates a new instance from an active page table, covering the maximum RAM size.
- pub fn new(
- mut page_table: PageTable,
- total: MemoryRange,
- mmio_range: MemoryRange,
- payload_range: Option<Range<VirtualAddress>>,
- ) -> Self {
- assert!(
- !total.overlaps(&mmio_range),
- "MMIO space should not overlap with the main memory region."
- );
-
- // Activate dirty state management first, otherwise we may get permission faults immediately
- // after activating the new page table. This has no effect before the new page table is
- // activated because none of the entries in the initial idmap have the DBM flag.
- set_dbm_enabled(true);
-
- debug!("Activating dynamic page table...");
- // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
- // aware of so activating it shouldn't have any visible effect.
- unsafe { page_table.activate() }
- debug!("... Success!");
-
- Self {
- total,
- page_table,
- regions: ArrayVec::new(),
- mmio_regions: ArrayVec::new(),
- mmio_range,
- payload_range: payload_range.map(|r| r.start.0..r.end.0),
- mmio_sharer: MmioSharer::new().unwrap(),
- }
- }
-
- /// Resize the total RAM size.
- ///
- /// This function fails if it contains regions that are not included within the new size.
- pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
- if range.start != self.total.start {
- return Err(MemoryTrackerError::DifferentBaseAddress);
- }
- if self.total.end < range.end {
- return Err(MemoryTrackerError::SizeTooLarge);
- }
- if !self.regions.iter().all(|r| r.range.is_within(range)) {
- return Err(MemoryTrackerError::SizeTooSmall);
- }
-
- self.total = range.clone();
- Ok(())
- }
-
- /// Allocate the address range for a const slice; returns None if failed.
- pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
- let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
- self.check_allocatable(®ion)?;
- self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
- error!("Error during range allocation: {e}");
- MemoryTrackerError::FailedToMap
- })?;
- self.add(region)
- }
-
- /// Allocates the address range for a const slice.
- ///
- /// # Safety
- ///
- /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
- /// data.
- pub unsafe fn alloc_range_outside_main_memory(
- &mut self,
- range: &MemoryRange,
- ) -> Result<MemoryRange> {
- let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
- self.check_no_overlap(®ion)?;
- self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
- error!("Error during range allocation: {e}");
- MemoryTrackerError::FailedToMap
- })?;
- self.add(region)
- }
-
- /// Allocate the address range for a mutable slice; returns None if failed.
- pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
- let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
- self.check_allocatable(®ion)?;
- self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
- error!("Error during mutable range allocation: {e}");
- MemoryTrackerError::FailedToMap
- })?;
- self.add(region)
- }
-
- /// Allocate the address range for a const slice; returns None if failed.
- pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
- self.alloc_range(&(base..(base + size.get())))
- }
-
- /// Allocate the address range for a mutable slice; returns None if failed.
- pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
- self.alloc_range_mut(&(base..(base + size.get())))
- }
-
- /// Checks that the given range of addresses is within the MMIO region, and then maps it
- /// appropriately.
- pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
- if !range.is_within(&self.mmio_range) {
- return Err(MemoryTrackerError::OutOfRange);
- }
- if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
- return Err(MemoryTrackerError::Overlaps);
- }
- if self.mmio_regions.len() == self.mmio_regions.capacity() {
- return Err(MemoryTrackerError::Full);
- }
-
- if get_mmio_guard().is_some() {
- self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
- error!("Error during lazy MMIO device mapping: {e}");
- MemoryTrackerError::FailedToMap
- })?;
- } else {
- self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
- error!("Error during MMIO device mapping: {e}");
- MemoryTrackerError::FailedToMap
- })?;
- }
-
- if self.mmio_regions.try_push(range).is_some() {
- return Err(MemoryTrackerError::Full);
- }
-
- Ok(())
- }
-
- /// Checks that the memory region meets the following criteria:
- /// - It is within the range of the `MemoryTracker`.
- /// - It does not overlap with any previously allocated regions.
- /// - The `regions` ArrayVec has sufficient capacity to add it.
- fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
- if !region.range.is_within(&self.total) {
- return Err(MemoryTrackerError::OutOfRange);
- }
- self.check_no_overlap(region)
- }
-
- /// Checks that the given region doesn't overlap with any other previously allocated regions,
- /// and that the regions ArrayVec has capacity to add it.
- fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
- if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
- return Err(MemoryTrackerError::Overlaps);
- }
- if self.regions.len() == self.regions.capacity() {
- return Err(MemoryTrackerError::Full);
- }
- Ok(())
- }
-
- fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
- if self.regions.try_push(region).is_some() {
- return Err(MemoryTrackerError::Full);
- }
-
- Ok(self.regions.last().unwrap().range.clone())
- }
-
- /// Unshares any MMIO region previously shared with the MMIO guard.
- pub fn unshare_all_mmio(&mut self) -> Result<()> {
- self.mmio_sharer.unshare_all();
-
- Ok(())
- }
-
- /// Initialize the shared heap to dynamically share memory from the global allocator.
- pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
- const INIT_CAP: usize = 10;
-
- let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
- if previous.is_some() {
- return Err(MemoryTrackerError::SharedMemorySetFailure);
- }
-
- SHARED_POOL
- .set(Box::new(LockedFrameAllocator::new()))
- .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;
-
- Ok(())
- }
-
- /// Initialize the shared heap from a static region of memory.
- ///
- /// Some hypervisors such as Gunyah do not support a MemShare API for guest
- /// to share its memory with host. Instead they allow host to designate part
- /// of guest memory as "shared" ahead of guest starting its execution. The
- /// shared memory region is indicated in swiotlb node. On such platforms use
- /// a separate heap to allocate buffers that can be shared with host.
- pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
- let size = NonZeroUsize::new(range.len()).unwrap();
- let range = self.alloc_mut(range.start, size)?;
- let shared_pool = LockedFrameAllocator::<32>::new();
-
- shared_pool.lock().insert(range);
-
- SHARED_POOL
- .set(Box::new(shared_pool))
- .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;
-
- Ok(())
- }
-
- /// Initialize the shared heap to use heap memory directly.
- ///
- /// When running on "non-protected" hypervisors which permit host direct accesses to guest
- /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
- /// dedicated region so this function instructs the shared pool to use the global allocator.
- pub fn init_heap_shared_pool(&mut self) -> Result<()> {
- // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
- // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
- // without any actual "dynamic memory sharing" taking place and, as such, the granule may
- // be set to the one of the global_allocator i.e. a byte.
- self.init_dynamic_shared_pool(size_of::<u8>())
- }
-
- /// Unshares any memory that may have been shared.
- pub fn unshare_all_memory(&mut self) {
- drop(SHARED_MEMORY.lock().take());
- }
-
- /// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
- /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
- fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
- let shared_range = self.mmio_sharer.share(addr)?;
- self.map_lazy_mmio_as_valid(&shared_range)?;
-
- Ok(())
- }
-
- /// Modify the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
- ///
- /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
- fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
- // This must be safe and free from break-before-make (BBM) violations, given that the
- // initial lazy mapping has the valid bit cleared, and each newly created valid descriptor
- // created inside the mapping has the same size and alignment.
- self.page_table
- .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
- let flags = desc.flags().expect("Unsupported PTE flags set");
- if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
- desc.modify_flags(Attributes::VALID, Attributes::empty());
- Ok(())
- } else {
- Err(())
- }
- })
- .map_err(|_| MemoryTrackerError::InvalidPte)
- }
-
- /// Flush all memory regions marked as writable-dirty.
- fn flush_dirty_pages(&mut self) -> Result<()> {
- // Collect memory ranges for which dirty state is tracked.
- let writable_regions =
- self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
- // Execute a barrier instruction to ensure all hardware updates to the page table have been
- // observed before reading PTE flags to determine dirty state.
- dsb!("ish");
- // Now flush writable-dirty pages in those regions.
- for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
- self.page_table
- .walk_range(&get_va_range(range), &flush_dirty_range)
- .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
- }
- Ok(())
- }
-
- /// Handles permission fault for read-only blocks by setting writable-dirty state.
- /// In general, this should be called from the exception handler when hardware dirty
- /// state management is disabled or unavailable.
- fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
- self.page_table
- .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
- .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
- }
-}
-
-impl Drop for MemoryTracker {
- fn drop(&mut self) {
- set_dbm_enabled(false);
- self.flush_dirty_pages().unwrap();
- self.unshare_all_memory();
- }
-}
-
-struct MmioSharer {
+pub(crate) struct MmioSharer {
granule: usize,
frames: BTreeSet<usize>,
}
impl MmioSharer {
- fn new() -> Result<Self> {
+ pub fn new() -> Result<Self> {
let granule = Self::get_granule()?;
let frames = BTreeSet::new();
@@ -395,7 +57,7 @@
Ok(Self { granule, frames })
}
- fn get_granule() -> Result<usize> {
+ pub fn get_granule() -> Result<usize> {
let Some(mmio_guard) = get_mmio_guard() else {
return Ok(PAGE_SIZE);
};
@@ -406,7 +68,7 @@
}
/// Share the MMIO region aligned to the granule size containing addr (not validated as MMIO).
- fn share(&mut self, addr: VirtualAddress) -> Result<VaRange> {
+ pub fn share(&mut self, addr: VirtualAddress) -> Result<VaRange> {
// This can't use virt_to_phys() since 0x0 is a valid MMIO address and we are ID-mapped.
let phys = addr.0;
let base = unchecked_align_down(phys, self.granule);
@@ -427,7 +89,7 @@
Ok((base_va..base_va + self.granule).into())
}
- fn unshare_all(&mut self) {
+ pub fn unshare_all(&mut self) {
let Some(mmio_guard) = get_mmio_guard() else {
return self.frames.clear();
};
@@ -491,7 +153,7 @@
/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
-struct MemorySharer {
+pub(crate) struct MemorySharer {
granule: usize,
frames: Vec<(usize, Layout)>,
}
@@ -499,13 +161,13 @@
impl MemorySharer {
/// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
/// `granule` must be a power of 2.
- fn new(granule: usize, capacity: usize) -> Self {
+ pub fn new(granule: usize, capacity: usize) -> Self {
assert!(granule.is_power_of_two());
Self { granule, frames: Vec::with_capacity(capacity) }
}
/// Gets from the global allocator a granule-aligned region that suits `hint` and share it.
- fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
+ pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
let layout = hint.align_to(self.granule).unwrap().pad_to_align();
assert_ne!(layout.size(), 0);
// SAFETY: layout has non-zero size.
@@ -546,19 +208,3 @@
}
}
}
-
-/// Handles a translation fault with the given fault address register (FAR).
-#[inline]
-pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_mmio_fault(far)?)
-}
-
-/// Handles a permission fault with the given fault address register (FAR).
-#[inline]
-pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_permission_fault(far)?)
-}
diff --git a/libs/libvmbase/src/memory/tracker.rs b/libs/libvmbase/src/memory/tracker.rs
new file mode 100644
index 0000000..acf182f
--- /dev/null
+++ b/libs/libvmbase/src/memory/tracker.rs
@@ -0,0 +1,363 @@
+// Copyright 2024, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Memory management.
+
+use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
+use super::error::MemoryTrackerError;
+use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
+use super::shared::{SHARED_MEMORY, SHARED_POOL};
+use crate::dsb;
+use crate::hyp::get_mmio_guard;
+use crate::memory::shared::{MemoryRange, MemorySharer, MmioSharer};
+use crate::util::RangeExt as _;
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
+use alloc::boxed::Box;
+use buddy_system_allocator::LockedFrameAllocator;
+use core::mem::size_of;
+use core::num::NonZeroUsize;
+use core::ops::Range;
+use core::result;
+use log::{debug, error};
+use spin::mutex::SpinMutex;
+use tinyvec::ArrayVec;
+
+/// A global static variable representing the system memory tracker, protected by a spin mutex.
+pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
+
+fn get_va_range(range: &MemoryRange) -> VaRange {
+ VaRange::new(range.start, range.end)
+}
+
+type Result<T> = result::Result<T, MemoryTrackerError>;
+
+#[derive(Clone, Copy, Debug, Default, PartialEq)]
+enum MemoryType {
+ #[default]
+ ReadOnly,
+ ReadWrite,
+}
+
+#[derive(Clone, Debug, Default)]
+struct MemoryRegion {
+ range: MemoryRange,
+ mem_type: MemoryType,
+}
+
+/// Tracks non-overlapping slices of main memory.
+pub struct MemoryTracker {
+ total: MemoryRange,
+ page_table: PageTable,
+ regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
+ mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
+ mmio_range: MemoryRange,
+ payload_range: Option<MemoryRange>,
+ mmio_sharer: MmioSharer,
+}
+
+impl MemoryTracker {
+ const CAPACITY: usize = 5;
+ const MMIO_CAPACITY: usize = 5;
+
+ /// Creates a new instance from an active page table, covering the maximum RAM size.
+ pub fn new(
+ mut page_table: PageTable,
+ total: MemoryRange,
+ mmio_range: MemoryRange,
+ payload_range: Option<Range<VirtualAddress>>,
+ ) -> Self {
+ assert!(
+ !total.overlaps(&mmio_range),
+ "MMIO space should not overlap with the main memory region."
+ );
+
+ // Activate dirty state management first, otherwise we may get permission faults immediately
+ // after activating the new page table. This has no effect before the new page table is
+ // activated because none of the entries in the initial idmap have the DBM flag.
+ set_dbm_enabled(true);
+
+ debug!("Activating dynamic page table...");
+ // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
+ // aware of so activating it shouldn't have any visible effect.
+ unsafe { page_table.activate() }
+ debug!("... Success!");
+
+ Self {
+ total,
+ page_table,
+ regions: ArrayVec::new(),
+ mmio_regions: ArrayVec::new(),
+ mmio_range,
+ payload_range: payload_range.map(|r| r.start.0..r.end.0),
+ mmio_sharer: MmioSharer::new().unwrap(),
+ }
+ }
+
+ /// Resize the total RAM size.
+ ///
+ /// This function fails if it contains regions that are not included within the new size.
+ pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
+ if range.start != self.total.start {
+ return Err(MemoryTrackerError::DifferentBaseAddress);
+ }
+ if self.total.end < range.end {
+ return Err(MemoryTrackerError::SizeTooLarge);
+ }
+ if !self.regions.iter().all(|r| r.range.is_within(range)) {
+ return Err(MemoryTrackerError::SizeTooSmall);
+ }
+
+ self.total = range.clone();
+ Ok(())
+ }
+
+ /// Allocate the address range for a const slice; returns None if failed.
+ pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+ let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
+ self.check_allocatable(®ion)?;
+ self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
+ error!("Error during range allocation: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ self.add(region)
+ }
+
+ /// Allocates the address range for a const slice.
+ ///
+ /// # Safety
+ ///
+ /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
+ /// data.
+ pub unsafe fn alloc_range_outside_main_memory(
+ &mut self,
+ range: &MemoryRange,
+ ) -> Result<MemoryRange> {
+ let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
+ self.check_no_overlap(®ion)?;
+ self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
+ error!("Error during range allocation: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ self.add(region)
+ }
+
+ /// Allocate the address range for a mutable slice; returns None if failed.
+ pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+ let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
+ self.check_allocatable(®ion)?;
+ self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
+ error!("Error during mutable range allocation: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ self.add(region)
+ }
+
+ /// Allocate the address range for a const slice; returns None if failed.
+ pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+ self.alloc_range(&(base..(base + size.get())))
+ }
+
+ /// Allocate the address range for a mutable slice; returns None if failed.
+ pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+ self.alloc_range_mut(&(base..(base + size.get())))
+ }
+
+ /// Checks that the given range of addresses is within the MMIO region, and then maps it
+ /// appropriately.
+ pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
+ if !range.is_within(&self.mmio_range) {
+ return Err(MemoryTrackerError::OutOfRange);
+ }
+ if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
+ return Err(MemoryTrackerError::Overlaps);
+ }
+ if self.mmio_regions.len() == self.mmio_regions.capacity() {
+ return Err(MemoryTrackerError::Full);
+ }
+
+ if get_mmio_guard().is_some() {
+ self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
+ error!("Error during lazy MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ } else {
+ self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
+ error!("Error during MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ }
+
+ if self.mmio_regions.try_push(range).is_some() {
+ return Err(MemoryTrackerError::Full);
+ }
+
+ Ok(())
+ }
+
+ /// Checks that the memory region meets the following criteria:
+ /// - It is within the range of the `MemoryTracker`.
+ /// - It does not overlap with any previously allocated regions.
+ /// - The `regions` ArrayVec has sufficient capacity to add it.
+ fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
+ if !region.range.is_within(&self.total) {
+ return Err(MemoryTrackerError::OutOfRange);
+ }
+ self.check_no_overlap(region)
+ }
+
+ /// Checks that the given region doesn't overlap with any previously allocated
+ /// region and that the `regions` ArrayVec has capacity to add it.
+ fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
+ if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
+ return Err(MemoryTrackerError::Overlaps);
+ }
+ if self.regions.len() == self.regions.capacity() {
+ return Err(MemoryTrackerError::Full);
+ }
+ Ok(())
+ }
+
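+ /// Records the region in the tracker and returns its range.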
+ fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
+ if self.regions.try_push(region).is_some() {
+ return Err(MemoryTrackerError::Full);
+ }
+
+ Ok(self.regions.last().unwrap().range.clone())
+ }
+
+ /// Unshares any MMIO region previously shared with the MMIO guard.
+ pub fn unshare_all_mmio(&mut self) -> Result<()> {
+ self.mmio_sharer.unshare_all();
+
+ Ok(())
+ }
+
+ /// Initializes the shared heap to dynamically share memory from the global allocator.
+ pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
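+ // Initial capacity of the sharer's backing store; it may grow beyond this.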
+ const INIT_CAP: usize = 10;
+
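+ // Only one MemorySharer may be active at a time; finding a previous one
+ // means the shared pool was already initialized.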
+ let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
+ if previous.is_some() {
+ return Err(MemoryTrackerError::SharedMemorySetFailure);
+ }
+
+ SHARED_POOL
+ .set(Box::new(LockedFrameAllocator::new()))
+ .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;
+
+ Ok(())
+ }
+
+ /// Initializes the shared heap from a static region of memory.
+ ///
+ /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest
+ /// to share its memory with the host. Instead, they allow the host to designate
+ /// part of the guest memory as "shared" ahead of the guest starting its
+ /// execution. The shared memory region is indicated by the swiotlb node. On such
+ /// platforms, use a separate heap to allocate buffers that can be shared with
+ /// the host.
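+ ///
+ /// An illustrative reserved-memory entry for such a region (the node layout and
+ /// addresses here are hypothetical examples):
+ ///
+ /// ```text
+ /// swiotlb {
+ ///     compatible = "restricted-dma-pool";
+ ///     reg = <0x0 0x7f000000 0x0 0x01000000>;
+ /// };
+ /// ```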
+ pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
+ let size = NonZeroUsize::new(range.len()).unwrap();
+ let range = self.alloc_mut(range.start, size)?;
+ let shared_pool = LockedFrameAllocator::<32>::new();
+
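+ // Seed the allocator with the statically shared region; all shared buffers
+ // will be carved out of it instead of the heap.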
+ shared_pool.lock().insert(range);
+
+ SHARED_POOL
+ .set(Box::new(shared_pool))
+ .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;
+
+ Ok(())
+ }
+
+ /// Initializes the shared heap to use heap memory directly.
+ ///
+ /// When running on "non-protected" hypervisors, which permit the host direct
+ /// access to guest memory, there is no need to perform any memory sharing or to
+ /// allocate buffers from a dedicated region, so this function instructs the
+ /// shared pool to use the global allocator.
+ pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+ // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
+ // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
+ // without any actual "dynamic memory sharing" taking place and, as such, the granule may
+ // be set to that of the global allocator, i.e. a single byte.
+ self.init_dynamic_shared_pool(size_of::<u8>())
+ }
+
+ /// Unshares any memory that may have been shared.
+ pub fn unshare_all_memory(&mut self) {
+ drop(SHARED_MEMORY.lock().take());
+ }
+
+ /// Handles a translation fault on a block flagged for lazy MMIO mapping by
+ /// marking the page table entry valid and mapping the block with the MMIO
+ /// guard. Breaks apart a block entry if required.
+ pub(crate) fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
+ let shared_range = self.mmio_sharer.share(addr)?;
+ self.map_lazy_mmio_as_valid(&shared_range)?;
+
+ Ok(())
+ }
+
+ /// Modifies the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
+ ///
+ /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
+ fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
+ // This must be safe and free from break-before-make (BBM) violations, given that the
+ // initial lazy mapping has the valid bit cleared, and each newly created valid descriptor
+ // inside the mapping has the same size and alignment.
+ self.page_table
+ .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
+ let flags = desc.flags().expect("Unsupported PTE flags set");
+ if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+ desc.modify_flags(Attributes::VALID, Attributes::empty());
+ Ok(())
+ } else {
+ Err(())
+ }
+ })
+ .map_err(|_| MemoryTrackerError::InvalidPte)
+ }
+
+ /// Flushes all memory regions marked as writable-dirty.
+ fn flush_dirty_pages(&mut self) -> Result<()> {
+ // Collect memory ranges for which dirty state is tracked.
+ let writable_regions =
+ self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
+ // Execute a barrier instruction to ensure all hardware updates to the page table have been
+ // observed before reading PTE flags to determine dirty state.
+ dsb!("ish");
+ // Now flush writable-dirty pages in those regions.
+ for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
+ self.page_table
+ .walk_range(&get_va_range(range), &flush_dirty_range)
+ .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
+ }
+ Ok(())
+ }
+
+ /// Handles a permission fault on a read-only block by setting its
+ /// writable-dirty state. In general, this should be called from the exception
+ /// handler when hardware dirty-state management is disabled or unavailable.
+ pub(crate) fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
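+ // A single-byte range suffices: modify_range() visits every descriptor
+ // overlapping it, i.e. the block containing the faulting address.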
+ self.page_table
+ .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
+ .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
+ }
+}
+
+impl Drop for MemoryTracker {
+ fn drop(&mut self) {
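+ // Disable hardware dirty-bit management first so that the PTE dirty state
+ // is stable while it is flushed.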
+ set_dbm_enabled(false);
+ self.flush_dirty_pages().unwrap();
+ self.unshare_all_memory();
+ }
+}
diff --git a/libs/libvmbase/src/memory/util.rs b/libs/libvmbase/src/memory/util.rs
index e9f867f..cfb0fa7 100644
--- a/libs/libvmbase/src/memory/util.rs
+++ b/libs/libvmbase/src/memory/util.rs
@@ -14,9 +14,8 @@
//! Utility functions for memory management.
-use crate::read_sysreg;
+use crate::arch::flush_region;
use crate::util::unchecked_align_down;
-use core::arch::asm;
use core::ptr::NonNull;
use zeroize::Zeroize;
@@ -36,38 +35,6 @@
/// The page size in bytes assumed by vmbase - 4 KiB.
pub const PAGE_SIZE: usize = SIZE_4KB;
-/// Reads the number of words in the smallest cache line of all the data caches and unified caches.
-#[inline]
-pub fn min_dcache_line_size() -> usize {
- const DMINLINE_SHIFT: usize = 16;
- const DMINLINE_MASK: usize = 0xf;
- let ctr_el0 = read_sysreg!("ctr_el0");
-
- // DminLine: log2 of the number of words in the smallest cache line of all the data caches.
- let dminline = (ctr_el0 >> DMINLINE_SHIFT) & DMINLINE_MASK;
-
- 1 << dminline
-}
-
-/// Flush `size` bytes of data cache by virtual address.
-#[inline]
-pub(super) fn flush_region(start: usize, size: usize) {
- let line_size = min_dcache_line_size();
- let end = start + size;
- let start = unchecked_align_down(start, line_size);
-
- for line in (start..end).step_by(line_size) {
- // SAFETY: Clearing cache lines shouldn't have Rust-visible side effects.
- unsafe {
- asm!(
- "dc cvau, {x}",
- x = in(reg) line,
- options(nomem, nostack, preserves_flags),
- )
- }
- }
-}
-
/// Flushes the slice to the point of unification.
#[inline]
pub fn flush(reg: &[u8]) {
diff --git a/libs/libvmbase/src/util.rs b/libs/libvmbase/src/util.rs
index e52ac8e..6142cb3 100644
--- a/libs/libvmbase/src/util.rs
+++ b/libs/libvmbase/src/util.rs
@@ -17,15 +17,6 @@
use aarch64_paging::paging::MemoryRegion;
use core::ops::Range;
-/// Flatten [[T; N]] into &[T]
-/// TODO: use slice::flatten when it graduates from experimental
-pub fn flatten<T, const N: usize>(original: &[[T; N]]) -> &[T] {
- // SAFETY: no overflow because original (whose size is len()*N) is already in memory
- let len = original.len() * N;
- // SAFETY: [T] has the same layout as [T;N]
- unsafe { core::slice::from_raw_parts(original.as_ptr().cast(), len) }
-}
-
/// Computes the largest multiple of the provided alignment smaller or equal to the address.
///
/// Note: the result is undefined if alignment isn't a power of two.
diff --git a/libs/vm_launcher_lib/java/com/android/virtualization/vmlauncher/InstallUtils.java b/libs/vm_launcher_lib/java/com/android/virtualization/vmlauncher/InstallUtils.java
index 0e46a63..f5cc912 100644
--- a/libs/vm_launcher_lib/java/com/android/virtualization/vmlauncher/InstallUtils.java
+++ b/libs/vm_launcher_lib/java/com/android/virtualization/vmlauncher/InstallUtils.java
@@ -141,6 +141,12 @@
rules.put("\\$PAYLOAD_DIR", new File(context.getFilesDir(), PAYLOAD_DIR).toString());
rules.put("\\$USER_ID", String.valueOf(context.getUserId()));
rules.put("\\$PACKAGE_NAME", context.getPackageName());
+ String appDataDir = context.getDataDir().toString();
+ // TODO: remove this hack
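+ // For the primary user (ID 0), getDataDir() returns /data/user/0/<package>;
+ // /data/data/<package> is a symlink to that same directory.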
+ if (context.getUserId() == 0) {
+ appDataDir = "/data/data/" + context.getPackageName();
+ }
+ rules.put("\\$APP_DATA_DIR", appDataDir);
return (s) -> {
for (Map.Entry<String, String> rule : rules.entrySet()) {
s = s.replaceAll(rule.getKey(), rule.getValue());
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index ceebb52..e2ee381 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -594,7 +594,7 @@
@Override
public void onPayloadReady(VirtualMachine vm, IBenchmarkService service)
throws RemoteException {
- int vmPid = ProcessUtil.getCrosvmPid(Os.getpid(), mShellExecutor);
+ int vmPid = ProcessUtil.getCrosvmPid(Os.getpid(), "test_vm_mem_usage", mShellExecutor);
mMemTotal = service.getMemInfoEntry("MemTotal");
mMemFree = service.getMemInfoEntry("MemFree");
@@ -668,7 +668,8 @@
@SuppressWarnings("ReturnValueIgnored")
public void onPayloadReady(VirtualMachine vm, IBenchmarkService service)
throws RemoteException {
- int vmPid = ProcessUtil.getCrosvmPid(Os.getpid(), mShellExecutor);
+ int vmPid =
+ ProcessUtil.getCrosvmPid(Os.getpid(), "test_vm_mem_reclaim", mShellExecutor);
// Allocate 256MB of anonymous memory. This will fill all guest
// memory and cause swapping to start.
diff --git a/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java b/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
index c4aba81..c544b77 100644
--- a/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
+++ b/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
@@ -127,9 +127,10 @@
return getSingleChildProcess(parentPid, VIRTMGR_BIN, shellExecutor);
}
- public static int getCrosvmPid(int parentPid, Function<String, String> shellExecutor) {
+ public static int getCrosvmPid(
+ int parentPid, String testName, Function<String, String> shellExecutor) {
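+ // The crosvm process is named after its VM ("crosvm_" + name), so match the
+ // instance launched by this test rather than an arbitrary crosvm child.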
int virtmgrPid = getVirtmgrPid(parentPid, shellExecutor);
- return getSingleChildProcess(virtmgrPid, CROSVM_BIN, shellExecutor);
+ return getSingleChildProcess(virtmgrPid, "crosvm_" + testName, shellExecutor);
}
// To ensure that only one object is created at a time.