vmbase: Introduce mem API & turn MEMORY private

Stop allowing clients to access MEMORY directly, so that the upcoming
refactoring of memory management can be confined to libvmbase. As a
result, change the visibility of MemoryTracker and MEMORY to
pub(crate).

Expose the functionality currently needed by clients as individual
functions that can be re-used between Rialto and pvmfw, de-duping some
code. Again, this prepares the code for an in-vmbase-only refactoring.
Note that some of these functions will eventually be integrated into
libvmbase's rust_entry(), simplifying clients.

Where client code using aarch64_paging::VirtualAddress is touched,
switch to usize so that we make progress towards limiting the use of
that crate to the aarch64-specific subset of vmbase, for portability.
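
For illustration, a client that previously locked MEMORY and called
MemoryTracker methods directly would now go through the new free
functions. The sketch below is not part of this change; the surrounding
functions, arguments and "before" comments are only an approximation of
typical client code:

  use vmbase::memory::{
      deactivate_dynamic_page_tables, init_shared_pool, map_rodata,
      switch_to_dynamic_page_tables, unshare_all_memory, MemoryTrackerError, PageTable,
  };

  // Hypothetical early-boot setup in a client such as Rialto or pvmfw.
  fn setup_memory(
      page_table: PageTable,
      fdt_addr: usize,
      fdt_size: core::num::NonZeroUsize,
  ) -> Result<(), MemoryTrackerError> {
      // Before: MEMORY.lock().replace(MemoryTracker::new(page_table, ..))
      switch_to_dynamic_page_tables(page_table);
      // Before: MEMORY.lock().as_mut().unwrap().alloc(fdt_addr, fdt_size)?
      map_rodata(fdt_addr, fdt_size)?;
      // Before: init_{dynamic,static,heap}_shared_pool() on the locked tracker
      init_shared_pool(None)?;
      Ok(())
  }

  // Hypothetical teardown before handing control to the next stage.
  fn teardown_memory() {
      unshare_all_memory();
      deactivate_dynamic_page_tables();
  }
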
Bug: 377276983
Test: m {pvmfw,rialto,vmbase_example_{bios,kernel}}_bin
Test: atest rialto_test vmbase_example.integration_test
Change-Id: Ic510dba126200d61ad3691dce415193a0055ef8e
diff --git a/libs/libvmbase/src/memory.rs b/libs/libvmbase/src/memory.rs
index e0ea207..145f766 100644
--- a/libs/libvmbase/src/memory.rs
+++ b/libs/libvmbase/src/memory.rs
@@ -24,11 +24,16 @@
pub use error::MemoryTrackerError;
pub use page_table::PageTable;
pub use shared::MemoryRange;
-pub use tracker::{MemoryTracker, MEMORY};
+pub use tracker::{
+ deactivate_dynamic_page_tables, init_shared_pool, map_data, map_device, map_image_footer,
+ map_rodata, map_rodata_outside_main_memory, resize_available_memory,
+ switch_to_dynamic_page_tables, unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart,
+};
pub use util::{
flush, flushed_zeroize, page_4kb_of, PAGE_SIZE, SIZE_128KB, SIZE_16KB, SIZE_2MB, SIZE_4KB,
SIZE_4MB, SIZE_64KB,
};
pub(crate) use shared::{alloc_shared, dealloc_shared};
+pub(crate) use tracker::MEMORY;
pub(crate) use util::{phys_to_virt, virt_to_phys};
diff --git a/libs/libvmbase/src/memory/error.rs b/libs/libvmbase/src/memory/error.rs
index 1d42a04..2c00518 100644
--- a/libs/libvmbase/src/memory/error.rs
+++ b/libs/libvmbase/src/memory/error.rs
@@ -21,6 +21,8 @@
/// Errors for MemoryTracker operations.
#[derive(Debug, Clone)]
pub enum MemoryTrackerError {
+ /// MemoryTracker not configured or deactivated.
+ Unavailable,
/// Tried to modify the memory base address.
DifferentBaseAddress,
/// Tried to shrink to a larger memory size.
@@ -60,6 +62,7 @@
impl fmt::Display for MemoryTrackerError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
+ Self::Unavailable => write!(f, "MemoryTracker is not available"),
Self::DifferentBaseAddress => write!(f, "Received different base address"),
Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
diff --git a/libs/libvmbase/src/memory/tracker.rs b/libs/libvmbase/src/memory/tracker.rs
index d699c4c..e75dc11 100644
--- a/libs/libvmbase/src/memory/tracker.rs
+++ b/libs/libvmbase/src/memory/tracker.rs
@@ -29,13 +29,13 @@
use core::num::NonZeroUsize;
use core::ops::Range;
use core::result;
-use hypervisor_backends::get_mmio_guard;
-use log::{debug, error};
-use spin::mutex::SpinMutex;
+use hypervisor_backends::{get_mem_sharer, get_mmio_guard};
+use log::{debug, error, info};
+use spin::mutex::{SpinMutex, SpinMutexGuard};
use tinyvec::ArrayVec;
/// A global static variable representing the system memory tracker, protected by a spin mutex.
-pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
+pub(crate) static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
fn get_va_range(range: &MemoryRange) -> VaRange {
VaRange::new(range.start, range.end)
@@ -43,6 +43,141 @@
type Result<T> = result::Result<T, MemoryTrackerError>;
+/// Attempts to lock `MEMORY`, returning an error if it has already been deactivated.
+fn try_lock_memory_tracker() -> Result<SpinMutexGuard<'static, Option<MemoryTracker>>> {
+ // Being single-threaded, we only spin if `deactivate_dynamic_page_tables()` leaked the lock.
+ MEMORY.try_lock().ok_or(MemoryTrackerError::Unavailable)
+}
+
+/// Switch the MMU to the provided `PageTable`.
+///
+/// Panics if called more than once.
+pub fn switch_to_dynamic_page_tables(pt: PageTable) {
+ let mut locked_tracker = try_lock_memory_tracker().unwrap();
+ if locked_tracker.is_some() {
+ panic!("switch_to_dynamic_page_tables() called more than once.");
+ }
+
+ locked_tracker.replace(MemoryTracker::new(
+ pt,
+ layout::crosvm::MEM_START..layout::MAX_VIRT_ADDR,
+ layout::crosvm::MMIO_RANGE,
+ ));
+}
+
+/// Switch the MMU back to the static page tables (see `idmap` C symbol).
+///
+/// Panics if called before `switch_to_dynamic_page_tables()` or more than once.
+pub fn deactivate_dynamic_page_tables() {
+ let locked_tracker = try_lock_memory_tracker().unwrap();
+ // Force future calls to try_lock_memory_tracker() to fail by leaking this lock guard.
+ let leaked_tracker = SpinMutexGuard::leak(locked_tracker);
+ // Force deallocation/unsharing of all the resources used by the MemoryTracker.
+ drop(leaked_tracker.take())
+}
+
+/// Redefines the actual mappable range of memory.
+///
+/// Fails if a region has already been mapped beyond the new upper limit.
+pub fn resize_available_memory(memory_range: &Range<usize>) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ tracker.shrink(memory_range)
+}
+
+/// Initialize the memory pool for page sharing with the host.
+pub fn init_shared_pool(static_range: Option<Range<usize>>) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule()?;
+ tracker.init_dynamic_shared_pool(granule)
+ } else if let Some(r) = static_range {
+ tracker.init_static_shared_pool(r)
+ } else {
+ info!("Initialized shared pool from heap memory without MEM_SHARE");
+ tracker.init_heap_shared_pool()
+ }
+}
+
+/// Unshare all MMIO that was previously shared with the host, with the exception of the UART page.
+pub fn unshare_all_mmio_except_uart() -> Result<()> {
+ let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return Ok(()) };
+ let Some(tracker) = locked_tracker.as_mut() else { return Ok(()) };
+ if cfg!(feature = "compat_android_13") {
+ info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
+ }
+ tracker.unshare_all_mmio()
+}
+
+/// Unshare all memory that was previously shared with the host.
+pub fn unshare_all_memory() {
+ let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return };
+ let Some(tracker) = locked_tracker.as_mut() else { return };
+ tracker.unshare_all_memory()
+}
+
+/// Unshare the UART page, previously shared with the host.
+pub fn unshare_uart() -> Result<()> {
+ let Some(mmio_guard) = get_mmio_guard() else { return Ok(()) };
+ Ok(mmio_guard.unmap(layout::UART_PAGE_ADDR)?)
+}
+
+/// Map the provided range as normal memory, with R/W permissions.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_data(addr: usize, size: NonZeroUsize) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ let _ = tracker.alloc_mut(addr, size)?;
+ Ok(())
+}
+
+/// Map the region potentially holding data appended to the image, with read-write permissions.
+///
+/// This fails if the footer has already been mapped.
+pub fn map_image_footer() -> Result<Range<usize>> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ let range = tracker.map_image_footer()?;
+ Ok(range)
+}
+
+/// Map the provided range as normal memory, with read-only permissions.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_rodata(addr: usize, size: NonZeroUsize) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ let _ = tracker.alloc(addr, size)?;
+ Ok(())
+}
+
+// TODO(ptosi): Merge this into map_rodata.
+/// Map the provided range as normal memory, with read-only permissions.
+///
+/// # Safety
+///
+/// Callers must ensure that `addr..(addr + size)` is valid for mapping as read-only data.
+pub unsafe fn map_rodata_outside_main_memory(addr: usize, size: NonZeroUsize) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ let end = addr + usize::from(size);
+ // SAFETY: Caller has checked that it is valid to map the range.
+ let _ = unsafe { tracker.alloc_range_outside_main_memory(&(addr..end)) }?;
+ Ok(())
+}
+
+/// Map the provided range as device memory.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_device(addr: usize, size: NonZeroUsize) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    let range = addr..(addr + usize::from(size));
+    tracker.map_mmio_range(range)
+}
+
#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
#[default]
@@ -57,7 +192,7 @@
}
/// Tracks non-overlapping slices of main memory.
-pub struct MemoryTracker {
+pub(crate) struct MemoryTracker {
total: MemoryRange,
page_table: PageTable,
regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
@@ -72,7 +207,7 @@
const MMIO_CAPACITY: usize = 5;
/// Creates a new instance from an active page table, covering the maximum RAM size.
- pub fn new(mut page_table: PageTable, total: MemoryRange, mmio_range: MemoryRange) -> Self {
+ fn new(mut page_table: PageTable, total: MemoryRange, mmio_range: MemoryRange) -> Self {
assert!(
!total.overlaps(&mmio_range),
"MMIO space should not overlap with the main memory region."
@@ -103,7 +238,7 @@
/// Resize the total RAM size.
///
/// This function fails if it contains regions that are not included within the new size.
- pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
+ fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
if range.start != self.total.start {
return Err(MemoryTrackerError::DifferentBaseAddress);
}
@@ -119,7 +254,7 @@
}
/// Allocate the address range for a const slice; returns None if failed.
- pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+ fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
self.check_allocatable(®ion)?;
self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
@@ -135,7 +270,7 @@
///
/// Callers of this method need to ensure that the `range` is valid for mapping as read-only
/// data.
- pub unsafe fn alloc_range_outside_main_memory(
+ unsafe fn alloc_range_outside_main_memory(
&mut self,
range: &MemoryRange,
) -> Result<MemoryRange> {
@@ -149,7 +284,7 @@
}
/// Allocate the address range for a mutable slice; returns None if failed.
- pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+ fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
self.check_allocatable(®ion)?;
self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
@@ -160,7 +295,7 @@
}
/// Maps the image footer read-write, with permissions.
- pub fn map_image_footer(&mut self) -> Result<MemoryRange> {
+ fn map_image_footer(&mut self) -> Result<MemoryRange> {
if self.image_footer_mapped {
return Err(MemoryTrackerError::FooterAlreadyMapped);
}
@@ -174,18 +309,18 @@
}
/// Allocate the address range for a const slice; returns None if failed.
- pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+ fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
self.alloc_range(&(base..(base + size.get())))
}
/// Allocate the address range for a mutable slice; returns None if failed.
- pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+ fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
self.alloc_range_mut(&(base..(base + size.get())))
}
/// Checks that the given range of addresses is within the MMIO region, and then maps it
/// appropriately.
- pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
+ fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
if !range.is_within(&self.mmio_range) {
return Err(MemoryTrackerError::OutOfRange);
}
@@ -247,14 +382,14 @@
}
/// Unshares any MMIO region previously shared with the MMIO guard.
- pub fn unshare_all_mmio(&mut self) -> Result<()> {
+ fn unshare_all_mmio(&mut self) -> Result<()> {
self.mmio_sharer.unshare_all();
Ok(())
}
/// Initialize the shared heap to dynamically share memory from the global allocator.
- pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
+ fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
const INIT_CAP: usize = 10;
let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
@@ -276,7 +411,7 @@
/// of guest memory as "shared" ahead of guest starting its execution. The
/// shared memory region is indicated in swiotlb node. On such platforms use
/// a separate heap to allocate buffers that can be shared with host.
- pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
+ fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
let size = NonZeroUsize::new(range.len()).unwrap();
let range = self.alloc_mut(range.start, size)?;
let shared_pool = LockedFrameAllocator::<32>::new();
@@ -295,7 +430,7 @@
/// When running on "non-protected" hypervisors which permit host direct accesses to guest
/// memory, there is no need to perform any memory sharing and/or allocate buffers from a
/// dedicated region so this function instructs the shared pool to use the global allocator.
- pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+ fn init_heap_shared_pool(&mut self) -> Result<()> {
// As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
// using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
// without any actual "dynamic memory sharing" taking place and, as such, the granule may
diff --git a/libs/libvmbase/src/virtio/pci.rs b/libs/libvmbase/src/virtio/pci.rs
index 72e648b..ec89b6b 100644
--- a/libs/libvmbase/src/virtio/pci.rs
+++ b/libs/libvmbase/src/virtio/pci.rs
@@ -16,7 +16,7 @@
use crate::{
fdt::pci::PciInfo,
- memory::{MemoryTracker, MemoryTrackerError},
+ memory::{map_device, MemoryTrackerError},
};
use alloc::boxed::Box;
use core::fmt;
@@ -65,16 +65,19 @@
/// 2. Stores the `PciInfo` for the VirtIO HAL to use later.
/// 3. Creates and returns a `PciRoot`.
///
-/// This must only be called once; it will panic if it is called a second time.
-pub fn initialize(pci_info: PciInfo, memory: &mut MemoryTracker) -> Result<PciRoot, PciError> {
+/// This must only be called once and after having switched to the dynamic page tables.
+pub fn initialize(pci_info: PciInfo) -> Result<PciRoot, PciError> {
PCI_INFO.set(Box::new(pci_info.clone())).map_err(|_| PciError::DuplicateInitialization)?;
- memory.map_mmio_range(pci_info.cam_range.clone()).map_err(PciError::CamMapFailed)?;
- let bar_range = pci_info.bar_range.start as usize..pci_info.bar_range.end as usize;
- memory.map_mmio_range(bar_range).map_err(PciError::BarMapFailed)?;
+ let cam_start = pci_info.cam_range.start;
+ let cam_size = pci_info.cam_range.len().try_into().unwrap();
+ map_device(cam_start, cam_size).map_err(PciError::CamMapFailed)?;
- // Safety: This is the only place where we call make_pci_root, and `PCI_INFO.set` above will
- // panic if it is called a second time.
+ let bar_start = pci_info.bar_range.start.try_into().unwrap();
+ let bar_size = pci_info.bar_range.len().try_into().unwrap();
+ map_device(bar_start, bar_size).map_err(PciError::BarMapFailed)?;
+
+    // SAFETY: This is the only place we call make_pci_root; `PCI_INFO.set` above fails on reuse.
Ok(unsafe { pci_info.make_pci_root() })
}