vmbase: Introduce mem API & make MEMORY private
Stop clients from accessing MEMORY directly, so that the upcoming
refactoring of memory management can be limited to libvmbase only. As
a result, change the visibility of MemoryTracker and MEMORY to
pub(crate).
Expose the functionality currently needed by clients as individual
functions that can be re-used between Rialto and pvmfw, de-duplicating
some code. Again, this prepares the code for an in-vmbase-only
refactoring. Note that some of those functions will eventually be
integrated into libvmbase's rust_entry(), further simplifying clients.
Where client code using aarch64_paging::VirtualAddress is touched,
switch to usize, making progress towards limiting the use of that
crate to the aarch64-specific subset of vmbase, for portability.
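For context, the new client-facing functions wrap the now-private
MEMORY roughly as sketched below. The libvmbase side is not part of
the hunks in this patch, so the bodies are inferred from the call
sites they replace and may not match the real implementation exactly:

  // Sketch of the vmbase::memory wrappers (illustrative only; MEMORY is
  // assumed to remain a Mutex<Option<MemoryTracker>>, now pub(crate)).

  /// Replaces the static (.rodata) page tables with dynamic ones and
  /// starts tracking memory with a fresh MemoryTracker.
  pub fn switch_to_dynamic_page_tables(page_table: PageTable) {
      MEMORY.lock().replace(MemoryTracker::new(
          page_table,
          crosvm::MEM_START..layout::MAX_VIRT_ADDR,
          crosvm::MMIO_RANGE,
      ));
  }

  /// Drops the MemoryTracker, flushing writable-dirty regions and
  /// deactivating the dynamic page tables.
  pub fn deactivate_dynamic_page_tables() {
      drop(MEMORY.lock().take());
  }

  /// Maps `size` bytes at `addr` as read-only data in main memory.
  pub fn map_rodata(addr: usize, size: NonZeroUsize) -> Result<(), MemoryTrackerError> {
      MEMORY.lock().as_mut().unwrap().alloc(addr, size).map(|_| ())
  }

  /// Shrinks the tracked region of main memory to the range read from the DT.
  pub fn resize_available_memory(range: &Range<usize>) -> Result<(), MemoryTrackerError> {
      MEMORY.lock().as_mut().unwrap().shrink(range)
  }

Clients then call these free functions directly, as the pvmfw and
Rialto hunks below show.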
Bug: 377276983
Test: m {pvmfw,rialto,vmbase_example_{bios,kernel}}_bin
Test: atest rialto_test vmbase_example.integration_test
Change-Id: Ic510dba126200d61ad3691dce415193a0055ef8e
diff --git a/guest/pvmfw/src/entry.rs b/guest/pvmfw/src/entry.rs
index e55d4ca..0607a5a 100644
--- a/guest/pvmfw/src/entry.rs
+++ b/guest/pvmfw/src/entry.rs
@@ -17,21 +17,21 @@
use crate::config;
use crate::memory;
use core::arch::asm;
-use core::mem::{drop, size_of};
+use core::mem::size_of;
use core::ops::Range;
use core::slice;
-use hypervisor_backends::get_mmio_guard;
use log::error;
-use log::info;
use log::warn;
use log::LevelFilter;
use vmbase::util::RangeExt as _;
use vmbase::{
arch::aarch64::min_dcache_line_size,
- configure_heap, console_writeln,
- layout::{self, crosvm, UART_PAGE_ADDR},
- main,
- memory::{MemoryTracker, MemoryTrackerError, MEMORY, SIZE_128KB, SIZE_4KB},
+ configure_heap, console_writeln, layout, main,
+ memory::{
+ deactivate_dynamic_page_tables, map_image_footer, switch_to_dynamic_page_tables,
+ unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart, MemoryTrackerError,
+ SIZE_128KB, SIZE_4KB,
+ },
power::reboot,
};
use zeroize::Zeroize;
@@ -112,13 +112,8 @@
error!("Failed to set up the dynamic page tables: {e}");
RebootReason::InternalError
})?;
-
// Up to this point, we were using the built-in static (from .rodata) page tables.
- MEMORY.lock().replace(MemoryTracker::new(
- page_table,
- crosvm::MEM_START..layout::MAX_VIRT_ADDR,
- crosvm::MMIO_RANGE,
- ));
+ switch_to_dynamic_page_tables(page_table);
let appended_data = get_appended_data_slice().map_err(|e| {
error!("Failed to map the appended data: {e}");
@@ -152,27 +147,23 @@
// Writable-dirty regions will be flushed when MemoryTracker is dropped.
config_entries.bcc.zeroize();
- info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
- MEMORY.lock().as_mut().unwrap().unshare_all_mmio().map_err(|e| {
+ unshare_all_mmio_except_uart().map_err(|e| {
error!("Failed to unshare MMIO ranges: {e}");
RebootReason::InternalError
})?;
// Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
- MEMORY.lock().as_mut().unwrap().unshare_all_memory();
+ unshare_all_memory();
- if let Some(mmio_guard) = get_mmio_guard() {
- if cfg!(debuggable_vms_improvements) && debuggable_payload {
- // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
- } else {
- mmio_guard.unmap(UART_PAGE_ADDR).map_err(|e| {
- error!("Failed to unshare the UART: {e}");
- RebootReason::InternalError
- })?;
- }
+ if cfg!(debuggable_vms_improvements) && debuggable_payload {
+ // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
+ } else {
+ unshare_uart().map_err(|e| {
+ error!("Failed to unshare the UART: {e}");
+ RebootReason::InternalError
+ })?;
}
- // Drop MemoryTracker and deactivate page table.
- drop(MEMORY.lock().take());
+ deactivate_dynamic_page_tables();
Ok((slices.kernel.as_ptr() as usize, next_bcc))
}
@@ -322,7 +313,7 @@
}
fn get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError> {
- let range = MEMORY.lock().as_mut().unwrap().map_image_footer()?;
+ let range = map_image_footer()?;
// SAFETY: This region was just mapped for the first time (as map_image_footer() didn't fail)
// and the linker script prevents it from overlapping with other objects.
Ok(unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) })
diff --git a/guest/pvmfw/src/main.rs b/guest/pvmfw/src/main.rs
index 612281b..bde03ff 100644
--- a/guest/pvmfw/src/main.rs
+++ b/guest/pvmfw/src/main.rs
@@ -55,7 +55,6 @@
use vmbase::fdt::pci::{PciError, PciInfo};
use vmbase::heap;
use vmbase::memory::flush;
-use vmbase::memory::MEMORY;
use vmbase::rand;
use vmbase::virtio::pci;
@@ -101,7 +100,7 @@
// Set up PCI bus for VirtIO devices.
let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
debug!("PCI: {:#x?}", pci_info);
- let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
+ let mut pci_root = pci::initialize(pci_info).map_err(|e| {
error!("Failed to initialize PCI: {e}");
RebootReason::InternalError
})?;
diff --git a/guest/pvmfw/src/memory.rs b/guest/pvmfw/src/memory.rs
index 64a6850..b54f014 100644
--- a/guest/pvmfw/src/memory.rs
+++ b/guest/pvmfw/src/memory.rs
@@ -23,14 +23,13 @@
use core::ops::Range;
use core::result;
use core::slice;
-use hypervisor_backends::get_mem_sharer;
use log::debug;
use log::error;
use log::info;
use log::warn;
use vmbase::{
layout::{self, crosvm},
- memory::{PageTable, MEMORY},
+ memory::{init_shared_pool, map_data, map_rodata, resize_available_memory, PageTable},
};
/// Region allocated for the stack.
@@ -75,13 +74,13 @@
// TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
// e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
// overwrite with the template DT and apply the DTBO.
- let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
+ map_data(fdt, fdt_size).map_err(|e| {
error!("Failed to allocate the FDT range: {e}");
RebootReason::InternalError
})?;
- // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
- let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
+ // SAFETY: map_data validated the range to be in main memory, mapped, and not overlap.
+ let fdt = unsafe { slice::from_raw_parts_mut(fdt as *mut u8, fdt_size.into()) };
let info = fdt::sanitize_device_tree(fdt, vm_dtbo, vm_ref_dt)?;
let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
@@ -92,67 +91,56 @@
let memory_range = info.memory_range;
debug!("Resizing MemoryTracker to range {memory_range:#x?}");
- MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
+ resize_available_memory(&memory_range).map_err(|e| {
error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
RebootReason::InvalidFdt
})?;
- if let Some(mem_sharer) = get_mem_sharer() {
- let granule = mem_sharer.granule().map_err(|e| {
- error!("Failed to get memory protection granule: {e}");
- RebootReason::InternalError
- })?;
- MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
- error!("Failed to initialize dynamically shared pool: {e}");
- RebootReason::InternalError
- })?;
- } else {
- let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
- error!("Pre-shared pool range not specified in swiotlb node");
- RebootReason::InvalidFdt
- })?;
+ init_shared_pool(info.swiotlb_info.fixed_range()).map_err(|e| {
+ error!("Failed to initialize shared pool: {e}");
+ RebootReason::InternalError
+ })?;
- MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
- error!("Failed to initialize pre-shared pool {e}");
- RebootReason::InvalidFdt
- })?;
- }
-
- let kernel_range = if let Some(r) = info.kernel_range {
- MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
- error!("Failed to obtain the kernel range with DT range: {e}");
+ let (kernel_start, kernel_size) = if let Some(r) = info.kernel_range {
+ let size = r.len().try_into().map_err(|_| {
+ error!("Invalid kernel size: {:#x}", r.len());
RebootReason::InternalError
- })?
+ })?;
+ (r.start, size)
} else if cfg!(feature = "legacy") {
warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");
-
- let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
+ let size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
error!("Invalid kernel size: {kernel_size:#x}");
RebootReason::InvalidPayload
})?;
-
- MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
- error!("Failed to obtain the kernel range with legacy range: {e}");
- RebootReason::InternalError
- })?
+ (kernel, size)
} else {
error!("Failed to locate the kernel from the DT");
return Err(RebootReason::InvalidPayload);
};
- let kernel = kernel_range.start as *const u8;
- // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
- let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };
+ map_rodata(kernel_start, kernel_size).map_err(|e| {
+ error!("Failed to map kernel range: {e}");
+ RebootReason::InternalError
+ })?;
+
+ let kernel = kernel_start as *const u8;
+ // SAFETY: map_rodata validated the range to be in main memory, mapped, and not overlap.
+ let kernel = unsafe { slice::from_raw_parts(kernel, kernel_size.into()) };
let ramdisk = if let Some(r) = info.initrd_range {
debug!("Located ramdisk at {r:?}");
- let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
+ let ramdisk_size = r.len().try_into().map_err(|_| {
+ error!("Invalid ramdisk size: {:#x}", r.len());
+ RebootReason::InvalidRamdisk
+ })?;
+ map_rodata(r.start, ramdisk_size).map_err(|e| {
error!("Failed to obtain the initrd range: {e}");
RebootReason::InvalidRamdisk
})?;
- // SAFETY: The region was validated by memory to be in main memory, mapped, and
- // not overlap.
+ // SAFETY: map_rodata validated the range to be in main memory, mapped, and not
+ // overlap.
Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
} else {
info!("Couldn't locate the ramdisk from the device tree");
diff --git a/guest/rialto/src/main.rs b/guest/rialto/src/main.rs
index 456af7f..8095a1f 100644
--- a/guest/rialto/src/main.rs
+++ b/guest/rialto/src/main.rs
@@ -32,8 +32,6 @@
use core::num::NonZeroUsize;
use core::slice;
use diced_open_dice::{bcc_handover_parse, DiceArtifacts};
-use hypervisor_backends::get_mem_sharer;
-use libfdt::FdtError;
use log::{debug, error, info};
use service_vm_comm::{ServiceVmRequest, VmType};
use service_vm_fake_chain::service_vm;
@@ -50,7 +48,10 @@
generate_image_header,
layout::{self, crosvm},
main,
- memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_128KB},
+ memory::{
+ init_shared_pool, map_rodata, map_rodata_outside_main_memory, resize_available_memory,
+ switch_to_dynamic_page_tables, PageTable, PAGE_SIZE, SIZE_128KB,
+ },
power::reboot,
virtio::{
pci::{self, PciTransportIterator, VirtIOSocket},
@@ -91,60 +92,40 @@
info!("Welcome to Rialto!");
let page_table = new_page_table()?;
- MEMORY.lock().replace(MemoryTracker::new(
- page_table,
- crosvm::MEM_START..layout::MAX_VIRT_ADDR,
- crosvm::MMIO_RANGE,
- ));
+ switch_to_dynamic_page_tables(page_table);
- let fdt_range = MEMORY
- .lock()
- .as_mut()
- .unwrap()
- .alloc(fdt_addr, NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap())?;
+ let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
+ map_rodata(fdt_addr, fdt_size)?;
// SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
- let fdt = unsafe { slice::from_raw_parts(fdt_range.start as *mut u8, fdt_range.len()) };
+ let fdt = unsafe { slice::from_raw_parts(fdt_addr as *mut u8, fdt_size.into()) };
// We do not need to validate the DT since it is already validated in pvmfw.
let fdt = libfdt::Fdt::from_slice(fdt)?;
let memory_range = fdt.first_memory_range()?;
- MEMORY.lock().as_mut().unwrap().shrink(&memory_range).inspect_err(|_| {
+ resize_available_memory(&memory_range).inspect_err(|_| {
error!("Failed to use memory range value from DT: {memory_range:#x?}");
})?;
- if let Some(mem_sharer) = get_mem_sharer() {
- let granule = mem_sharer.granule()?;
- MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).inspect_err(|_| {
- error!("Failed to initialize dynamically shared pool.");
- })?;
- } else if let Ok(Some(swiotlb_info)) = SwiotlbInfo::new_from_fdt(fdt) {
- let range = swiotlb_info.fixed_range().ok_or_else(|| {
- error!("Pre-shared pool range not specified in swiotlb node");
- Error::from(FdtError::BadValue)
- })?;
- MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).inspect_err(|_| {
- error!("Failed to initialize pre-shared pool.");
- })?;
- } else {
- info!("No MEM_SHARE capability detected or swiotlb found: allocating buffers from heap.");
- MEMORY.lock().as_mut().unwrap().init_heap_shared_pool().inspect_err(|_| {
- error!("Failed to initialize heap-based pseudo-shared pool.");
- })?;
- }
+ let swiotlb_range = SwiotlbInfo::new_from_fdt(fdt)
+ .inspect_err(|_| {
+ error!("Rialto failed when access swiotlb");
+ })?
+ .and_then(|info| info.fixed_range());
+ init_shared_pool(swiotlb_range).inspect_err(|_| {
+ error!("Failed to initialize shared pool.");
+ })?;
let bcc_handover: Box<dyn DiceArtifacts> = match vm_type(fdt)? {
VmType::ProtectedVm => {
let dice_range = read_dice_range_from(fdt)?;
info!("DICE range: {dice_range:#x?}");
- // SAFETY: This region was written by pvmfw in its writable_data region. The region
- // has no overlap with the main memory region and is safe to be mapped as read-only
- // data.
- let res = unsafe {
- MEMORY.lock().as_mut().unwrap().alloc_range_outside_main_memory(&dice_range)
- };
- res.inspect_err(|_| {
- error!("Failed to use DICE range from DT: {dice_range:#x?}");
- })?;
+ let dice_size = dice_range.len().try_into().unwrap();
+ // SAFETY: The DICE memory region has been generated by pvmfw and doesn't overlap.
+ unsafe { map_rodata_outside_main_memory(dice_range.start, dice_size) }.inspect_err(
+ |_| {
+ error!("Failed to use DICE range from DT: {dice_range:#x?}");
+ },
+ )?;
let dice_start = dice_range.start as *const u8;
// SAFETY: There's no memory overlap and the region is mapped as read-only data.
let bcc_handover = unsafe { slice::from_raw_parts(dice_start, dice_range.len()) };
@@ -157,8 +138,7 @@
let pci_info = PciInfo::from_fdt(fdt)?;
debug!("PCI: {pci_info:#x?}");
- let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap())
- .map_err(Error::PciInitializationFailed)?;
+ let mut pci_root = pci::initialize(pci_info).map_err(Error::PciInitializationFailed)?;
debug!("PCI root: {pci_root:#x?}");
let socket_device = find_socket_device::<HalImpl>(&mut pci_root)?;
debug!("Found socket device: guest cid = {:?}", socket_device.guest_cid());