vmbase: Introduce mem API & make MEMORY private

Stop clients from accessing MEMORY directly, which will allow the
upcoming refactoring of memory management to be limited to libvmbase.
To that end, change the visibility of MemoryTracker and MEMORY to
pub(crate).

Instead, expose the functionality that clients currently need as
individual functions that can be reused between Rialto and pvmfw,
de-duplicating some code. Again, this prepares the code for a
vmbase-internal refactoring. Note that some of these functions will
eventually be folded into libvmbase's rust_entry(), further
simplifying clients.
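
For example (taken from the pvmfw and Rialto changes below),
entry-point code that previously manipulated the tracker directly:

    MEMORY.lock().replace(MemoryTracker::new(
        page_table,
        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
        crosvm::MMIO_RANGE,
    ));
    // ...
    drop(MEMORY.lock().take());

now simply calls the new helpers:

    switch_to_dynamic_page_tables(page_table);
    // ...
    deactivate_dynamic_page_tables();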

Where this change touches client code that used
aarch64_paging::VirtualAddress, switch to usize, making progress
towards limiting the use of that crate to the aarch64-specific subset
of vmbase, for portability.

Bug: 377276983
Test: m {pvmfw,rialto,vmbase_example_{bios,kernel}}_bin
Test: atest rialto_test vmbase_example.integration_test
Change-Id: Ic510dba126200d61ad3691dce415193a0055ef8e
diff --git a/guest/pvmfw/src/entry.rs b/guest/pvmfw/src/entry.rs
index e55d4ca..0607a5a 100644
--- a/guest/pvmfw/src/entry.rs
+++ b/guest/pvmfw/src/entry.rs
@@ -17,21 +17,21 @@
 use crate::config;
 use crate::memory;
 use core::arch::asm;
-use core::mem::{drop, size_of};
+use core::mem::size_of;
 use core::ops::Range;
 use core::slice;
-use hypervisor_backends::get_mmio_guard;
 use log::error;
-use log::info;
 use log::warn;
 use log::LevelFilter;
 use vmbase::util::RangeExt as _;
 use vmbase::{
     arch::aarch64::min_dcache_line_size,
-    configure_heap, console_writeln,
-    layout::{self, crosvm, UART_PAGE_ADDR},
-    main,
-    memory::{MemoryTracker, MemoryTrackerError, MEMORY, SIZE_128KB, SIZE_4KB},
+    configure_heap, console_writeln, layout, main,
+    memory::{
+        deactivate_dynamic_page_tables, map_image_footer, switch_to_dynamic_page_tables,
+        unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart, MemoryTrackerError,
+        SIZE_128KB, SIZE_4KB,
+    },
     power::reboot,
 };
 use zeroize::Zeroize;
@@ -112,13 +112,8 @@
         error!("Failed to set up the dynamic page tables: {e}");
         RebootReason::InternalError
     })?;
-
     // Up to this point, we were using the built-in static (from .rodata) page tables.
-    MEMORY.lock().replace(MemoryTracker::new(
-        page_table,
-        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
-        crosvm::MMIO_RANGE,
-    ));
+    switch_to_dynamic_page_tables(page_table);
 
     let appended_data = get_appended_data_slice().map_err(|e| {
         error!("Failed to map the appended data: {e}");
@@ -152,27 +147,23 @@
     // Writable-dirty regions will be flushed when MemoryTracker is dropped.
     config_entries.bcc.zeroize();
 
-    info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
-    MEMORY.lock().as_mut().unwrap().unshare_all_mmio().map_err(|e| {
+    unshare_all_mmio_except_uart().map_err(|e| {
         error!("Failed to unshare MMIO ranges: {e}");
         RebootReason::InternalError
     })?;
     // Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
-    MEMORY.lock().as_mut().unwrap().unshare_all_memory();
+    unshare_all_memory();
 
-    if let Some(mmio_guard) = get_mmio_guard() {
-        if cfg!(debuggable_vms_improvements) && debuggable_payload {
-            // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
-        } else {
-            mmio_guard.unmap(UART_PAGE_ADDR).map_err(|e| {
-                error!("Failed to unshare the UART: {e}");
-                RebootReason::InternalError
-            })?;
-        }
+    if cfg!(debuggable_vms_improvements) && debuggable_payload {
+        // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
+    } else {
+        unshare_uart().map_err(|e| {
+            error!("Failed to unshare the UART: {e}");
+            RebootReason::InternalError
+        })?;
     }
 
-    // Drop MemoryTracker and deactivate page table.
-    drop(MEMORY.lock().take());
+    deactivate_dynamic_page_tables();
 
     Ok((slices.kernel.as_ptr() as usize, next_bcc))
 }
@@ -322,7 +313,7 @@
 }
 
 fn get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError> {
-    let range = MEMORY.lock().as_mut().unwrap().map_image_footer()?;
+    let range = map_image_footer()?;
     // SAFETY: This region was just mapped for the first time (as map_image_footer() didn't fail)
     // and the linker script prevents it from overlapping with other objects.
     Ok(unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) })
diff --git a/guest/pvmfw/src/main.rs b/guest/pvmfw/src/main.rs
index 612281b..bde03ff 100644
--- a/guest/pvmfw/src/main.rs
+++ b/guest/pvmfw/src/main.rs
@@ -55,7 +55,6 @@
 use vmbase::fdt::pci::{PciError, PciInfo};
 use vmbase::heap;
 use vmbase::memory::flush;
-use vmbase::memory::MEMORY;
 use vmbase::rand;
 use vmbase::virtio::pci;
 
@@ -101,7 +100,7 @@
     // Set up PCI bus for VirtIO devices.
     let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
     debug!("PCI: {:#x?}", pci_info);
-    let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
+    let mut pci_root = pci::initialize(pci_info).map_err(|e| {
         error!("Failed to initialize PCI: {e}");
         RebootReason::InternalError
     })?;
diff --git a/guest/pvmfw/src/memory.rs b/guest/pvmfw/src/memory.rs
index 64a6850..b54f014 100644
--- a/guest/pvmfw/src/memory.rs
+++ b/guest/pvmfw/src/memory.rs
@@ -23,14 +23,13 @@
 use core::ops::Range;
 use core::result;
 use core::slice;
-use hypervisor_backends::get_mem_sharer;
 use log::debug;
 use log::error;
 use log::info;
 use log::warn;
 use vmbase::{
     layout::{self, crosvm},
-    memory::{PageTable, MEMORY},
+    memory::{init_shared_pool, map_data, map_rodata, resize_available_memory, PageTable},
 };
 
 /// Region allocated for the stack.
@@ -75,13 +74,13 @@
         // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
         // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
         // overwrite with the template DT and apply the DTBO.
-        let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
+        map_data(fdt, fdt_size).map_err(|e| {
             error!("Failed to allocate the FDT range: {e}");
             RebootReason::InternalError
         })?;
 
-        // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
-        let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
+        // SAFETY: map_data validated the range to be in main memory, mapped, and not overlapping.
+        let fdt = unsafe { slice::from_raw_parts_mut(fdt as *mut u8, fdt_size.into()) };
 
         let info = fdt::sanitize_device_tree(fdt, vm_dtbo, vm_ref_dt)?;
         let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
@@ -92,67 +91,56 @@
 
         let memory_range = info.memory_range;
         debug!("Resizing MemoryTracker to range {memory_range:#x?}");
-        MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
+        resize_available_memory(&memory_range).map_err(|e| {
             error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
             RebootReason::InvalidFdt
         })?;
 
-        if let Some(mem_sharer) = get_mem_sharer() {
-            let granule = mem_sharer.granule().map_err(|e| {
-                error!("Failed to get memory protection granule: {e}");
-                RebootReason::InternalError
-            })?;
-            MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
-                error!("Failed to initialize dynamically shared pool: {e}");
-                RebootReason::InternalError
-            })?;
-        } else {
-            let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
-                error!("Pre-shared pool range not specified in swiotlb node");
-                RebootReason::InvalidFdt
-            })?;
+        init_shared_pool(info.swiotlb_info.fixed_range()).map_err(|e| {
+            error!("Failed to initialize shared pool: {e}");
+            RebootReason::InternalError
+        })?;
 
-            MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
-                error!("Failed to initialize pre-shared pool {e}");
-                RebootReason::InvalidFdt
-            })?;
-        }
-
-        let kernel_range = if let Some(r) = info.kernel_range {
-            MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
-                error!("Failed to obtain the kernel range with DT range: {e}");
+        let (kernel_start, kernel_size) = if let Some(r) = info.kernel_range {
+            let size = r.len().try_into().map_err(|_| {
+                error!("Invalid kernel size: {:#x}", r.len());
                 RebootReason::InternalError
-            })?
+            })?;
+            (r.start, size)
         } else if cfg!(feature = "legacy") {
             warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");
-
-            let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
+            let size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
                 error!("Invalid kernel size: {kernel_size:#x}");
                 RebootReason::InvalidPayload
             })?;
-
-            MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
-                error!("Failed to obtain the kernel range with legacy range: {e}");
-                RebootReason::InternalError
-            })?
+            (kernel, size)
         } else {
             error!("Failed to locate the kernel from the DT");
             return Err(RebootReason::InvalidPayload);
         };
 
-        let kernel = kernel_range.start as *const u8;
-        // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
-        let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };
+        map_rodata(kernel_start, kernel_size).map_err(|e| {
+            error!("Failed to map kernel range: {e}");
+            RebootReason::InternalError
+        })?;
+
+        let kernel = kernel_start as *const u8;
+        // SAFETY: map_rodata validated the range to be in main memory, mapped, and not overlapping.
+        let kernel = unsafe { slice::from_raw_parts(kernel, kernel_size.into()) };
 
         let ramdisk = if let Some(r) = info.initrd_range {
             debug!("Located ramdisk at {r:?}");
-            let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
+            let ramdisk_size = r.len().try_into().map_err(|_| {
+                error!("Invalid ramdisk size: {:#x}", r.len());
+                RebootReason::InvalidRamdisk
+            })?;
+            map_rodata(r.start, ramdisk_size).map_err(|e| {
                 error!("Failed to obtain the initrd range: {e}");
                 RebootReason::InvalidRamdisk
             })?;
 
-            // SAFETY: The region was validated by memory to be in main memory, mapped, and
-            // not overlap.
+            // SAFETY: map_rodata validated the range to be in main memory, mapped, and not
+            // overlapping.
             Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
         } else {
             info!("Couldn't locate the ramdisk from the device tree");
diff --git a/guest/rialto/src/main.rs b/guest/rialto/src/main.rs
index 456af7f..8095a1f 100644
--- a/guest/rialto/src/main.rs
+++ b/guest/rialto/src/main.rs
@@ -32,8 +32,6 @@
 use core::num::NonZeroUsize;
 use core::slice;
 use diced_open_dice::{bcc_handover_parse, DiceArtifacts};
-use hypervisor_backends::get_mem_sharer;
-use libfdt::FdtError;
 use log::{debug, error, info};
 use service_vm_comm::{ServiceVmRequest, VmType};
 use service_vm_fake_chain::service_vm;
@@ -50,7 +48,10 @@
     generate_image_header,
     layout::{self, crosvm},
     main,
-    memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_128KB},
+    memory::{
+        init_shared_pool, map_rodata, map_rodata_outside_main_memory, resize_available_memory,
+        switch_to_dynamic_page_tables, PageTable, PAGE_SIZE, SIZE_128KB,
+    },
     power::reboot,
     virtio::{
         pci::{self, PciTransportIterator, VirtIOSocket},
@@ -91,60 +92,40 @@
     info!("Welcome to Rialto!");
     let page_table = new_page_table()?;
 
-    MEMORY.lock().replace(MemoryTracker::new(
-        page_table,
-        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
-        crosvm::MMIO_RANGE,
-    ));
+    switch_to_dynamic_page_tables(page_table);
 
-    let fdt_range = MEMORY
-        .lock()
-        .as_mut()
-        .unwrap()
-        .alloc(fdt_addr, NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap())?;
+    let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
+    map_rodata(fdt_addr, fdt_size)?;
-    // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
+    // SAFETY: map_rodata validated the range to be in main memory, mapped, and not overlapping.
-    let fdt = unsafe { slice::from_raw_parts(fdt_range.start as *mut u8, fdt_range.len()) };
+    let fdt = unsafe { slice::from_raw_parts(fdt_addr as *mut u8, fdt_size.into()) };
     // We do not need to validate the DT since it is already validated in pvmfw.
     let fdt = libfdt::Fdt::from_slice(fdt)?;
 
     let memory_range = fdt.first_memory_range()?;
-    MEMORY.lock().as_mut().unwrap().shrink(&memory_range).inspect_err(|_| {
+    resize_available_memory(&memory_range).inspect_err(|_| {
         error!("Failed to use memory range value from DT: {memory_range:#x?}");
     })?;
 
-    if let Some(mem_sharer) = get_mem_sharer() {
-        let granule = mem_sharer.granule()?;
-        MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).inspect_err(|_| {
-            error!("Failed to initialize dynamically shared pool.");
-        })?;
-    } else if let Ok(Some(swiotlb_info)) = SwiotlbInfo::new_from_fdt(fdt) {
-        let range = swiotlb_info.fixed_range().ok_or_else(|| {
-            error!("Pre-shared pool range not specified in swiotlb node");
-            Error::from(FdtError::BadValue)
-        })?;
-        MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).inspect_err(|_| {
-            error!("Failed to initialize pre-shared pool.");
-        })?;
-    } else {
-        info!("No MEM_SHARE capability detected or swiotlb found: allocating buffers from heap.");
-        MEMORY.lock().as_mut().unwrap().init_heap_shared_pool().inspect_err(|_| {
-            error!("Failed to initialize heap-based pseudo-shared pool.");
-        })?;
-    }
+    let swiotlb_range = SwiotlbInfo::new_from_fdt(fdt)
+        .inspect_err(|_| {
+            error!("Rialto failed when access swiotlb");
+        })?
+        .and_then(|info| info.fixed_range());
+    init_shared_pool(swiotlb_range).inspect_err(|_| {
+        error!("Failed to initialize shared pool.");
+    })?;
 
     let bcc_handover: Box<dyn DiceArtifacts> = match vm_type(fdt)? {
         VmType::ProtectedVm => {
             let dice_range = read_dice_range_from(fdt)?;
             info!("DICE range: {dice_range:#x?}");
-            // SAFETY: This region was written by pvmfw in its writable_data region. The region
-            // has no overlap with the main memory region and is safe to be mapped as read-only
-            // data.
-            let res = unsafe {
-                MEMORY.lock().as_mut().unwrap().alloc_range_outside_main_memory(&dice_range)
-            };
-            res.inspect_err(|_| {
-                error!("Failed to use DICE range from DT: {dice_range:#x?}");
-            })?;
+            let dice_size = dice_range.len().try_into().unwrap();
+            // SAFETY: The DICE region was written by pvmfw and doesn't overlap the main memory.
+            unsafe { map_rodata_outside_main_memory(dice_range.start, dice_size) }.inspect_err(
+                |_| {
+                    error!("Failed to use DICE range from DT: {dice_range:#x?}");
+                },
+            )?;
             let dice_start = dice_range.start as *const u8;
             // SAFETY: There's no memory overlap and the region is mapped as read-only data.
             let bcc_handover = unsafe { slice::from_raw_parts(dice_start, dice_range.len()) };
@@ -157,8 +138,7 @@
 
     let pci_info = PciInfo::from_fdt(fdt)?;
     debug!("PCI: {pci_info:#x?}");
-    let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap())
-        .map_err(Error::PciInitializationFailed)?;
+    let mut pci_root = pci::initialize(pci_info).map_err(Error::PciInitializationFailed)?;
     debug!("PCI root: {pci_root:#x?}");
     let socket_device = find_socket_device::<HalImpl>(&mut pci_root)?;
     debug!("Found socket device: guest cid = {:?}", socket_device.guest_cid());
diff --git a/libs/libvmbase/src/memory.rs b/libs/libvmbase/src/memory.rs
index e0ea207..145f766 100644
--- a/libs/libvmbase/src/memory.rs
+++ b/libs/libvmbase/src/memory.rs
@@ -24,11 +24,16 @@
 pub use error::MemoryTrackerError;
 pub use page_table::PageTable;
 pub use shared::MemoryRange;
-pub use tracker::{MemoryTracker, MEMORY};
+pub use tracker::{
+    deactivate_dynamic_page_tables, init_shared_pool, map_data, map_device, map_image_footer,
+    map_rodata, map_rodata_outside_main_memory, resize_available_memory,
+    switch_to_dynamic_page_tables, unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart,
+};
 pub use util::{
     flush, flushed_zeroize, page_4kb_of, PAGE_SIZE, SIZE_128KB, SIZE_16KB, SIZE_2MB, SIZE_4KB,
     SIZE_4MB, SIZE_64KB,
 };
 
 pub(crate) use shared::{alloc_shared, dealloc_shared};
+pub(crate) use tracker::MEMORY;
 pub(crate) use util::{phys_to_virt, virt_to_phys};
diff --git a/libs/libvmbase/src/memory/error.rs b/libs/libvmbase/src/memory/error.rs
index 1d42a04..2c00518 100644
--- a/libs/libvmbase/src/memory/error.rs
+++ b/libs/libvmbase/src/memory/error.rs
@@ -21,6 +21,8 @@
 /// Errors for MemoryTracker operations.
 #[derive(Debug, Clone)]
 pub enum MemoryTrackerError {
+    /// MemoryTracker not configured or deactivated.
+    Unavailable,
     /// Tried to modify the memory base address.
     DifferentBaseAddress,
     /// Tried to shrink to a larger memory size.
@@ -60,6 +62,7 @@
 impl fmt::Display for MemoryTrackerError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
+            Self::Unavailable => write!(f, "MemoryTracker is not available"),
             Self::DifferentBaseAddress => write!(f, "Received different base address"),
             Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
             Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
diff --git a/libs/libvmbase/src/memory/tracker.rs b/libs/libvmbase/src/memory/tracker.rs
index d699c4c..e75dc11 100644
--- a/libs/libvmbase/src/memory/tracker.rs
+++ b/libs/libvmbase/src/memory/tracker.rs
@@ -29,13 +29,13 @@
 use core::num::NonZeroUsize;
 use core::ops::Range;
 use core::result;
-use hypervisor_backends::get_mmio_guard;
-use log::{debug, error};
-use spin::mutex::SpinMutex;
+use hypervisor_backends::{get_mem_sharer, get_mmio_guard};
+use log::{debug, error, info};
+use spin::mutex::{SpinMutex, SpinMutexGuard};
 use tinyvec::ArrayVec;
 
 /// A global static variable representing the system memory tracker, protected by a spin mutex.
-pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
+pub(crate) static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
 
 fn get_va_range(range: &MemoryRange) -> VaRange {
     VaRange::new(range.start, range.end)
@@ -43,6 +43,141 @@
 
 type Result<T> = result::Result<T, MemoryTrackerError>;
 
+/// Attempts to lock `MEMORY`; returns an error if it has already been deactivated.
+fn try_lock_memory_tracker() -> Result<SpinMutexGuard<'static, Option<MemoryTracker>>> {
+    // Single-threaded: the lock can only be held if deactivate_dynamic_page_tables() leaked it.
+    MEMORY.try_lock().ok_or(MemoryTrackerError::Unavailable)
+}
+
+/// Switches the MMU to the provided `PageTable`.
+///
+/// Panics if called more than once.
+pub fn switch_to_dynamic_page_tables(pt: PageTable) {
+    let mut locked_tracker = try_lock_memory_tracker().unwrap();
+    if locked_tracker.is_some() {
+        panic!("switch_to_dynamic_page_tables() called more than once.");
+    }
+
+    locked_tracker.replace(MemoryTracker::new(
+        pt,
+        layout::crosvm::MEM_START..layout::MAX_VIRT_ADDR,
+        layout::crosvm::MMIO_RANGE,
+    ));
+}
+
+/// Switches the MMU back to the static page tables (see the `idmap` C symbol).
+///
+/// Panics if called before `switch_to_dynamic_page_tables()` or more than once.
+pub fn deactivate_dynamic_page_tables() {
+    let locked_tracker = try_lock_memory_tracker().unwrap();
+    // Force future calls to try_lock_memory_tracker() to fail by leaking this lock guard.
+    let leaked_tracker = SpinMutexGuard::leak(locked_tracker);
+    // Force deallocation/unsharing of all the resources used by the MemoryTracker.
+    drop(leaked_tracker.take().unwrap())
+}
+
+/// Redefines the actual mappable range of memory.
+///
+/// Fails if a region has already been mapped beyond the new upper limit.
+pub fn resize_available_memory(memory_range: &Range<usize>) -> Result<()> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    tracker.shrink(memory_range)
+}
+
+/// Initialize the memory pool for page sharing with the host.
+pub fn init_shared_pool(static_range: Option<Range<usize>>) -> Result<()> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    if let Some(mem_sharer) = get_mem_sharer() {
+        let granule = mem_sharer.granule()?;
+        tracker.init_dynamic_shared_pool(granule)
+    } else if let Some(r) = static_range {
+        tracker.init_static_shared_pool(r)
+    } else {
+        info!("Initialized shared pool from heap memory without MEM_SHARE");
+        tracker.init_heap_shared_pool()
+    }
+}
+
+/// Unshares all MMIO previously shared with the host, except for the UART page.
+pub fn unshare_all_mmio_except_uart() -> Result<()> {
+    let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return Ok(()) };
+    let Some(tracker) = locked_tracker.as_mut() else { return Ok(()) };
+    if cfg!(feature = "compat_android_13") {
+        info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
+    }
+    tracker.unshare_all_mmio()
+}
+
+/// Unshares all memory previously shared with the host.
+pub fn unshare_all_memory() {
+    let Ok(mut locked_tracker) = try_lock_memory_tracker() else { return };
+    let Some(tracker) = locked_tracker.as_mut() else { return };
+    tracker.unshare_all_memory()
+}
+
+/// Unshares the UART page, previously shared with the host.
+pub fn unshare_uart() -> Result<()> {
+    let Some(mmio_guard) = get_mmio_guard() else { return Ok(()) };
+    Ok(mmio_guard.unmap(layout::UART_PAGE_ADDR)?)
+}
+
+/// Maps the provided range as normal memory, with R/W permissions.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_data(addr: usize, size: NonZeroUsize) -> Result<()> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    let _ = tracker.alloc_mut(addr, size)?;
+    Ok(())
+}
+
+/// Maps the region potentially holding data appended to the image, with read-write permissions.
+///
+/// This fails if the footer has already been mapped.
+pub fn map_image_footer() -> Result<Range<usize>> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    tracker.map_image_footer()
+}
+
+/// Maps the provided range as normal memory, with read-only permissions.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_rodata(addr: usize, size: NonZeroUsize) -> Result<()> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    let _ = tracker.alloc(addr, size)?;
+    Ok(())
+}
+
+// TODO(ptosi): Merge this into map_rodata.
+/// Maps the provided range as normal memory, with read-only permissions.
+///
+/// # Safety
+///
+/// Callers must ensure that the `size` bytes at `addr` are valid for mapping as read-only data.
+pub unsafe fn map_rodata_outside_main_memory(addr: usize, size: NonZeroUsize) -> Result<()> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    let end = addr + usize::from(size);
+    // SAFETY: Caller has checked that it is valid to map the range.
+    let _ = unsafe { tracker.alloc_range_outside_main_memory(&(addr..end)) }?;
+    Ok(())
+}
+
+/// Maps the provided range as device memory.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_device(addr: usize, size: NonZeroUsize) -> Result<()> {
+    let mut locked_tracker = try_lock_memory_tracker()?;
+    let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+    tracker.map_mmio_range(addr..(addr + usize::from(size)))
+}
+
 #[derive(Clone, Copy, Debug, Default, PartialEq)]
 enum MemoryType {
     #[default]
@@ -57,7 +192,7 @@
 }
 
 /// Tracks non-overlapping slices of main memory.
-pub struct MemoryTracker {
+pub(crate) struct MemoryTracker {
     total: MemoryRange,
     page_table: PageTable,
     regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
@@ -72,7 +207,7 @@
     const MMIO_CAPACITY: usize = 5;
 
     /// Creates a new instance from an active page table, covering the maximum RAM size.
-    pub fn new(mut page_table: PageTable, total: MemoryRange, mmio_range: MemoryRange) -> Self {
+    fn new(mut page_table: PageTable, total: MemoryRange, mmio_range: MemoryRange) -> Self {
         assert!(
             !total.overlaps(&mmio_range),
             "MMIO space should not overlap with the main memory region."
@@ -103,7 +238,7 @@
     /// Resize the total RAM size.
     ///
     /// This function fails if it contains regions that are not included within the new size.
-    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
+    fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
         if range.start != self.total.start {
             return Err(MemoryTrackerError::DifferentBaseAddress);
         }
@@ -119,7 +254,7 @@
     }
 
     /// Allocate the address range for a const slice; returns None if failed.
-    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+    fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
         let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
         self.check_allocatable(&region)?;
         self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
@@ -135,7 +270,7 @@
     ///
     /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
     /// data.
-    pub unsafe fn alloc_range_outside_main_memory(
+    unsafe fn alloc_range_outside_main_memory(
         &mut self,
         range: &MemoryRange,
     ) -> Result<MemoryRange> {
@@ -149,7 +284,7 @@
     }
 
     /// Allocate the address range for a mutable slice; returns None if failed.
-    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+    fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
         let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
         self.check_allocatable(&region)?;
         self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
@@ -160,7 +295,7 @@
     }
 
-    /// Maps the image footer read-write, with permissions.
+    /// Maps the image footer with read-write permissions.
-    pub fn map_image_footer(&mut self) -> Result<MemoryRange> {
+    fn map_image_footer(&mut self) -> Result<MemoryRange> {
         if self.image_footer_mapped {
             return Err(MemoryTrackerError::FooterAlreadyMapped);
         }
@@ -174,18 +309,18 @@
     }
 
     /// Allocate the address range for a const slice; returns None if failed.
-    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+    fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
         self.alloc_range(&(base..(base + size.get())))
     }
 
     /// Allocate the address range for a mutable slice; returns None if failed.
-    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+    fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
         self.alloc_range_mut(&(base..(base + size.get())))
     }
 
     /// Checks that the given range of addresses is within the MMIO region, and then maps it
     /// appropriately.
-    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
+    fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
         if !range.is_within(&self.mmio_range) {
             return Err(MemoryTrackerError::OutOfRange);
         }
@@ -247,14 +382,14 @@
     }
 
     /// Unshares any MMIO region previously shared with the MMIO guard.
-    pub fn unshare_all_mmio(&mut self) -> Result<()> {
+    fn unshare_all_mmio(&mut self) -> Result<()> {
         self.mmio_sharer.unshare_all();
 
         Ok(())
     }
 
     /// Initialize the shared heap to dynamically share memory from the global allocator.
-    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
+    fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
         const INIT_CAP: usize = 10;
 
         let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
@@ -276,7 +411,7 @@
     /// of guest memory as "shared" ahead of guest starting its execution. The
     /// shared memory region is indicated in swiotlb node. On such platforms use
     /// a separate heap to allocate buffers that can be shared with host.
-    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
+    fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
         let size = NonZeroUsize::new(range.len()).unwrap();
         let range = self.alloc_mut(range.start, size)?;
         let shared_pool = LockedFrameAllocator::<32>::new();
@@ -295,7 +430,7 @@
     /// When running on "non-protected" hypervisors which permit host direct accesses to guest
     /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
     /// dedicated region so this function instructs the shared pool to use the global allocator.
-    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+    fn init_heap_shared_pool(&mut self) -> Result<()> {
         // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
         // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
         // without any actual "dynamic memory sharing" taking place and, as such, the granule may
diff --git a/libs/libvmbase/src/virtio/pci.rs b/libs/libvmbase/src/virtio/pci.rs
index 72e648b..ec89b6b 100644
--- a/libs/libvmbase/src/virtio/pci.rs
+++ b/libs/libvmbase/src/virtio/pci.rs
@@ -16,7 +16,7 @@
 
 use crate::{
     fdt::pci::PciInfo,
-    memory::{MemoryTracker, MemoryTrackerError},
+    memory::{map_device, MemoryTrackerError},
 };
 use alloc::boxed::Box;
 use core::fmt;
@@ -65,16 +65,19 @@
 /// 2. Stores the `PciInfo` for the VirtIO HAL to use later.
 /// 3. Creates and returns a `PciRoot`.
 ///
-/// This must only be called once; it will panic if it is called a second time.
-pub fn initialize(pci_info: PciInfo, memory: &mut MemoryTracker) -> Result<PciRoot, PciError> {
+/// This must only be called once and after having switched to the dynamic page tables.
+pub fn initialize(pci_info: PciInfo) -> Result<PciRoot, PciError> {
     PCI_INFO.set(Box::new(pci_info.clone())).map_err(|_| PciError::DuplicateInitialization)?;
 
-    memory.map_mmio_range(pci_info.cam_range.clone()).map_err(PciError::CamMapFailed)?;
-    let bar_range = pci_info.bar_range.start as usize..pci_info.bar_range.end as usize;
-    memory.map_mmio_range(bar_range).map_err(PciError::BarMapFailed)?;
+    let cam_start = pci_info.cam_range.start;
+    let cam_size = pci_info.cam_range.len().try_into().unwrap();
+    map_device(cam_start, cam_size).map_err(PciError::CamMapFailed)?;
 
-    // Safety: This is the only place where we call make_pci_root, and `PCI_INFO.set` above will
-    // panic if it is called a second time.
+    let bar_start = pci_info.bar_range.start.try_into().unwrap();
+    let bar_size = pci_info.bar_range.len().try_into().unwrap();
+    map_device(bar_start, bar_size).map_err(PciError::BarMapFailed)?;
+
+    // SAFETY: This is the only call to make_pci_root() and `PCI_INFO.set()` only succeeds once.
     Ok(unsafe { pci_info.make_pci_root() })
 }