vmbase: Introduce mem API & make MEMORY private

Stop allowing clients to access MEMORY directly, so that the upcoming
refactoring of memory management can be confined to libvmbase. As a
result, change the visibility of MemoryTracker and MEMORY to
pub(crate).
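
For illustration, the static is now declared along these lines inside
libvmbase (a sketch only; the exact lock type is an assumption):

    // memory/shared.rs (sketch): MEMORY is no longer reachable from
    // client crates, only from wrappers within libvmbase itself.
    pub(crate) static MEMORY: SpinMutex<Option<MemoryTracker>> =
        SpinMutex::new(None);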

Expose the functionality currently needed by clients as individual
functions that can be reused between Rialto and pvmfw, de-duplicating
some code. Again, this prepares the code for an in-vmbase-only
refactoring.
Note that some of those functions will eventually be integrated into
libvmbase's rust_entry(), simplifying clients.
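
As a sketch, each wrapper just forwards to the now-private tracker,
mirroring the expression clients used to open-code (see entry.rs
below):

    /// Unshares all memory previously shared with the host.
    pub fn unshare_all_memory() {
        MEMORY.lock().as_mut().unwrap().unshare_all_memory()
    }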

Where client code using aarch64_paging::VirtualAddress is touched,
switch it to usize, to make progress towards limiting the use of that
crate to the aarch64-specific subset of vmbase, for portability.
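
For example, an illustrative signature for one of the new helpers
(assuming it reuses the error type that entry.rs imports):

    pub fn map_image_footer() -> Result<Range<usize>, MemoryTrackerError>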

Bug: 377276983
Test: m {pvmfw,rialto,vmbase_example_{bios,kernel}}_bin
Test: atest rialto_test vmbase_example.integration_test
Change-Id: Ic510dba126200d61ad3691dce415193a0055ef8e
diff --git a/guest/pvmfw/src/entry.rs b/guest/pvmfw/src/entry.rs
index e55d4ca..0607a5a 100644
--- a/guest/pvmfw/src/entry.rs
+++ b/guest/pvmfw/src/entry.rs
@@ -17,21 +17,21 @@
 use crate::config;
 use crate::memory;
 use core::arch::asm;
-use core::mem::{drop, size_of};
+use core::mem::size_of;
 use core::ops::Range;
 use core::slice;
-use hypervisor_backends::get_mmio_guard;
 use log::error;
-use log::info;
 use log::warn;
 use log::LevelFilter;
 use vmbase::util::RangeExt as _;
 use vmbase::{
     arch::aarch64::min_dcache_line_size,
-    configure_heap, console_writeln,
-    layout::{self, crosvm, UART_PAGE_ADDR},
-    main,
-    memory::{MemoryTracker, MemoryTrackerError, MEMORY, SIZE_128KB, SIZE_4KB},
+    configure_heap, console_writeln, layout, main,
+    memory::{
+        deactivate_dynamic_page_tables, map_image_footer, switch_to_dynamic_page_tables,
+        unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart, MemoryTrackerError,
+        SIZE_128KB, SIZE_4KB,
+    },
     power::reboot,
 };
 use zeroize::Zeroize;
@@ -112,13 +112,8 @@
         error!("Failed to set up the dynamic page tables: {e}");
         RebootReason::InternalError
     })?;
-
     // Up to this point, we were using the built-in static (from .rodata) page tables.
-    MEMORY.lock().replace(MemoryTracker::new(
-        page_table,
-        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
-        crosvm::MMIO_RANGE,
-    ));
+    switch_to_dynamic_page_tables(page_table);
 
     let appended_data = get_appended_data_slice().map_err(|e| {
         error!("Failed to map the appended data: {e}");
@@ -152,27 +147,23 @@
     // Writable-dirty regions will be flushed when MemoryTracker is dropped.
     config_entries.bcc.zeroize();
 
-    info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
-    MEMORY.lock().as_mut().unwrap().unshare_all_mmio().map_err(|e| {
+    unshare_all_mmio_except_uart().map_err(|e| {
         error!("Failed to unshare MMIO ranges: {e}");
         RebootReason::InternalError
     })?;
     // Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
-    MEMORY.lock().as_mut().unwrap().unshare_all_memory();
+    unshare_all_memory();
 
-    if let Some(mmio_guard) = get_mmio_guard() {
-        if cfg!(debuggable_vms_improvements) && debuggable_payload {
-            // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
-        } else {
-            mmio_guard.unmap(UART_PAGE_ADDR).map_err(|e| {
-                error!("Failed to unshare the UART: {e}");
-                RebootReason::InternalError
-            })?;
-        }
+    if cfg!(debuggable_vms_improvements) && debuggable_payload {
+        // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
+    } else {
+        unshare_uart().map_err(|e| {
+            error!("Failed to unshare the UART: {e}");
+            RebootReason::InternalError
+        })?;
     }
 
-    // Drop MemoryTracker and deactivate page table.
-    drop(MEMORY.lock().take());
+    deactivate_dynamic_page_tables();
 
     Ok((slices.kernel.as_ptr() as usize, next_bcc))
 }
@@ -322,7 +313,7 @@
 }
 
 fn get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError> {
-    let range = MEMORY.lock().as_mut().unwrap().map_image_footer()?;
+    let range = map_image_footer()?;
     // SAFETY: This region was just mapped for the first time (as map_image_footer() didn't fail)
     // and the linker script prevents it from overlapping with other objects.
     Ok(unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) })
diff --git a/guest/pvmfw/src/main.rs b/guest/pvmfw/src/main.rs
index 612281b..bde03ff 100644
--- a/guest/pvmfw/src/main.rs
+++ b/guest/pvmfw/src/main.rs
@@ -55,7 +55,6 @@
 use vmbase::fdt::pci::{PciError, PciInfo};
 use vmbase::heap;
 use vmbase::memory::flush;
-use vmbase::memory::MEMORY;
 use vmbase::rand;
 use vmbase::virtio::pci;
 
@@ -101,7 +100,7 @@
     // Set up PCI bus for VirtIO devices.
     let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
     debug!("PCI: {:#x?}", pci_info);
-    let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
+    let mut pci_root = pci::initialize(pci_info).map_err(|e| {
         error!("Failed to initialize PCI: {e}");
         RebootReason::InternalError
     })?;
diff --git a/guest/pvmfw/src/memory.rs b/guest/pvmfw/src/memory.rs
index 64a6850..b54f014 100644
--- a/guest/pvmfw/src/memory.rs
+++ b/guest/pvmfw/src/memory.rs
@@ -23,14 +23,13 @@
 use core::ops::Range;
 use core::result;
 use core::slice;
-use hypervisor_backends::get_mem_sharer;
 use log::debug;
 use log::error;
 use log::info;
 use log::warn;
 use vmbase::{
     layout::{self, crosvm},
-    memory::{PageTable, MEMORY},
+    memory::{init_shared_pool, map_data, map_rodata, resize_available_memory, PageTable},
 };
 
 /// Region allocated for the stack.
@@ -75,13 +74,13 @@
         // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
         // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
         // overwrite with the template DT and apply the DTBO.
-        let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
+        map_data(fdt, fdt_size).map_err(|e| {
             error!("Failed to allocate the FDT range: {e}");
             RebootReason::InternalError
         })?;
 
-        // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
-        let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
+        // SAFETY: map_data() validated the range to be in main memory, mapped and non-overlapping.
+        let fdt = unsafe { slice::from_raw_parts_mut(fdt as *mut u8, fdt_size.into()) };
 
         let info = fdt::sanitize_device_tree(fdt, vm_dtbo, vm_ref_dt)?;
         let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
@@ -92,67 +91,56 @@
 
         let memory_range = info.memory_range;
         debug!("Resizing MemoryTracker to range {memory_range:#x?}");
-        MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
+        resize_available_memory(&memory_range).map_err(|e| {
             error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
             RebootReason::InvalidFdt
         })?;
 
-        if let Some(mem_sharer) = get_mem_sharer() {
-            let granule = mem_sharer.granule().map_err(|e| {
-                error!("Failed to get memory protection granule: {e}");
-                RebootReason::InternalError
-            })?;
-            MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
-                error!("Failed to initialize dynamically shared pool: {e}");
-                RebootReason::InternalError
-            })?;
-        } else {
-            let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
-                error!("Pre-shared pool range not specified in swiotlb node");
-                RebootReason::InvalidFdt
-            })?;
+        init_shared_pool(info.swiotlb_info.fixed_range()).map_err(|e| {
+            error!("Failed to initialize shared pool: {e}");
+            RebootReason::InternalError
+        })?;
 
-            MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
-                error!("Failed to initialize pre-shared pool {e}");
-                RebootReason::InvalidFdt
-            })?;
-        }
-
-        let kernel_range = if let Some(r) = info.kernel_range {
-            MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
-                error!("Failed to obtain the kernel range with DT range: {e}");
+        let (kernel_start, kernel_size) = if let Some(r) = info.kernel_range {
+            let size = r.len().try_into().map_err(|_| {
+                error!("Invalid kernel size: {:#x}", r.len());
                 RebootReason::InternalError
-            })?
+            })?;
+            (r.start, size)
         } else if cfg!(feature = "legacy") {
             warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");
-
-            let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
+            let size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
                 error!("Invalid kernel size: {kernel_size:#x}");
                 RebootReason::InvalidPayload
             })?;
-
-            MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
-                error!("Failed to obtain the kernel range with legacy range: {e}");
-                RebootReason::InternalError
-            })?
+            (kernel, size)
         } else {
             error!("Failed to locate the kernel from the DT");
             return Err(RebootReason::InvalidPayload);
         };
 
-        let kernel = kernel_range.start as *const u8;
-        // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
-        let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };
+        map_rodata(kernel_start, kernel_size).map_err(|e| {
+            error!("Failed to map kernel range: {e}");
+            RebootReason::InternalError
+        })?;
+
+        let kernel = kernel_start as *const u8;
+        // SAFETY: map_rodata() validated the range to be in main memory, mapped and non-overlapping.
+        let kernel = unsafe { slice::from_raw_parts(kernel, kernel_size.into()) };
 
         let ramdisk = if let Some(r) = info.initrd_range {
             debug!("Located ramdisk at {r:?}");
-            let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
+            let ramdisk_size = r.len().try_into().map_err(|_| {
+                error!("Invalid ramdisk size: {:#x}", r.len());
+                RebootReason::InvalidRamdisk
+            })?;
+            map_rodata(r.start, ramdisk_size).map_err(|e| {
                 error!("Failed to obtain the initrd range: {e}");
                 RebootReason::InvalidRamdisk
             })?;
 
-            // SAFETY: The region was validated by memory to be in main memory, mapped, and
-            // not overlap.
+            // SAFETY: map_rodata() validated the range to be in main memory, mapped, and
+            // non-overlapping.
             Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
         } else {
             info!("Couldn't locate the ramdisk from the device tree");