vmbase: Introduce mem API & make MEMORY private

Stop allowing clients to access MEMORY directly, which will make it
possible to confine the refactoring of memory management to libvmbase
only. As a result, change the visibility of MemoryTracker and MEMORY to
pub(crate).
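
For illustration, the new vmbase-internal entry points can wrap the
locking that clients previously performed themselves. A minimal sketch
only, assuming MEMORY is a mutex holding an Option<MemoryTracker> (as
the pvmfw call sites below suggest); the exact types are internal to
libvmbase:

    // In libvmbase (sketch): clients no longer lock MEMORY themselves.
    pub fn switch_to_dynamic_page_tables(page_table: PageTable) {
        MEMORY.lock().replace(MemoryTracker::new(
            page_table,
            crosvm::MEM_START..layout::MAX_VIRT_ADDR,
            crosvm::MMIO_RANGE,
        ));
    }

    pub fn deactivate_dynamic_page_tables() {
        // Dropping the tracker flushes writable-dirty regions and
        // deactivates the dynamic page tables.
        drop(MEMORY.lock().take());
    }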

Expose the functionality currently needed by clients as individual
functions that can be re-used between Rialto and pvmfw, de-duplicating
some code. Again, this prepares the code for a vmbase-internal
refactoring. Note that some of these functions will eventually be
integrated into libvmbase's rust_entry(), simplifying clients.
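
For example, the unshare helpers can be thin wrappers over the existing
MemoryTracker and MMIO guard calls. A sketch only, assuming the MMIO
guard error has a From conversion into MemoryTrackerError:

    pub fn unshare_all_memory() {
        MEMORY.lock().as_mut().unwrap().unshare_all_memory();
    }

    pub fn unshare_uart() -> Result<(), MemoryTrackerError> {
        // No-op when no MMIO guard is in use.
        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.unmap(UART_PAGE_ADDR)?;
        }
        Ok(())
    }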

Where client code using aarch64_paging::VirtualAddress is touched,
switch to usize so that we make progress towards limiting the use of
that crate to the aarch64-specific subset of vmbase, for portability.
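
As an illustration of the address-type change (hypothetical signatures;
the real ones may differ):

    // Before: aarch64_paging leaks into client code.
    fn map_image_footer(&mut self)
        -> Result<Range<VirtualAddress>, MemoryTrackerError>;

    // After: clients only see plain usize addresses.
    pub fn map_image_footer() -> Result<Range<usize>, MemoryTrackerError>;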

Bug: 377276983
Test: m {pvmfw,rialto,vmbase_example_{bios,kernel}}_bin
Test: atest rialto_test vmbase_example.integration_test
Change-Id: Ic510dba126200d61ad3691dce415193a0055ef8e
diff --git a/guest/pvmfw/src/entry.rs b/guest/pvmfw/src/entry.rs
index e55d4ca..0607a5a 100644
--- a/guest/pvmfw/src/entry.rs
+++ b/guest/pvmfw/src/entry.rs
@@ -17,21 +17,21 @@
 use crate::config;
 use crate::memory;
 use core::arch::asm;
-use core::mem::{drop, size_of};
+use core::mem::size_of;
 use core::ops::Range;
 use core::slice;
-use hypervisor_backends::get_mmio_guard;
 use log::error;
-use log::info;
 use log::warn;
 use log::LevelFilter;
 use vmbase::util::RangeExt as _;
 use vmbase::{
     arch::aarch64::min_dcache_line_size,
-    configure_heap, console_writeln,
-    layout::{self, crosvm, UART_PAGE_ADDR},
-    main,
-    memory::{MemoryTracker, MemoryTrackerError, MEMORY, SIZE_128KB, SIZE_4KB},
+    configure_heap, console_writeln, layout, main,
+    memory::{
+        deactivate_dynamic_page_tables, map_image_footer, switch_to_dynamic_page_tables,
+        unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart, MemoryTrackerError,
+        SIZE_128KB, SIZE_4KB,
+    },
     power::reboot,
 };
 use zeroize::Zeroize;
@@ -112,13 +112,8 @@
         error!("Failed to set up the dynamic page tables: {e}");
         RebootReason::InternalError
     })?;
-
     // Up to this point, we were using the built-in static (from .rodata) page tables.
-    MEMORY.lock().replace(MemoryTracker::new(
-        page_table,
-        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
-        crosvm::MMIO_RANGE,
-    ));
+    switch_to_dynamic_page_tables(page_table);
 
     let appended_data = get_appended_data_slice().map_err(|e| {
         error!("Failed to map the appended data: {e}");
@@ -152,27 +147,23 @@
     // Writable-dirty regions will be flushed when MemoryTracker is dropped.
     config_entries.bcc.zeroize();
 
-    info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
-    MEMORY.lock().as_mut().unwrap().unshare_all_mmio().map_err(|e| {
+    unshare_all_mmio_except_uart().map_err(|e| {
         error!("Failed to unshare MMIO ranges: {e}");
         RebootReason::InternalError
     })?;
     // Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
-    MEMORY.lock().as_mut().unwrap().unshare_all_memory();
+    unshare_all_memory();
 
-    if let Some(mmio_guard) = get_mmio_guard() {
-        if cfg!(debuggable_vms_improvements) && debuggable_payload {
-            // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
-        } else {
-            mmio_guard.unmap(UART_PAGE_ADDR).map_err(|e| {
-                error!("Failed to unshare the UART: {e}");
-                RebootReason::InternalError
-            })?;
-        }
+    if cfg!(debuggable_vms_improvements) && debuggable_payload {
+        // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
+    } else {
+        unshare_uart().map_err(|e| {
+            error!("Failed to unshare the UART: {e}");
+            RebootReason::InternalError
+        })?;
     }
 
-    // Drop MemoryTracker and deactivate page table.
-    drop(MEMORY.lock().take());
+    deactivate_dynamic_page_tables();
 
     Ok((slices.kernel.as_ptr() as usize, next_bcc))
 }
@@ -322,7 +313,7 @@
 }
 
 fn get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError> {
-    let range = MEMORY.lock().as_mut().unwrap().map_image_footer()?;
+    let range = map_image_footer()?;
     // SAFETY: This region was just mapped for the first time (as map_image_footer() didn't fail)
     // and the linker script prevents it from overlapping with other objects.
     Ok(unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) })