vmbase: Clean up MMIO_GUARD in MemoryTracker

Ensure that we panic if the share fails, to avoid having a valid S1
mapping for an invalid S2 mapping.
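
A rough standalone illustration of the ordering this enforces, using
hypothetical stand-ins (share_with_host, mark_s1_pte_valid) rather than
the vmbase API:

    fn handle_mmio_fault(page: usize) -> Result<(), &'static str> {
        share_with_host(page)?; // S2: MMIO_GUARD_MAP hypercall first...
        mark_s1_pte_valid(page) // ...so a failed share never validates S1.
    }

    fn share_with_host(_page: usize) -> Result<(), &'static str> {
        Ok(())
    }

    fn mark_s1_pte_valid(_page: usize) -> Result<(), &'static str> {
        Ok(())
    }

    fn main() {
        // The caller treats any Err as fatal, matching the intent above.
        handle_mmio_fault(0x9000_0000).expect("failed to handle MMIO fault");
    }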

Rename mmio_unmap_all() to unshare_all_mmio(), as it doesn't actually
unmap anything from S1, and to mimic unshare_all_memory().
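
A stubbed sketch of the distinction (not the vmbase MMIO guard API):
the method only undoes the guard shares, while S1 stays mapped:

    fn unshare_all_mmio(
        pages: &[usize],
        unshare: &dyn Fn(usize) -> Result<(), ()>,
    ) -> Result<(), ()> {
        for &page in pages {
            // S2 unshare only; the S1 page-table entry is deliberately kept.
            unshare(page)?;
        }
        Ok(())
    }

    fn main() {
        let shared = [0x9000_0000usize, 0x9000_1000];
        unshare_all_mmio(&shared, &|_| Ok(())).expect("unshare failed");
    }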

Untangle the confusion between PAGE_SIZE and MMIO_GUARD_GRANULE_SIZE.
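
The cleanup leans on a compile-time equality check; a self-contained
sketch of the same pattern with the static_assertions crate (constant
values here are illustrative):

    use static_assertions::const_assert_eq;

    const PAGE_SIZE: usize = 4096;
    const MMIO_GUARD_GRANULE_SIZE: usize = 4096; // illustrative value

    // Compilation fails if the guard granule ever diverges from the page
    // size, catching any code that uses the two names interchangeably.
    const_assert_eq!(MMIO_GUARD_GRANULE_SIZE, PAGE_SIZE);

    fn main() {}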

Introduce map_lazy_mmio_as_valid() for encapsulation.
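
Roughly, the helper's contract, sketched with a stub PTE type in place
of the aarch64-paging descriptors:

    #[derive(Clone, Copy, PartialEq)]
    enum Pte {
        LazyMmio, // invalid descriptor carrying the lazy-MMIO flag
        Valid,    // valid MMIO descriptor
        Other,
    }

    // Flip every lazy-MMIO PTE in the range to valid; reject anything else.
    // No break-before-make hazard: each entry was invalid until this point.
    fn map_lazy_mmio_as_valid(range: &mut [Pte]) -> Result<(), &'static str> {
        for pte in range.iter_mut() {
            match *pte {
                Pte::LazyMmio => *pte = Pte::Valid,
                _ => return Err("InvalidPte"),
            }
        }
        Ok(())
    }

    fn main() {
        let mut range = [Pte::LazyMmio; 4];
        map_lazy_mmio_as_valid(&mut range).unwrap();
        assert!(range.iter().all(|&p| p == Pte::Valid));
        assert!(map_lazy_mmio_as_valid(&mut [Pte::Other]).is_err());
    }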

Test: m libpvmfw libvmbase_example librialto
Change-Id: I2e3573267c062fd18f0240665e14f74424d2632f
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 61ea65b..43822a5 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -249,7 +249,7 @@
     config_entries.bcc.zeroize();
 
     info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
-    MEMORY.lock().as_mut().unwrap().mmio_unmap_all().map_err(|e| {
+    MEMORY.lock().as_mut().unwrap().unshare_all_mmio().map_err(|e| {
         error!("Failed to unshare MMIO ranges: {e}");
         RebootReason::InternalError
     })?;
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index 17f4bda..f01e8aa 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -84,6 +84,7 @@
         "libonce_cell_nostd",
         "libsmccc",
         "libspin_nostd",
+        "libstatic_assertions",
         "libtinyvec_nostd",
         "libuuid_nostd",
         "libvirtio_drivers",
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 8fda7a6..6e70e6a 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -39,6 +39,7 @@
 use log::{debug, error, trace};
 use once_cell::race::OnceBox;
 use spin::mutex::SpinMutex;
+use static_assertions::const_assert_eq;
 use tinyvec::ArrayVec;
 
 /// A global static variable representing the system memory tracker, protected by a spin mutex.
@@ -248,10 +249,8 @@
         Ok(self.regions.last().unwrap().range.clone())
     }
 
-    /// Unmaps all tracked MMIO regions from the MMIO guard.
-    ///
-    /// Note that they are not unmapped from the page table.
-    pub fn mmio_unmap_all(&mut self) -> Result<()> {
+    /// Unshares any MMIO region previously shared with the MMIO guard.
+    pub fn unshare_all_mmio(&mut self) -> Result<()> {
         if get_mmio_guard().is_some() {
             for range in &self.mmio_regions {
                 self.page_table
@@ -322,13 +321,25 @@
     fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
         let page_start = VirtualAddress(page_4kb_of(addr.0));
         assert_eq!(page_start.0 % MMIO_GUARD_GRANULE_SIZE, 0);
-        let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
+        const_assert_eq!(MMIO_GUARD_GRANULE_SIZE, PAGE_SIZE); // For good measure.
+        let page_range: VaRange = (page_start..page_start + PAGE_SIZE).into();
+
         let mmio_guard = get_mmio_guard().unwrap();
+        mmio_guard.map(page_start.0)?;
+        self.map_lazy_mmio_as_valid(&page_range)?;
+
+        Ok(())
+    }
+
+    /// Modify the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
+    ///
+    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
+    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
         // This must be safe and free from break-before-make (BBM) violations, given that the
         // initial lazy mapping has the valid bit cleared, and each newly created valid descriptor
         // created inside the mapping has the same size and alignment.
         self.page_table
-            .modify_range(&page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
+            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                 let flags = desc.flags().expect("Unsupported PTE flags set");
                 if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                     desc.modify_flags(Attributes::VALID, Attributes::empty());
@@ -337,8 +348,7 @@
                     Err(())
                 }
             })
-            .map_err(|_| MemoryTrackerError::InvalidPte)?;
-        Ok(mmio_guard.map(page_start.0)?)
+            .map_err(|_| MemoryTrackerError::InvalidPte)
     }
 
     /// Flush all memory regions marked as writable-dirty.