Simplify lazy MMIO on-demand mapping

Now that modify_range() splits block mappings when the provided VA range
is not aligned to the block size, we can update the descriptors directly
rather than going via map_range() to recreate a page-level mapping when
taking a fault on a lazy MMIO region that may be mapped using block
descriptors.
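
Roughly (a simplified sketch; error handling and the exact closure are in
the diff below), the fault handler changes from:

    self.page_table.modify_range(&page_range, &verify_lazy_mapped_block)?;
    mmio_guard.map(page_start.0)?;
    // Recreate a page-level mapping, breaking up any block mapping.
    self.page_table.map_device(&page_range)?;

to a single in-place update of the existing, still-invalid descriptors:

    self.page_table.modify_range(&page_range, &|_, desc, _| {
        // Only the valid bit changes on the lazily mapped descriptor.
        desc.modify_flags(Attributes::VALID, Attributes::empty());
        Ok(())
    })?;
    mmio_guard.map(page_start.0)?;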

Test: build tested only
Change-Id: Ib8f99dbab3c631ff62b03e91f15558bfef8b6978
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 6c8a844..37bcf2d 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -319,14 +319,24 @@
     /// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
     fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
         let page_start = VirtualAddress(page_4kb_of(addr.0));
+        assert_eq!(page_start.0 % MMIO_GUARD_GRANULE_SIZE, 0);
         let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
         let mmio_guard = get_mmio_guard().unwrap();
+        // This is safe and free from break-before-make (BBM) violations because the initial
+        // lazy mapping has the valid bit cleared, and each descriptor that this call marks
+        // valid keeps the size and alignment of the descriptor it replaces.
         self.page_table
-            .modify_range(&page_range, &verify_lazy_mapped_block)
+            .modify_range(&page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
+                let flags = desc.flags().expect("Unsupported PTE flags set");
+                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+                    desc.modify_flags(Attributes::VALID, Attributes::empty());
+                    Ok(())
+                } else {
+                    Err(())
+                }
+            })
             .map_err(|_| MemoryTrackerError::InvalidPte)?;
-        mmio_guard.map(page_start.0)?;
-        // Maps a single device page, breaking up block mappings if necessary.
-        self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
+        Ok(mmio_guard.map(page_start.0)?)
     }
 
     /// Flush all memory regions marked as writable-dirty.
@@ -467,23 +477,6 @@
     }
 }
 
-/// Checks whether block flags indicate it should be MMIO guard mapped.
-fn verify_lazy_mapped_block(
-    _range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
-    }
-    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
 /// MMIO guard unmaps page
 fn mmio_guard_unmap_page(
     va_range: &VaRange,