Use walk_range when traversing the page tables without modifying them

modify_range() has been updated to split block entries to match the
provided range, so that modifications to the descriptor cannot
inadvertently impact adjacent mappings.
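
For illustration only (a hypothetical updater, using the
Descriptor::modify_flags() API from aarch64_paging), this is the kind
of mutating callback that relies on that splitting:

  use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};

  /// Hypothetical PteUpdater that clears READ_ONLY. Splitting ensures
  /// that a block overlapping the range boundary is broken up first,
  /// so pages outside the range keep their original attributes.
  fn make_writable(
      _va_range: &MemoryRegion,
      desc: &mut Descriptor,
      _level: usize,
  ) -> Result<(), ()> {
      desc.flags().ok_or(())?; // bail out on unsupported PTE flag bits
      desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
      Ok(())
  }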

This splitting is not always desirable, and most of our uses of
modify_range() don't make any changes to the descriptor at all, so
let's use the newly added walk_range() instead. Note that walk_range()
does not expand the region outwards to match the granularity of a
block descriptor at the given level, so we need to derive that
granularity from the level argument where needed.
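
As a sketch of that calculation (assuming the 4KiB granule, so
PAGE_SIZE = 4096 and BITS_PER_LEVEL = 9 as in aarch64_paging, with
leaf entries at level 3):

  const PAGE_SIZE: usize = 4096; // aarch64_paging::paging::PAGE_SIZE
  const BITS_PER_LEVEL: usize = 9; // 512 entries per table level

  /// Bytes covered by one page/block descriptor at `level`.
  const fn granule_at_level(level: usize) -> usize {
      PAGE_SIZE << ((3 - level) * BITS_PER_LEVEL)
  }

Level 3 gives 4KiB, level 2 gives 2MiB and level 1 gives 1GiB;
inverting this relation is what the new MMIO_GUARD_GRANULE_LEVEL
constant in shared.rs does for MMIO_GUARD_GRANULE_SIZE.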

Test: build tested only
Change-Id: I154f7e7f64c58613dd409c17a7125c92db259314
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 401022e..45f409c 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -52,14 +52,10 @@
 /// Flushes a memory range the descriptor refers to, if the descriptor is in writable-dirty state.
 pub(super) fn flush_dirty_range(
     va_range: &MemoryRegion,
-    desc: &mut Descriptor,
-    level: usize,
+    desc: &Descriptor,
+    _level: usize,
 ) -> Result<(), ()> {
-    // Only flush ranges corresponding to dirty leaf PTEs.
     let flags = desc.flags().ok_or(())?;
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
     if !flags.contains(Attributes::READ_ONLY) {
         flush_region(va_range.start().0, va_range.len());
     }
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index e067e96..e355d4d 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -16,7 +16,7 @@
 
 use crate::read_sysreg;
 use aarch64_paging::idmap::IdMap;
-use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion, PteUpdater};
 use aarch64_paging::MapError;
 use core::result;
 
@@ -127,6 +127,16 @@
     pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<()> {
         self.idmap.modify_range(range, f)
     }
+
+    /// Applies the provided callback function to a number of PTEs corresponding to a given memory
+    /// range.
+    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &F) -> Result<()>
+    where
+        F: Fn(&MemoryRegion, &Descriptor, usize) -> result::Result<(), ()>,
+    {
+        let mut callback = |mr: &MemoryRegion, d: &Descriptor, l: usize| f(mr, d, l);
+        self.idmap.walk_range(range, &mut callback)
+    }
 }
 
 /// Checks whether a PTE at given level is a page or block descriptor.
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 37bcf2d..dd433d4 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -16,12 +16,14 @@
 
 use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
 use super::error::MemoryTrackerError;
-use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
+use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
 use super::util::{page_4kb_of, virt_to_phys};
 use crate::dsb;
 use crate::exceptions::HandleExceptionError;
 use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
+use aarch64_paging::paging::{
+    Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, BITS_PER_LEVEL, PAGE_SIZE,
+};
 use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
 use alloc::boxed::Box;
 use alloc::vec::Vec;
@@ -253,7 +255,7 @@
         if get_mmio_guard().is_some() {
             for range in &self.mmio_regions {
                 self.page_table
-                    .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+                    .walk_range(&get_va_range(range), &mmio_guard_unmap_page)
                     .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
             }
         }
@@ -350,7 +352,7 @@
         // Now flush writable-dirty pages in those regions.
         for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
             self.page_table
-                .modify_range(&get_va_range(range), &flush_dirty_range)
+                .walk_range(&get_va_range(range), &flush_dirty_range)
                 .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
         }
         Ok(())
@@ -480,13 +482,10 @@
 /// MMIO guard unmaps page
 fn mmio_guard_unmap_page(
     va_range: &VaRange,
-    desc: &mut Descriptor,
+    desc: &Descriptor,
     level: usize,
 ) -> result::Result<(), ()> {
     let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
     // This function will be called on an address range that corresponds to a device. Only if a
     // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
     // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
@@ -496,9 +495,11 @@
             flags.contains(MMIO_LAZY_MAP_FLAG),
             "Attempting MMIO guard unmap for non-device pages"
         );
+        const MMIO_GUARD_GRANULE_SHIFT: u32 = MMIO_GUARD_GRANULE_SIZE.ilog2() - PAGE_SIZE.ilog2();
+        const MMIO_GUARD_GRANULE_LEVEL: usize =
+            3 - (MMIO_GUARD_GRANULE_SHIFT as usize / BITS_PER_LEVEL);
         assert_eq!(
-            va_range.len(),
-            MMIO_GUARD_GRANULE_SIZE,
+            level, MMIO_GUARD_GRANULE_LEVEL,
             "Failed to break down block mapping before MMIO guard mapping"
         );
         let page_base = va_range.start().0;