[pvmfw][vmbase] Move page table update functions to vmbase

This simplifies the upcoming move of MemoryTracker to vmbase, where it
will be reused by both rialto and pvmfw.

Bug: 284462758
Test: m pvmfw_img
Change-Id: Ic4c912caf0d5526cfe70eac2bfbfcf62ec45dacf
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 989120d..fa2d56b 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,7 +17,6 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
 use crate::helpers::PVMFW_PAGE_SIZE;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
 use aarch64_paging::MapError;
 use alloc::alloc::handle_alloc_error;
 use alloc::boxed::Box;
@@ -36,19 +35,18 @@
 use spin::mutex::SpinMutex;
 use tinyvec::ArrayVec;
 use vmbase::{
-    dsb, isb, layout,
+    dsb, layout,
     memory::{
-        flush_dirty_range, is_leaf_pte, page_4kb_of, set_dbm_enabled, MemorySharer, PageTable,
-        MMIO_LAZY_MAP_FLAG, PT_ASID, SIZE_2MB, SIZE_4KB,
+        flush_dirty_range, mark_dirty_block, mmio_guard_unmap_page, page_4kb_of, set_dbm_enabled,
+        verify_lazy_mapped_block, MemorySharer, PageTable, SIZE_2MB, SIZE_4KB,
     },
-    tlbi,
     util::{align_up, RangeExt as _},
 };
 
 /// First address that can't be translated by a level 1 TTBR0_EL1.
 pub const MAX_ADDR: usize = 1 << 40;
 
-pub type MemoryRange = Range<usize>;
+type MemoryRange = Range<usize>;
 
 pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
 unsafe impl Send for MemoryTracker {}
@@ -419,87 +417,6 @@
     Ok(())
 }
 
-/// Checks whether block flags indicate it should be MMIO guard mapped.
-fn verify_lazy_mapped_block(
-    _range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
-    }
-    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
-/// MMIO guard unmaps page
-fn mmio_guard_unmap_page(
-    va_range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
-    // This function will be called on an address range that corresponds to a device. Only if a
-    // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
-    // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
-    // mapped anyway.
-    if flags.contains(Attributes::VALID) {
-        assert!(
-            flags.contains(MMIO_LAZY_MAP_FLAG),
-            "Attempting MMIO guard unmap for non-device pages"
-        );
-        assert_eq!(
-            va_range.len(),
-            PVMFW_PAGE_SIZE,
-            "Failed to break down block mapping before MMIO guard mapping"
-        );
-        let page_base = va_range.start().0;
-        assert_eq!(page_base % PVMFW_PAGE_SIZE, 0);
-        // Since mmio_guard_map takes IPAs, if pvmfw moves non-ID address mapping, page_base
-        // should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
-        // virt_to_phys here, and just pass page_base instead.
-        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
-            error!("Error MMIO guard unmapping: {e}");
-        })?;
-    }
-    Ok(())
-}
-
-/// Clears read-only flag on a PTE, making it writable-dirty. Used when dirty state is managed
-/// in software to handle permission faults on read-only descriptors.
-fn mark_dirty_block(
-    va_range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().ok_or(())?;
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
-    if flags.contains(Attributes::DBM) {
-        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
-        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
-        // Updating the read-only bit of a PTE requires TLB invalidation.
-        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
-        // An ISB instruction is required to ensure the effects of completed TLB maintenance
-        // instructions are visible to instructions fetched afterwards.
-        // See ARM ARM E2.3.10, and G5.9.
-        tlbi!("vale1", PT_ASID, va_range.start().0);
-        dsb!("ish");
-        isb!();
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
 /// Returns memory range reserved for the appended payload.
 pub fn appended_payload_range() -> MemoryRange {
     let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 235c0e0..333d3f6 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -14,9 +14,9 @@
 
 //! Hardware management of the access flag and dirty state.
 
-use super::page_table::is_leaf_pte;
+use super::page_table::{is_leaf_pte, PT_ASID};
 use super::util::flush_region;
-use crate::{isb, read_sysreg, write_sysreg};
+use crate::{dsb, isb, read_sysreg, tlbi, write_sysreg};
 use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
 
 /// Sets whether the hardware management of access and dirty state is enabled with
@@ -68,3 +68,34 @@
     }
     Ok(())
 }
+
+/// Clears the read-only flag on a PTE, making it writable-dirty. Used when dirty state is
+/// managed in software to handle permission faults on read-only descriptors.
+/// The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be fixed here.
+#[allow(clippy::result_unit_err)]
+pub fn mark_dirty_block(
+    va_range: &MemoryRegion,
+    desc: &mut Descriptor,
+    level: usize,
+) -> Result<(), ()> {
+    let flags = desc.flags().ok_or(())?;
+    if !is_leaf_pte(&flags, level) {
+        return Ok(());
+    }
+    if flags.contains(Attributes::DBM) {
+        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
+        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
+        // Updating the read-only bit of a PTE requires TLB invalidation.
+        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
+        // An ISB instruction is required to ensure the effects of completed TLB maintenance
+        // instructions are visible to instructions fetched afterwards.
+        // See ARM ARM E2.3.10, and G5.9.
+        tlbi!("vale1", PT_ASID, va_range.start().0);
+        dsb!("ish");
+        isb!();
+        Ok(())
+    } else {
+        Err(())
+    }
+}
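
mark_dirty_block has the shape of aarch64_paging's PteUpdater callback. A
minimal sketch of how a permission-fault handler might drive it, assuming
vmbase's PageTable exposes a modify_range() that forwards to
aarch64_paging::idmap::IdMap::modify_range (the handler below is
illustrative, not part of this change):

    use aarch64_paging::paging::MemoryRegion;
    use vmbase::memory::{mark_dirty_block, page_4kb_of, PageTable, SIZE_4KB};

    fn handle_permission_fault(page_table: &mut PageTable, addr: usize) {
        let page = page_4kb_of(addr);
        let region = MemoryRegion::new(page, page + SIZE_4KB);
        // mark_dirty_block clears READ_ONLY on DBM-tagged leaf PTEs and
        // performs the required TLB maintenance (TLBI; DSB ISH; ISB).
        page_table.modify_range(&region, &mark_dirty_block).expect("mark dirty failed");
    }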
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index bb9149c..c4a54f2 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -19,9 +19,9 @@
 mod shared;
 mod util;
 
-pub use dbm::{flush_dirty_range, set_dbm_enabled};
-pub use page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG, PT_ASID};
-pub use shared::MemorySharer;
+pub use dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
+pub use page_table::PageTable;
+pub use shared::{mmio_guard_unmap_page, verify_lazy_mapped_block, MemorySharer};
 pub use util::{
     flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
     PAGE_SIZE, SIZE_2MB, SIZE_4KB, SIZE_4MB,
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index 1a9d0f8..7196e67 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -20,7 +20,7 @@
 use core::{ops::Range, result};
 
 /// Software bit used to indicate a device that should be lazily mapped.
-pub const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
+pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
 
 // We assume that:
 // - MAIR_EL1.Attr0 = "Device-nGnRE memory" (0b0000_0100)
@@ -39,7 +39,7 @@
 /// entry.S. For 4KB granule and 39-bit VA, the root level is 1.
 const PT_ROOT_LEVEL: usize = 1;
 /// Page table ASID.
-pub const PT_ASID: usize = 1;
+pub(super) const PT_ASID: usize = 1;
 
 type Result<T> = result::Result<T, MapError>;
 
@@ -123,7 +123,7 @@
 
 /// Checks whether a PTE at given level is a page or block descriptor.
 #[inline]
-pub fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
+pub(super) fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
     const LEAF_PTE_LEVEL: usize = 3;
     if flags.contains(Attributes::TABLE_OR_PAGE) {
         level == LEAF_PTE_LEVEL
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 0a2444f..568eb81 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -14,14 +14,17 @@
 
 //! Shared memory management.
 
-use super::util::virt_to_phys;
+use super::page_table::{is_leaf_pte, MMIO_LAZY_MAP_FLAG};
+use super::util::{virt_to_phys, PAGE_SIZE};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
 use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
 use alloc::vec::Vec;
 use buddy_system_allocator::FrameAllocator;
 use core::alloc::Layout;
 use core::ptr::NonNull;
+use core::result;
 use hyp::get_hypervisor;
-use log::trace;
+use log::{error, trace};
 
 /// Allocates memory on the heap and shares it with the host.
 ///
@@ -76,3 +79,62 @@
         }
     }
 }
+
+/// Checks whether the block's flags indicate that it should be MMIO guard mapped.
+/// The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be fixed here.
+#[allow(clippy::result_unit_err)]
+pub fn verify_lazy_mapped_block(
+    _range: &VaRange,
+    desc: &mut Descriptor,
+    level: usize,
+) -> result::Result<(), ()> {
+    let flags = desc.flags().expect("Unsupported PTE flags set");
+    if !is_leaf_pte(&flags, level) {
+        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
+    }
+    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+        Ok(())
+    } else {
+        Err(())
+    }
+}
+
+/// Unmaps the given page from the MMIO guard.
+/// The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be fixed here.
+#[allow(clippy::result_unit_err)]
+pub fn mmio_guard_unmap_page(
+    va_range: &VaRange,
+    desc: &mut Descriptor,
+    level: usize,
+) -> result::Result<(), ()> {
+    let flags = desc.flags().expect("Unsupported PTE flags set");
+    if !is_leaf_pte(&flags, level) {
+        return Ok(());
+    }
+    // This function will be called on an address range that corresponds to a device. Only if a
+    // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
+    // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
+    // mapped anyway.
+    if flags.contains(Attributes::VALID) {
+        assert!(
+            flags.contains(MMIO_LAZY_MAP_FLAG),
+            "Attempting MMIO guard unmap for non-device pages"
+        );
+        assert_eq!(
+            va_range.len(),
+            PAGE_SIZE,
+            "Failed to break down block mapping before MMIO guard mapping"
+        );
+        let page_base = va_range.start().0;
+        assert_eq!(page_base % PAGE_SIZE, 0);
+        // Since mmio_guard_map takes IPAs, if pvmfw moves to a non-identity address mapping,
+        // page_base should be converted to an IPA. However, since 0x0 is a valid MMIO address,
+        // we don't use virt_to_phys here and just pass page_base instead.
+        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+            error!("Error MMIO guard unmapping: {e}");
+        })?;
+    }
+    Ok(())
+}
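
The two relocated MMIO callbacks are meant to be driven in sequence over a
lazily mapped device range: a verification pass first, then the unmap pass. A
hedged sketch, again assuming a modify_range() forwarder on vmbase's
PageTable (the surrounding MemoryTracker code is outside this diff):

    use aarch64_paging::paging::MemoryRegion;
    use vmbase::memory::{mmio_guard_unmap_page, verify_lazy_mapped_block, PageTable};

    fn mmio_unmap_range(page_table: &mut PageTable, region: &MemoryRegion) -> Result<(), ()> {
        // Pass 1: check that every leaf PTE is an invalid, lazily mapped
        // device entry. Despite the &mut Descriptor signature imposed by the
        // PteUpdater type, this pass modifies nothing.
        page_table.modify_range(region, &verify_lazy_mapped_block).map_err(|_| ())?;
        // Pass 2: MMIO-guard-unmap only the pages that were actually accessed
        // (i.e. those whose PTEs carry the VALID flag).
        page_table.modify_range(region, &mmio_guard_unmap_page).map_err(|_| ())
    }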