Merge changes Ia90e2c0a,Ie30d7b43

* changes:
  Update kernel to builds 10280421
  Update kernel to builds 10280421
diff --git a/libs/hyp/src/hypervisor/common.rs b/libs/hyp/src/hypervisor/common.rs
index accef72..ec7d168 100644
--- a/libs/hyp/src/hypervisor/common.rs
+++ b/libs/hyp/src/hypervisor/common.rs
@@ -15,8 +15,12 @@
 //! This module groups some common traits shared by all the hypervisors.
 
 use crate::error::Result;
+use crate::util::SIZE_4KB;
 use bitflags::bitflags;
 
+/// Expected MMIO guard granule size, validated during MMIO guard initialization.
+pub const MMIO_GUARD_GRANULE_SIZE: usize = SIZE_4KB;
+
 bitflags! {
     /// Capabilities that Hypervisor backends can declare support for.
     pub struct HypervisorCap: u32 {
diff --git a/libs/hyp/src/hypervisor/gunyah.rs b/libs/hyp/src/hypervisor/gunyah.rs
index b335c87..252430f 100644
--- a/libs/hyp/src/hypervisor/gunyah.rs
+++ b/libs/hyp/src/hypervisor/gunyah.rs
@@ -1,6 +1,5 @@
-use super::common::{Hypervisor, HypervisorCap};
+use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
 use crate::error::Result;
-use crate::util::SIZE_4KB;
 use uuid::{uuid, Uuid};
 
 pub(super) struct GunyahHypervisor;
@@ -31,7 +30,7 @@
     }
 
     fn memory_protection_granule(&self) -> Result<usize> {
-        Ok(SIZE_4KB)
+        Ok(MMIO_GUARD_GRANULE_SIZE)
     }
 
     fn has_cap(&self, _cap: HypervisorCap) -> bool {
diff --git a/libs/hyp/src/hypervisor/kvm.rs b/libs/hyp/src/hypervisor/kvm.rs
index 08eb891..a89f9b8 100644
--- a/libs/hyp/src/hypervisor/kvm.rs
+++ b/libs/hyp/src/hypervisor/kvm.rs
@@ -14,9 +14,9 @@
 
 //! Wrappers around calls to the KVM hypervisor.
 
-use super::common::{Hypervisor, HypervisorCap};
+use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
 use crate::error::{Error, Result};
-use crate::util::{page_address, SIZE_4KB};
+use crate::util::page_address;
 use core::fmt::{self, Display, Formatter};
 use smccc::{
     error::{positive_or_error_64, success_or_error_32, success_or_error_64},
@@ -83,7 +83,7 @@
     fn mmio_guard_init(&self) -> Result<()> {
         mmio_guard_enroll()?;
         let mmio_granule = mmio_guard_granule()?;
-        if mmio_granule != SIZE_4KB {
+        if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
             return Err(Error::UnsupportedMmioGuardGranule(mmio_granule));
         }
         Ok(())
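
With the granule constant now owned by common.rs and re-exported from the crate root (see the lib.rs hunk below), callers get a single source of truth for the expected granule. A minimal client-side sketch, assuming only the hyp API visible in this change (`ensure_mmio_page` itself is hypothetical):

    use hyp::{get_hypervisor, Result, MMIO_GUARD_GRANULE_SIZE};

    /// Hypothetical helper: enrolls with the MMIO guard, then guard-maps the
    /// granule-aligned page at `mmio_addr`.
    fn ensure_mmio_page(mmio_addr: usize) -> Result<()> {
        // mmio_guard_init() enrolls and fails with
        // Error::UnsupportedMmioGuardGranule if the hypervisor's granule is
        // not MMIO_GUARD_GRANULE_SIZE (4 KiB).
        get_hypervisor().mmio_guard_init()?;
        assert_eq!(mmio_addr % MMIO_GUARD_GRANULE_SIZE, 0);
        get_hypervisor().mmio_guard_map(mmio_addr)?;
        Ok(())
    }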
diff --git a/libs/hyp/src/hypervisor/mod.rs b/libs/hyp/src/hypervisor/mod.rs
index 394da2c..923a21d 100644
--- a/libs/hyp/src/hypervisor/mod.rs
+++ b/libs/hyp/src/hypervisor/mod.rs
@@ -24,6 +24,7 @@
 use alloc::boxed::Box;
 pub use common::Hypervisor;
 pub use common::HypervisorCap;
+pub use common::MMIO_GUARD_GRANULE_SIZE;
 use gunyah::GunyahHypervisor;
 pub use kvm::KvmError;
 use kvm::KvmHypervisor;
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 694f957..2c2d1d6 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -21,4 +21,4 @@
 mod util;
 
 pub use error::{Error, Result};
-pub use hypervisor::{get_hypervisor, Hypervisor, HypervisorCap, KvmError};
+pub use hypervisor::{get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE};
diff --git a/pvmfw/src/exceptions.rs b/pvmfw/src/exceptions.rs
index a10c300..4d376cd 100644
--- a/pvmfw/src/exceptions.rs
+++ b/pvmfw/src/exceptions.rs
@@ -14,11 +14,11 @@
 
 //! Exception handlers.
 
-use crate::memory::{MemoryTrackerError, MEMORY};
+use crate::memory::MEMORY;
 use core::fmt;
 use vmbase::console;
 use vmbase::logger;
-use vmbase::memory::page_4kb_of;
+use vmbase::memory::{page_4kb_of, MemoryTrackerError};
 use vmbase::read_sysreg;
 use vmbase::{eprintln, power::reboot};
 
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 989120d..c34afc9 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,13 +17,11 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
 use crate::helpers::PVMFW_PAGE_SIZE;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
 use aarch64_paging::MapError;
 use alloc::alloc::handle_alloc_error;
 use alloc::boxed::Box;
 use buddy_system_allocator::LockedFrameAllocator;
 use core::alloc::Layout;
-use core::fmt;
 use core::iter::once;
 use core::num::NonZeroUsize;
 use core::ops::Range;
@@ -36,19 +34,18 @@
 use spin::mutex::SpinMutex;
 use tinyvec::ArrayVec;
 use vmbase::{
-    dsb, isb, layout,
+    dsb, layout,
     memory::{
-        flush_dirty_range, is_leaf_pte, page_4kb_of, set_dbm_enabled, MemorySharer, PageTable,
-        MMIO_LAZY_MAP_FLAG, PT_ASID, SIZE_2MB, SIZE_4KB,
+        flush_dirty_range, mark_dirty_block, mmio_guard_unmap_page, page_4kb_of, set_dbm_enabled,
+        verify_lazy_mapped_block, MemorySharer, MemoryTrackerError, PageTable, SIZE_2MB, SIZE_4KB,
     },
-    tlbi,
     util::{align_up, RangeExt as _},
 };
 
 /// First address that can't be translated by a level 1 TTBR0_EL1.
 pub const MAX_ADDR: usize = 1 << 40;
 
-pub type MemoryRange = Range<usize>;
+type MemoryRange = Range<usize>;
 
 pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
 unsafe impl Send for MemoryTracker {}
@@ -76,66 +73,6 @@
     payload_range: MemoryRange,
 }
 
-/// Errors for MemoryTracker operations.
-#[derive(Debug, Clone)]
-pub enum MemoryTrackerError {
-    /// Tried to modify the memory base address.
-    DifferentBaseAddress,
-    /// Tried to shrink to a larger memory size.
-    SizeTooLarge,
-    /// Tracked regions would not fit in memory size.
-    SizeTooSmall,
-    /// Reached limit number of tracked regions.
-    Full,
-    /// Region is out of the tracked memory address space.
-    OutOfRange,
-    /// New region overlaps with tracked regions.
-    Overlaps,
-    /// Region couldn't be mapped.
-    FailedToMap,
-    /// Region couldn't be unmapped.
-    FailedToUnmap,
-    /// Error from the interaction with the hypervisor.
-    Hypervisor(hyp::Error),
-    /// Failure to set `SHARED_MEMORY`.
-    SharedMemorySetFailure,
-    /// Failure to set `SHARED_POOL`.
-    SharedPoolSetFailure,
-    /// Invalid page table entry.
-    InvalidPte,
-    /// Failed to flush memory region.
-    FlushRegionFailed,
-    /// Failed to set PTE dirty state.
-    SetPteDirtyFailed,
-}
-
-impl fmt::Display for MemoryTrackerError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            Self::DifferentBaseAddress => write!(f, "Received different base address"),
-            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
-            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
-            Self::Full => write!(f, "Reached limit number of tracked regions"),
-            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
-            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
-            Self::FailedToMap => write!(f, "Failed to map the new region"),
-            Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
-            Self::Hypervisor(e) => e.fmt(f),
-            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
-            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
-            Self::InvalidPte => write!(f, "Page table entry is not valid"),
-            Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
-            Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
-        }
-    }
-}
-
-impl From<hyp::Error> for MemoryTrackerError {
-    fn from(e: hyp::Error) -> Self {
-        Self::Hypervisor(e)
-    }
-}
-
 type Result<T> = result::Result<T, MemoryTrackerError>;
 
 static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
@@ -419,87 +356,6 @@
     Ok(())
 }
 
-/// Checks whether block flags indicate it should be MMIO guard mapped.
-fn verify_lazy_mapped_block(
-    _range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
-    }
-    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
-/// MMIO guard unmaps page
-fn mmio_guard_unmap_page(
-    va_range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().expect("Unsupported PTE flags set");
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
-    // This function will be called on an address range that corresponds to a device. Only if a
-    // page has been accessed (written to or read from), will it contain the VALID flag and be MMIO
-    // guard mapped. Therefore, we can skip unmapping invalid pages, they were never MMIO guard
-    // mapped anyway.
-    if flags.contains(Attributes::VALID) {
-        assert!(
-            flags.contains(MMIO_LAZY_MAP_FLAG),
-            "Attempting MMIO guard unmap for non-device pages"
-        );
-        assert_eq!(
-            va_range.len(),
-            PVMFW_PAGE_SIZE,
-            "Failed to break down block mapping before MMIO guard mapping"
-        );
-        let page_base = va_range.start().0;
-        assert_eq!(page_base % PVMFW_PAGE_SIZE, 0);
-        // Since mmio_guard_map takes IPAs, if pvmfw moves non-ID address mapping, page_base
-        // should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
-        // virt_to_phys here, and just pass page_base instead.
-        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
-            error!("Error MMIO guard unmapping: {e}");
-        })?;
-    }
-    Ok(())
-}
-
-/// Clears read-only flag on a PTE, making it writable-dirty. Used when dirty state is managed
-/// in software to handle permission faults on read-only descriptors.
-fn mark_dirty_block(
-    va_range: &VaRange,
-    desc: &mut Descriptor,
-    level: usize,
-) -> result::Result<(), ()> {
-    let flags = desc.flags().ok_or(())?;
-    if !is_leaf_pte(&flags, level) {
-        return Ok(());
-    }
-    if flags.contains(Attributes::DBM) {
-        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
-        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
-        // Updating the read-only bit of a PTE requires TLB invalidation.
-        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
-        // An ISB instruction is required to ensure the effects of completed TLB maintenance
-        // instructions are visible to instructions fetched afterwards.
-        // See ARM ARM E2.3.10, and G5.9.
-        tlbi!("vale1", PT_ASID, va_range.start().0);
-        dsb!("ish");
-        isb!();
-        Ok(())
-    } else {
-        Err(())
-    }
-}
-
 /// Returns memory range reserved for the appended payload.
 pub fn appended_payload_range() -> MemoryRange {
     let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index bc5ab2c..9736aa4 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -69,15 +69,19 @@
     Ok(())
 }
 
-fn try_init_logger() -> Result<()> {
-    match get_hypervisor().mmio_guard_init() {
+fn try_init_logger() -> Result<bool> {
+    let mmio_guard_supported = match get_hypervisor().mmio_guard_init() {
         // pKVM blocks MMIO by default, so we need to enable MMIO guard to support logging.
-        Ok(()) => get_hypervisor().mmio_guard_map(vmbase::console::BASE_ADDRESS)?,
+        Ok(()) => {
+            get_hypervisor().mmio_guard_map(vmbase::console::BASE_ADDRESS)?;
+            true
+        }
         // MMIO guard enroll is not supported in unprotected VMs.
-        Err(hyp::Error::MmioGuardNotsupported) => {}
+        Err(hyp::Error::MmioGuardNotsupported) => false,
         Err(e) => return Err(e.into()),
     };
-    vmbase::logger::init(log::LevelFilter::Debug).map_err(|_| Error::LoggerInit)
+    vmbase::logger::init(log::LevelFilter::Debug).map_err(|_| Error::LoggerInit)?;
+    Ok(mmio_guard_supported)
 }
 
 /// # Safety
@@ -96,19 +100,39 @@
     Ok(())
 }
 
+fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
+    if !mmio_guard_supported {
+        return Ok(());
+    }
+    info!("Starting unsharing memory...");
+
+    // TODO(b/284462758): Unshare all the memory here.
+
+    // No logging after unmapping UART.
+    get_hypervisor().mmio_guard_unmap(vmbase::console::BASE_ADDRESS)?;
+    Ok(())
+}
+
+fn unshare_all_memory(mmio_guard_supported: bool) {
+    if let Err(e) = try_unshare_all_memory(mmio_guard_supported) {
+        error!("Failed to unshare the memory: {e}");
+    }
+}
+
 /// Entry point for Rialto.
 pub fn main(fdt_addr: u64, _a1: u64, _a2: u64, _a3: u64) {
     init_heap();
-    if try_init_logger().is_err() {
+    let Ok(mmio_guard_supported) = try_init_logger() else {
         // Don't log anything if the logger initialization fails.
         reboot();
-    }
+    };
     // SAFETY: `fdt_addr` is supposed to be a valid pointer and points to
     // a valid `Fdt`.
     match unsafe { try_main(fdt_addr as usize) } {
-        Ok(()) => info!("Rialto ends successfully."),
+        Ok(()) => unshare_all_memory(mmio_guard_supported),
         Err(e) => {
             error!("Rialto failed with {e}");
+            unshare_all_memory(mmio_guard_supported);
             reboot()
         }
     }
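
A side note on the new main(): a `let ... else` binding requires the else-block to diverge, so this pattern only compiles because reboot() never returns. A standalone sketch of the pattern with placeholder bodies (none of these stubs are the real implementations):

    fn reboot() -> ! {
        loop {} // Placeholder; the real vmbase::power::reboot() resets the VM.
    }

    fn try_init_logger() -> Result<bool, ()> {
        Ok(true) // Placeholder standing in for the real initialization.
    }

    fn run() {
        // The else-branch must diverge; reboot() returning `!` satisfies that,
        // so `mmio_guard_supported` is always initialized past this point.
        let Ok(mmio_guard_supported) = try_init_logger() else {
            reboot();
        };
        let _ = mmio_guard_supported;
    }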
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 235c0e0..333d3f6 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -14,9 +14,9 @@
 
 //! Hardware management of the access flag and dirty state.
 
-use super::page_table::is_leaf_pte;
+use super::page_table::{is_leaf_pte, PT_ASID};
 use super::util::flush_region;
-use crate::{isb, read_sysreg, write_sysreg};
+use crate::{dsb, isb, read_sysreg, tlbi, write_sysreg};
 use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
 
 /// Sets whether the hardware management of access and dirty state is enabled with
@@ -68,3 +68,34 @@
     }
     Ok(())
 }
+
+/// Clears the read-only flag on a PTE, making it writable-dirty. Used when dirty state is
+/// managed in software to handle permission faults on read-only descriptors.
+/// The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be avoided here.
+#[allow(clippy::result_unit_err)]
+pub fn mark_dirty_block(
+    va_range: &MemoryRegion,
+    desc: &mut Descriptor,
+    level: usize,
+) -> Result<(), ()> {
+    let flags = desc.flags().ok_or(())?;
+    if !is_leaf_pte(&flags, level) {
+        return Ok(());
+    }
+    if flags.contains(Attributes::DBM) {
+        assert!(flags.contains(Attributes::READ_ONLY), "unexpected PTE writable state");
+        desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
+        // Updating the read-only bit of a PTE requires TLB invalidation.
+        // A TLB maintenance instruction is only guaranteed to be complete after a DSB instruction.
+        // An ISB instruction is required to ensure the effects of completed TLB maintenance
+        // instructions are visible to instructions fetched afterwards.
+        // See ARM ARM E2.3.10, and G5.9.
+        tlbi!("vale1", PT_ASID, va_range.start().0);
+        dsb!("ish");
+        isb!();
+        Ok(())
+    } else {
+        Err(())
+    }
+}
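
mark_dirty_block keeps the `(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()>` shape because it is meant to be passed as a range-walk callback to aarch64_paging, which is also why the `clippy::result_unit_err` allow cannot be avoided. A sketch of a permission-fault handler applying it to the faulting page; `handle_permission_fault` is hypothetical, and `PageTable::modify_range` is assumed to forward the callback to aarch64_paging's `Mapping::modify_range`:

    use aarch64_paging::paging::MemoryRegion;
    use vmbase::memory::{mark_dirty_block, page_4kb_of, PageTable, SIZE_4KB};

    /// Hypothetical fault-handler fragment, not part of this change.
    fn handle_permission_fault(page_table: &mut PageTable, far: usize) -> Result<(), ()> {
        // Restrict the walk to the 4 KiB page named by the fault address.
        let page = page_4kb_of(far);
        let region = MemoryRegion::new(page, page + SIZE_4KB);
        // mark_dirty_block clears READ_ONLY on DBM-tagged leaf PTEs and runs
        // the TLBI/DSB/ISB sequence so the retried write can proceed.
        page_table.modify_range(&region, &mark_dirty_block).map_err(|_| ())
    }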
diff --git a/vmbase/src/memory/error.rs b/vmbase/src/memory/error.rs
new file mode 100644
index 0000000..273db56
--- /dev/null
+++ b/vmbase/src/memory/error.rs
@@ -0,0 +1,77 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Errors relating to memory management.
+
+use core::fmt;
+
+/// Errors for MemoryTracker operations.
+#[derive(Debug, Clone)]
+pub enum MemoryTrackerError {
+    /// Tried to modify the memory base address.
+    DifferentBaseAddress,
+    /// Tried to shrink to a larger memory size.
+    SizeTooLarge,
+    /// Tracked regions would not fit in memory size.
+    SizeTooSmall,
+    /// Reached limit number of tracked regions.
+    Full,
+    /// Region is out of the tracked memory address space.
+    OutOfRange,
+    /// New region overlaps with tracked regions.
+    Overlaps,
+    /// Region couldn't be mapped.
+    FailedToMap,
+    /// Region couldn't be unmapped.
+    FailedToUnmap,
+    /// Error from the interaction with the hypervisor.
+    Hypervisor(hyp::Error),
+    /// Failure to set `SHARED_MEMORY`.
+    SharedMemorySetFailure,
+    /// Failure to set `SHARED_POOL`.
+    SharedPoolSetFailure,
+    /// Invalid page table entry.
+    InvalidPte,
+    /// Failed to flush memory region.
+    FlushRegionFailed,
+    /// Failed to set PTE dirty state.
+    SetPteDirtyFailed,
+}
+
+impl fmt::Display for MemoryTrackerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::DifferentBaseAddress => write!(f, "Received different base address"),
+            Self::SizeTooLarge => write!(f, "Tried to shrink to a larger memory size"),
+            Self::SizeTooSmall => write!(f, "Tracked regions would not fit in memory size"),
+            Self::Full => write!(f, "Reached limit number of tracked regions"),
+            Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
+            Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
+            Self::FailedToMap => write!(f, "Failed to map the new region"),
+            Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
+            Self::Hypervisor(e) => e.fmt(f),
+            Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
+            Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
+            Self::InvalidPte => write!(f, "Page table entry is not valid"),
+            Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
+            Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
+        }
+    }
+}
+
+impl From<hyp::Error> for MemoryTrackerError {
+    fn from(e: hyp::Error) -> Self {
+        Self::Hypervisor(e)
+    }
+}
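
Because the From<hyp::Error> impl moves along with the type, any vmbase client can keep using `?` on hypervisor calls in functions returning this error. A small sketch, reusing the mmio_guard_map call seen in the rialto hunk above (`guard_map_page` is hypothetical):

    use vmbase::memory::MemoryTrackerError;

    type Result<T> = core::result::Result<T, MemoryTrackerError>;

    /// Hypothetical helper: guard-maps one MMIO page; `?` converts
    /// hyp::Error into MemoryTrackerError::Hypervisor via the From impl.
    fn guard_map_page(ipa: usize) -> Result<()> {
        hyp::get_hypervisor().mmio_guard_map(ipa)?;
        Ok(())
    }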
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index bb9149c..f919bcd 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -15,13 +15,15 @@
 //! Memory management.
 
 mod dbm;
+mod error;
 mod page_table;
 mod shared;
 mod util;
 
-pub use dbm::{flush_dirty_range, set_dbm_enabled};
-pub use page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG, PT_ASID};
-pub use shared::MemorySharer;
+pub use dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
+pub use error::MemoryTrackerError;
+pub use page_table::PageTable;
+pub use shared::{mmio_guard_unmap_page, verify_lazy_mapped_block, MemorySharer};
 pub use util::{
     flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
     PAGE_SIZE, SIZE_2MB, SIZE_4KB, SIZE_4MB,
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index 1a9d0f8..7196e67 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -20,7 +20,7 @@
 use core::{ops::Range, result};
 
 /// Software bit used to indicate a device that should be lazily mapped.
-pub const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
+pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
 
 // We assume that:
 // - MAIR_EL1.Attr0 = "Device-nGnRE memory" (0b0000_0100)
@@ -39,7 +39,7 @@
 /// entry.S. For 4KB granule and 39-bit VA, the root level is 1.
 const PT_ROOT_LEVEL: usize = 1;
 /// Page table ASID.
-pub const PT_ASID: usize = 1;
+pub(super) const PT_ASID: usize = 1;
 
 type Result<T> = result::Result<T, MapError>;
 
@@ -123,7 +123,7 @@
 
 /// Checks whether a PTE at a given level is a page or block descriptor.
 #[inline]
-pub fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
+pub(super) fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
     const LEAF_PTE_LEVEL: usize = 3;
     if flags.contains(Attributes::TABLE_OR_PAGE) {
         level == LEAF_PTE_LEVEL
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 0a2444f..5284e30 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -14,14 +14,17 @@
 
 //! Shared memory management.
 
+use super::page_table::{is_leaf_pte, MMIO_LAZY_MAP_FLAG};
 use super::util::virt_to_phys;
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
 use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
 use alloc::vec::Vec;
 use buddy_system_allocator::FrameAllocator;
 use core::alloc::Layout;
 use core::ptr::NonNull;
-use hyp::get_hypervisor;
-use log::trace;
+use core::result;
+use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
+use log::{error, trace};
 
 /// Allocates memory on the heap and shares it with the host.
 ///
@@ -76,3 +79,62 @@
         }
     }
 }
+
+/// Checks whether the block's flags indicate that it should be MMIO guard mapped.
+/// The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be avoided here.
+#[allow(clippy::result_unit_err)]
+pub fn verify_lazy_mapped_block(
+    _range: &VaRange,
+    desc: &mut Descriptor,
+    level: usize,
+) -> result::Result<(), ()> {
+    let flags = desc.flags().expect("Unsupported PTE flags set");
+    if !is_leaf_pte(&flags, level) {
+        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
+    }
+    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+        Ok(())
+    } else {
+        Err(())
+    }
+}
+
+/// MMIO guard unmaps a single page.
+/// The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be avoided here.
+#[allow(clippy::result_unit_err)]
+pub fn mmio_guard_unmap_page(
+    va_range: &VaRange,
+    desc: &mut Descriptor,
+    level: usize,
+) -> result::Result<(), ()> {
+    let flags = desc.flags().expect("Unsupported PTE flags set");
+    if !is_leaf_pte(&flags, level) {
+        return Ok(());
+    }
+    // This function will be called on an address range that corresponds to a device. Only if a
+    // page has been accessed (written to or read from) will it contain the VALID flag and be MMIO
+    // guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO guard
+    // mapped anyway.
+    if flags.contains(Attributes::VALID) {
+        assert!(
+            flags.contains(MMIO_LAZY_MAP_FLAG),
+            "Attempting MMIO guard unmap for non-device pages"
+        );
+        assert_eq!(
+            va_range.len(),
+            MMIO_GUARD_GRANULE_SIZE,
+            "Failed to break down block mapping before MMIO guard mapping"
+        );
+        let page_base = va_range.start().0;
+        assert_eq!(page_base % MMIO_GUARD_GRANULE_SIZE, 0);
+        // Since mmio_guard_map takes IPAs, page_base would need converting to an IPA if pvmfw
+        // ever moved to a non-identity address mapping. However, since 0x0 is a valid MMIO
+        // address, we don't use virt_to_phys here and just pass page_base instead.
+        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+            error!("Error MMIO guard unmapping: {e}");
+        })?;
+    }
+    Ok(())
+}
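
Like mark_dirty_block in dbm.rs, both walkers are shaped as aarch64_paging range callbacks. A teardown sketch for a lazily mapped device range; `unmap_device_range` is hypothetical, and `PageTable::modify_range` is again assumed to forward to aarch64_paging's `Mapping::modify_range`:

    use aarch64_paging::paging::MemoryRegion;
    use vmbase::memory::{mmio_guard_unmap_page, PageTable};

    /// Hypothetical teardown fragment, not part of this change.
    fn unmap_device_range(page_table: &mut PageTable, range: &MemoryRegion) -> Result<(), ()> {
        // Only pages that were actually accessed (hence VALID and guard-mapped)
        // get MMIO-guard-unmapped; untouched lazy pages are skipped by the walker.
        page_table.modify_range(range, &mmio_guard_unmap_page).map_err(|_| ())
    }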