[pvmfw][vmbase] Move page table update functions to vmbase

This simplifies the task of moving MemoryTracker to vmbase for reuse
in both rialto and pvmfw.
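
Both helpers use the callback signature expected by aarch64_paging's
modify_range(), so MemoryTracker can keep applying them over a VA range
once it lives in vmbase. A rough usage sketch (page_table, page_range and
mmio_range are illustrative names, not part of this change):

  // e.g. check that a faulting page is still lazily mapped before guard-mapping it:
  page_table.modify_range(&page_range, &verify_lazy_mapped_block)?;
  // e.g. MMIO-guard-unmap every accessed device page in a range being torn down:
  page_table.modify_range(&mmio_range, &mmio_guard_unmap_page)?;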
Bug: 284462758
Test: m pvmfw_img
Change-Id: Ic4c912caf0d5526cfe70eac2bfbfcf62ec45dacf
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 0a2444f..568eb81 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -14,14 +14,17 @@
//! Shared memory management.
-use super::util::virt_to_phys;
+use super::page_table::{is_leaf_pte, MMIO_LAZY_MAP_FLAG};
+use super::util::{virt_to_phys, PAGE_SIZE};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::vec::Vec;
use buddy_system_allocator::FrameAllocator;
use core::alloc::Layout;
use core::ptr::NonNull;
+use core::result;
use hyp::get_hypervisor;
-use log::trace;
+use log::{error, trace};
/// Allocates memory on the heap and shares it with the host.
///
@@ -76,3 +79,62 @@
        }
    }
}
+
+/// Checks whether the block's flags indicate that it should be MMIO guard mapped, i.e. that it
+/// is tagged for lazy MMIO guard mapping but not yet valid. The return type is dictated by the
+/// `aarch64_paging` crate, so the `clippy::result_unit_err` lint cannot be addressed here.
+#[allow(clippy::result_unit_err)]
+pub fn verify_lazy_mapped_block(
+    _range: &VaRange,
+    desc: &mut Descriptor,
+    level: usize,
+) -> result::Result<(), ()> {
+    let flags = desc.flags().expect("Unsupported PTE flags set");
+    if !is_leaf_pte(&flags, level) {
+        return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
+    }
+    if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+        Ok(())
+    } else {
+        Err(())
+    }
+}
+
+/// MMIO guard unmaps the page covered by `va_range`, if it was ever accessed and hence MMIO
+/// guard mapped. The return type is dictated by the `aarch64_paging` crate, so the
+/// `clippy::result_unit_err` lint cannot be addressed here.
+#[allow(clippy::result_unit_err)]
+pub fn mmio_guard_unmap_page(
+    va_range: &VaRange,
+    desc: &mut Descriptor,
+    level: usize,
+) -> result::Result<(), ()> {
+    let flags = desc.flags().expect("Unsupported PTE flags set");
+    if !is_leaf_pte(&flags, level) {
+        return Ok(());
+    }
+    // This function is called on an address range that corresponds to a device. Only if a page
+    // has been accessed (written to or read from) will it carry the VALID flag and be MMIO guard
+    // mapped. Invalid pages can therefore be skipped here, as they were never MMIO guard mapped
+    // in the first place.
+    if flags.contains(Attributes::VALID) {
+        assert!(
+            flags.contains(MMIO_LAZY_MAP_FLAG),
+            "Attempting MMIO guard unmap for non-device pages"
+        );
+        assert_eq!(
+            va_range.len(),
+            PAGE_SIZE,
+            "Failed to break down block mapping before MMIO guard mapping"
+        );
+        let page_base = va_range.start().0;
+        assert_eq!(page_base % PAGE_SIZE, 0);
+        // Since mmio_guard_map takes IPAs, page_base would need to be converted to an IPA if
+        // pvmfw ever moved to a non-identity address mapping. However, since 0x0 is a valid
+        // MMIO address, we don't use virt_to_phys here and just pass page_base instead.
+        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+            error!("Error MMIO guard unmapping: {e}");
+        })?;
+    }
+    Ok(())
+}