Update layout/PageTable memory range to Range<VirtualAddress>
This CL updates the return type of the layout functions and the
parameter type of the PageTable memory-mapping functions from
Range<usize> to Range<VirtualAddress>. This makes it explicit
that the ranges used here are virtual memory ranges.
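
For illustration, the layout-side change has roughly the shape
sketched below. The function name and address bounds are
hypothetical stand-ins; only the Range<VirtualAddress> return type
and its From-based conversion to VaRange mirror this change.

  use aarch64_paging::paging::{MemoryRegion as VaRange, VirtualAddress};
  use core::ops::Range;

  /// Hypothetical layout function; the real ones derive their
  /// bounds from linker-provided symbols.
  fn scratch_range() -> Range<VirtualAddress> {
      VirtualAddress(0x8000_0000)..VirtualAddress(0x8001_0000)
  }

  fn example() {
      // Callers now receive typed virtual addresses; the PageTable
      // mapping functions take a &VaRange, which Range<VirtualAddress>
      // converts into via From (as handle_mmio_fault does below).
      let va_range: VaRange = scratch_range().into();
      let _ = va_range;
  }
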
Test: atest vmbase_example.integration_test rialto_test
Test: m pvmfw_img
Bug: 284462758
Change-Id: I19d4859a03edffedb00ab2831f43929befcb98d8
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 4a75b97..c8b7d35 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -20,7 +20,7 @@
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::vec::Vec;
@@ -44,6 +44,12 @@
/// Memory range.
pub type MemoryRange = Range<usize>;
+
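+/// Converts a raw `MemoryRange` into a `VaRange` with the same bounds.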
+fn get_va_range(range: &MemoryRange) -> VaRange {
+ VaRange::new(range.start, range.end)
+}
+
type Result<T> = result::Result<T, MemoryTrackerError>;
#[derive(Clone, Copy, Debug, Default, PartialEq)]
@@ -82,7 +87,7 @@
mut page_table: PageTable,
total: MemoryRange,
mmio_range: MemoryRange,
- payload_range: Option<MemoryRange>,
+ payload_range: Option<Range<VirtualAddress>>,
) -> Self {
assert!(
!total.overlaps(&mmio_range),
@@ -106,7 +111,8 @@
regions: ArrayVec::new(),
mmio_regions: ArrayVec::new(),
mmio_range,
- payload_range,
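+ // The stored field remains an Option<MemoryRange>, so keep the raw usize bounds.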
+ payload_range: payload_range.map(|r| r.start.0..r.end.0),
}
}
@@ -132,7 +137,7 @@
pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
self.check(&region)?;
- self.page_table.map_rodata(range).map_err(|e| {
+ self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
error!("Error during range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -143,7 +148,7 @@
pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
self.check(&region)?;
- self.page_table.map_data_dbm(range).map_err(|e| {
+ self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
error!("Error during mutable range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -173,7 +178,7 @@
return Err(MemoryTrackerError::Full);
}
- self.page_table.map_device_lazy(&range).map_err(|e| {
+ self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
error!("Error during MMIO device mapping: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -215,7 +220,7 @@
pub fn mmio_unmap_all(&mut self) -> Result<()> {
for range in &self.mmio_regions {
self.page_table
- .modify_range(range, &mmio_guard_unmap_page)
+ .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
.map_err(|_| MemoryTrackerError::FailedToUnmap)?;
}
Ok(())
@@ -266,11 +271,12 @@
/// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
/// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
- let page_range = page_4kb_of(addr)..page_4kb_of(addr) + MMIO_GUARD_GRANULE_SIZE;
+ let page_start = VirtualAddress(page_4kb_of(addr));
+ let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
self.page_table
.modify_range(&page_range, &verify_lazy_mapped_block)
.map_err(|_| MemoryTrackerError::InvalidPte)?;
- get_hypervisor().mmio_guard_map(page_range.start)?;
+ get_hypervisor().mmio_guard_map(page_start.0)?;
// Maps a single device page, breaking up block mappings if necessary.
self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
}
@@ -286,7 +292,7 @@
// Now flush writable-dirty pages in those regions.
for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
self.page_table
- .modify_range(range, &flush_dirty_range)
+ .modify_range(&get_va_range(range), &flush_dirty_range)
.map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
}
Ok(())
@@ -296,8 +302,9 @@
/// In general, this should be called from the exception handler when hardware dirty
/// state management is disabled or unavailable.
pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
+ let addr = VirtualAddress(addr);
self.page_table
- .modify_range(&(addr..addr + 1), &mark_dirty_block)
+ .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
.map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
}
}