pvmfw: Extract pvmfw-specific code out of mmu.rs
Prepare the file to be moved to vmbase for re-use by:
- Extracting to memory.rs the pvmfw-specific functions that deal with the
  appended data and set up the initial dynamic PTs
- Renaming PageTable::map_data to map_data_dbm and re-introducing
map_data for PTEs that don't set the DBM bit
- Documenting public functions
- Implementing From<IdMap> for PageTable
- Introducing a Result<T> alias wrapping MapError (caller-side usage
  sketched below)
Bug: 282928116
Test: m pvmfw_img
Change-Id: Ibec97c31fa7a86e3843b6e2167c39273be68aba9
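Illustration only, not part of the patch below: a caller-side sketch of the
reshaped API, combining memory::init_page_table(), the new From<IdMap> impl,
the map_data()/map_data_dbm() split and MemoryTracker::new(). The mapped
ranges and the surrounding error handling are placeholders.

    // Build the initial dynamic page table from pvmfw's static layout,
    // replacing the old PageTable::from_static_layout().
    let mut page_table = memory::init_page_table()?;

    // Other clients (e.g. future vmbase users) could wrap their own identity
    // map instead, via the new From<IdMap> impl:
    //     let mut page_table: PageTable = IdMap::new(asid, root_level).into();

    // map_data() now installs plain writable mappings (DBM bit clear), while
    // map_data_dbm() keeps the previous map_data() behaviour: writable-clean
    // mappings whose dirty state is tracked through the DBM bit.
    page_table.map_data(&stack_range)?;        // placeholder range
    page_table.map_data_dbm(&payload_range)?;  // placeholder range

    // The tracker takes ownership of the table and activates it.
    MEMORY.lock().replace(MemoryTracker::new(page_table));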
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 7655f40..08f076b 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -20,8 +20,7 @@
use crate::heap;
use crate::helpers;
use crate::helpers::RangeExt as _;
-use crate::memory::{MemoryTracker, MEMORY};
-use crate::mmu;
+use crate::memory::{self, MemoryTracker, MEMORY};
use crate::rand;
use core::arch::asm;
use core::mem::{drop, size_of};
@@ -208,7 +207,7 @@
// script prevents it from overlapping with other objects.
let appended_data = unsafe { get_appended_data_slice() };
- let mut page_table = mmu::PageTable::from_static_layout().map_err(|e| {
+ let mut page_table = memory::init_page_table().map_err(|e| {
error!("Failed to set up the dynamic page tables: {e}");
RebootReason::InternalError
})?;
@@ -285,7 +284,7 @@
assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
- let stack = mmu::stack_range();
+ let stack = memory::stack_range();
assert_ne!(stack.len(), 0, "stack region is empty.");
assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
@@ -388,7 +387,7 @@
}
unsafe fn get_appended_data_slice() -> &'static mut [u8] {
- let range = mmu::PageTable::appended_payload_range();
+ let range = memory::appended_payload_range();
// SAFETY: This region is mapped and the linker script prevents it from overlapping with other
// objects.
unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) }
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 3205a4d..c97ed99 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,8 +17,10 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::helpers::{self, page_4kb_of, RangeExt, PVMFW_PAGE_SIZE, SIZE_4MB};
-use crate::mmu;
+use crate::mmu::{PageTable, MMIO_LAZY_MAP_FLAG};
+use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
+use aarch64_paging::MapError;
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
@@ -40,13 +42,16 @@
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
-use vmbase::{dsb, isb, memory::set_dbm_enabled, tlbi};
+use vmbase::{dsb, isb, layout, memory::set_dbm_enabled, tlbi};
/// Base of the system's contiguous "main" memory.
pub const BASE_ADDR: usize = 0x8000_0000;
/// First address that can't be translated by a level 1 TTBR0_EL1.
pub const MAX_ADDR: usize = 1 << 40;
+const PT_ROOT_LEVEL: usize = 1;
+const PT_ASID: usize = 1;
+
pub type MemoryRange = Range<usize>;
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);
@@ -91,7 +96,7 @@
/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
total: MemoryRange,
- page_table: mmu::PageTable,
+ page_table: PageTable,
regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
}
@@ -221,7 +226,7 @@
const PVMFW_RANGE: MemoryRange = (BASE_ADDR - SIZE_4MB)..BASE_ADDR;
/// Create a new instance from an active page table, covering the maximum RAM size.
- pub fn new(mut page_table: mmu::PageTable) -> Self {
+ pub fn new(mut page_table: PageTable) -> Self {
// Activate dirty state management first, otherwise we may get permission faults immediately
// after activating the new page table. This has no effect before the new page table is
// activated because none of the entries in the initial idmap have the DBM flag.
@@ -230,7 +235,7 @@
debug!("Activating dynamic page table...");
// SAFETY - page_table duplicates the static mappings for everything that the Rust code is
// aware of so activating it shouldn't have any visible effect.
- unsafe { page_table.activate() };
+ unsafe { page_table.activate() }
debug!("... Success!");
Self {
@@ -274,7 +279,7 @@
pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
self.check(&region)?;
- self.page_table.map_data(range).map_err(|e| {
+ self.page_table.map_data_dbm(range).map_err(|e| {
error!("Error during mutable range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -411,7 +416,7 @@
// Collect memory ranges for which dirty state is tracked.
let writable_regions =
self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
- let payload_range = mmu::PageTable::appended_payload_range();
+ let payload_range = appended_payload_range();
// Execute a barrier instruction to ensure all hardware updates to the page table have been
// observed before reading PTE flags to determine dirty state.
dsb!("ish");
@@ -519,7 +524,7 @@
if !is_leaf_pte(&flags, level) {
return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
}
- if flags.contains(mmu::MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+ if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
Ok(())
} else {
Err(())
@@ -542,7 +547,7 @@
// mapped anyway.
if flags.contains(Attributes::VALID) {
assert!(
- flags.contains(mmu::MMIO_LAZY_MAP_FLAG),
+ flags.contains(MMIO_LAZY_MAP_FLAG),
"Attempting MMIO guard unmap for non-device pages"
);
assert_eq!(
@@ -598,7 +603,7 @@
// An ISB instruction is required to ensure the effects of completed TLB maintenance
// instructions are visible to instructions fetched afterwards.
// See ARM ARM E2.3.10, and G5.9.
- tlbi!("vale1", mmu::PageTable::ASID, va_range.start().0);
+ tlbi!("vale1", PT_ASID, va_range.start().0);
dsb!("ish");
isb!();
Ok(())
@@ -606,3 +611,33 @@
Err(())
}
}
+
+/// Returns the memory range reserved for the appended payload.
+pub fn appended_payload_range() -> Range<usize> {
+ let start = helpers::align_up(layout::binary_end(), helpers::SIZE_4KB).unwrap();
+ // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
+ let end = helpers::align_up(start, helpers::SIZE_2MB).unwrap();
+ start..end
+}
+
+/// Region allocated for the stack.
+pub fn stack_range() -> Range<usize> {
+ const STACK_PAGES: usize = 8;
+
+ layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
+}
+
+/// Returns a new page table pre-populated with pvmfw's binary layout.
+pub fn init_page_table() -> result::Result<PageTable, MapError> {
+ let mut page_table: PageTable = IdMap::new(PT_ASID, PT_ROOT_LEVEL).into();
+
+ // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
+ // so dirty state management can be omitted.
+ page_table.map_data(&layout::scratch_range())?;
+ page_table.map_data(&stack_range())?;
+ page_table.map_code(&layout::text_range())?;
+ page_table.map_rodata(&layout::rodata_range())?;
+ page_table.map_data_dbm(&appended_payload_range())?;
+
+ Ok(page_table)
+}
diff --git a/pvmfw/src/mmu.rs b/pvmfw/src/mmu.rs
index c72ceea..bc71e97 100644
--- a/pvmfw/src/mmu.rs
+++ b/pvmfw/src/mmu.rs
@@ -12,15 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! Memory management.
+//! Page table management.
-use crate::helpers;
-use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
use aarch64_paging::MapError;
-use core::ops::Range;
-use vmbase::layout;
+use core::{ops::Range, result};
/// Software bit used to indicate a device that should be lazily mapped.
pub const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
@@ -38,74 +35,76 @@
const RODATA: Attributes = DATA.union(Attributes::READ_ONLY);
const DATA_DBM: Attributes = RODATA.union(Attributes::DBM);
+type Result<T> = result::Result<T, MapError>;
+
/// High-level API for managing MMU mappings.
pub struct PageTable {
idmap: IdMap,
}
-/// Region allocated for the stack.
-pub fn stack_range() -> Range<usize> {
- const STACK_PAGES: usize = 8;
-
- layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
+impl From<IdMap> for PageTable {
+ fn from(idmap: IdMap) -> Self {
+ Self { idmap }
+ }
}
impl PageTable {
- pub const ASID: usize = 1;
- const ROOT_LEVEL: usize = 1;
-
- /// Returns memory range reserved for the appended payload.
- pub fn appended_payload_range() -> Range<usize> {
- let start = helpers::align_up(layout::binary_end(), helpers::SIZE_4KB).unwrap();
- // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
- let end = helpers::align_up(start, helpers::SIZE_2MB).unwrap();
- start..end
- }
-
- /// Creates an instance pre-populated with pvmfw's binary layout.
- pub fn from_static_layout() -> Result<Self, MapError> {
- let mut page_table = Self { idmap: IdMap::new(Self::ASID, Self::ROOT_LEVEL) };
-
- // Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
- // so dirty state management can be omitted.
- page_table.map_range(&layout::scratch_range(), DATA)?;
- page_table.map_range(&stack_range(), DATA)?;
- page_table.map_code(&layout::text_range())?;
- page_table.map_rodata(&layout::rodata_range())?;
- page_table.map_data(&Self::appended_payload_range())?;
-
- Ok(page_table)
- }
-
+ /// Activates the page table.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the PageTable instance has valid and identical mappings for the
+ /// code currently being executed. Otherwise, the Rust execution model (on which the borrow
+ /// checker relies) would be violated.
pub unsafe fn activate(&mut self) {
self.idmap.activate()
}
- pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<(), MapError> {
+ /// Maps the given range of virtual addresses to the physical addresses as lazily mapped
+ /// nGnRE device memory.
+ pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<()> {
self.map_range(range, DEVICE_LAZY)
}
- pub fn map_device(&mut self, range: &Range<usize>) -> Result<(), MapError> {
+ /// Maps the given range of virtual addresses to the physical addresses as valid nGnRE
+ /// device memory.
+ pub fn map_device(&mut self, range: &Range<usize>) -> Result<()> {
self.map_range(range, DEVICE)
}
- pub fn map_data(&mut self, range: &Range<usize>) -> Result<(), MapError> {
+ /// Maps the given range of virtual addresses to the physical addresses as non-executable
+ /// and writable normal memory.
+ pub fn map_data(&mut self, range: &Range<usize>) -> Result<()> {
+ self.map_range(range, DATA)
+ }
+
+ /// Maps the given range of virtual addresses to the physical addresses as non-executable,
+ /// writable-clean normal memory whose dirty state is tracked through the hardware DBM bit.
+ pub fn map_data_dbm(&mut self, range: &Range<usize>) -> Result<()> {
self.map_range(range, DATA_DBM)
}
- pub fn map_code(&mut self, range: &Range<usize>) -> Result<(), MapError> {
+ /// Maps the given range of virtual addresses to the physical addresses as executable and
+ /// read-only normal memory.
+ pub fn map_code(&mut self, range: &Range<usize>) -> Result<()> {
self.map_range(range, CODE)
}
- pub fn map_rodata(&mut self, range: &Range<usize>) -> Result<(), MapError> {
+ /// Maps the given range of virtual addresses to the physical addresses as non-executable
+ /// and read-only normal memory.
+ pub fn map_rodata(&mut self, range: &Range<usize>) -> Result<()> {
self.map_range(range, RODATA)
}
- fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<(), MapError> {
+ /// Maps the given range of virtual addresses to the physical addresses with the given
+ /// attributes.
+ fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<()> {
self.idmap.map_range(&MemoryRegion::new(range.start, range.end), attr)
}
- pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<(), MapError> {
+ /// Applies the provided updater function to the page table descriptors covering the given
+ /// memory range.
+ pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<()> {
self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
}
}
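
Illustration only, not part of the patch above: a minimal PteUpdater sketch in
the style of the updaters memory.rs already passes to PageTable::modify_range().
It assumes memory.rs's existing imports (Descriptor, the VaRange alias,
MMIO_LAZY_MAP_FLAG) and its private is_leaf_pte() helper; Descriptor::flags()
is assumed to return Option<Attributes>, and page_table/range are placeholder
bindings.

    /// Example updater: succeeds only if every leaf descriptor in the walked
    /// range carries the lazy-MMIO software flag.
    fn require_lazy_flag(
        _va_range: &VaRange,
        desc: &mut Descriptor,
        level: usize,
    ) -> result::Result<(), ()> {
        let flags = desc.flags().ok_or(())?;
        if !is_leaf_pte(&flags, level) {
            return Ok(()); // Table descriptors carry no software flags; skip them.
        }
        if flags.contains(MMIO_LAZY_MAP_FLAG) {
            Ok(())
        } else {
            Err(())
        }
    }

    // modify_range() walks the descriptors covering `range` and surfaces any
    // Err(()) from the updater as a MapError through the new Result alias.
    page_table.modify_range(&range, &require_lazy_flag)?;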