rialto: Use PageTable from pvmfw through vmbase
Extract the file to vmbase.
Use it in Rialto to replace the existing PT manipulation configuration.
Bug: 282928116
Test: atest rialto_test
Change-Id: If9aaa30fb60781cebc82cf34ebe94a9a580beace
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 9afd816..5108eb4 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -33,7 +33,6 @@
mod hvc;
mod instance;
mod memory;
-mod mmu;
mod rand;
mod virtio;
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index c97ed99..d4e548b 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,7 +17,6 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::helpers::{self, page_4kb_of, RangeExt, PVMFW_PAGE_SIZE, SIZE_4MB};
-use crate::mmu::{PageTable, MMIO_LAZY_MAP_FLAG};
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
@@ -42,7 +41,11 @@
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
-use vmbase::{dsb, isb, layout, memory::set_dbm_enabled, tlbi};
+use vmbase::{
+ dsb, isb, layout,
+ memory::{set_dbm_enabled, PageTable, MMIO_LAZY_MAP_FLAG},
+ tlbi,
+};
/// Base of the system's contiguous "main" memory.
pub const BASE_ADDR: usize = 0x8000_0000;
diff --git a/pvmfw/src/mmu.rs b/pvmfw/src/mmu.rs
deleted file mode 100644
index bc71e97..0000000
--- a/pvmfw/src/mmu.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2022, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Page table management.
-
-use aarch64_paging::idmap::IdMap;
-use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
-use aarch64_paging::MapError;
-use core::{ops::Range, result};
-
-/// Software bit used to indicate a device that should be lazily mapped.
-pub const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
-
-// We assume that:
-// - MAIR_EL1.Attr0 = "Device-nGnRE memory" (0b0000_0100)
-// - MAIR_EL1.Attr1 = "Normal memory, Outer & Inner WB Non-transient, R/W-Allocate" (0b1111_1111)
-const MEMORY: Attributes =
- Attributes::VALID.union(Attributes::NORMAL).union(Attributes::NON_GLOBAL);
-const DEVICE_LAZY: Attributes =
- MMIO_LAZY_MAP_FLAG.union(Attributes::DEVICE_NGNRE).union(Attributes::EXECUTE_NEVER);
-const DEVICE: Attributes = DEVICE_LAZY.union(Attributes::VALID);
-const CODE: Attributes = MEMORY.union(Attributes::READ_ONLY);
-const DATA: Attributes = MEMORY.union(Attributes::EXECUTE_NEVER);
-const RODATA: Attributes = DATA.union(Attributes::READ_ONLY);
-const DATA_DBM: Attributes = RODATA.union(Attributes::DBM);
-
-type Result<T> = result::Result<T, MapError>;
-
-/// High-level API for managing MMU mappings.
-pub struct PageTable {
- idmap: IdMap,
-}
-
-impl From<IdMap> for PageTable {
- fn from(idmap: IdMap) -> Self {
- Self { idmap }
- }
-}
-
-impl PageTable {
- /// Activates the page table.
- ///
- /// # Safety
- ///
- /// The caller must ensure that the PageTable instance has valid and identical mappings for the
- /// code being currently executed. Otherwise, the Rust execution model (on which the borrow
- /// checker relies) would be violated.
- pub unsafe fn activate(&mut self) {
- self.idmap.activate()
- }
-
- /// Maps the given range of virtual addresses to the physical addresses as lazily mapped
- /// nGnRE device memory.
- pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DEVICE_LAZY)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses as valid device
- /// nGnRE device memory.
- pub fn map_device(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DEVICE)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses as non-executable
- /// and writable normal memory.
- pub fn map_data(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DATA)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses as non-executable,
- /// read-only and writable-clean normal memory.
- pub fn map_data_dbm(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DATA_DBM)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses as read-only
- /// normal memory.
- pub fn map_code(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, CODE)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses as non-executable
- /// and read-only normal memory.
- pub fn map_rodata(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, RODATA)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses with the given
- /// attributes.
- fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<()> {
- self.idmap.map_range(&MemoryRegion::new(range.start, range.end), attr)
- }
-
- /// Applies the provided updater function to a number of PTEs corresponding to a given memory
- /// range.
- pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<()> {
- self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
- }
-}