[pvmfw] Move MemorySharer from pvmfw to vmbase
This CL moves MemorySharer and the two utility functions
virt_to_phys and phys_to_virt from pvmfw to vmbase so that they can
be reused in rialto later.
Bug: 284462758
Test: m pvmfw_img
Change-Id: I48a410792370beaa531ea0408670b8d831150272
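
Note: the vmbase side of the move is not part of this diff. A minimal
sketch of the relocated type, inferred from the code removed below and
from the updated call site (MemorySharer::new(granule, INIT_CAP)); the
exact vmbase file location and constructor signature are assumptions:

use alloc::vec::Vec;
use core::alloc::Layout;

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
pub struct MemorySharer {
    granule: usize,
    shared_regions: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Creates a sharer for the given hypervisor protection granule.
    ///
    /// The initial Vec capacity is now a constructor argument rather
    /// than the hard-coded INIT_CAP constant pvmfw used before.
    pub fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, shared_regions: Vec::with_capacity(capacity) }
    }
}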
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index f4fc3b1..76950a2 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -20,12 +20,9 @@
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
-use alloc::alloc::alloc_zeroed;
-use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
-use alloc::vec::Vec;
-use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
+use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
@@ -43,7 +40,7 @@
use tinyvec::ArrayVec;
use vmbase::{
dsb, isb, layout,
- memory::{set_dbm_enabled, PageTable, MMIO_LAZY_MAP_FLAG},
+ memory::{set_dbm_enabled, MemorySharer, PageTable, MMIO_LAZY_MAP_FLAG},
tlbi,
};
@@ -169,60 +166,6 @@
static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);
-/// Allocates memory on the heap and shares it with the host.
-///
-/// Unshares all pages when dropped.
-pub struct MemorySharer {
- granule: usize,
- shared_regions: Vec<(usize, Layout)>,
-}
-
-impl MemorySharer {
- const INIT_CAP: usize = 10;
-
- pub fn new(granule: usize) -> Self {
- assert!(granule.is_power_of_two());
- Self { granule, shared_regions: Vec::with_capacity(Self::INIT_CAP) }
- }
-
- /// Get from the global allocator a granule-aligned region that suits `hint` and share it.
- pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
- let layout = hint.align_to(self.granule).unwrap().pad_to_align();
- assert_ne!(layout.size(), 0);
- // SAFETY - layout has non-zero size.
- let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
- handle_alloc_error(layout);
- };
-
- let base = shared.as_ptr() as usize;
- let end = base.checked_add(layout.size()).unwrap();
- trace!("Sharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
- }
- self.shared_regions.push((base, layout));
-
- pool.add_frame(base, end);
- }
-}
-
-impl Drop for MemorySharer {
- fn drop(&mut self) {
- while let Some((base, layout)) = self.shared_regions.pop() {
- let end = base.checked_add(layout.size()).unwrap();
- trace!("Unsharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
- }
-
- // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
- unsafe { dealloc(base as *mut _, layout) };
- }
- }
-}
-
impl MemoryTracker {
const CAPACITY: usize = 5;
const MMIO_CAPACITY: usize = 5;
@@ -363,8 +306,10 @@
/// Initialize the shared heap to dynamically share memory from the global allocator.
pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
+ const INIT_CAP: usize = 10;
+
let granule = get_hypervisor().memory_protection_granule()?;
- let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule));
+ let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
if previous.is_some() {
return Err(MemoryTrackerError::SharedMemorySetFailure);
}
@@ -490,22 +435,6 @@
Ok(())
}
-/// Returns the intermediate physical address corresponding to the given virtual address.
-///
-/// As we use identity mapping for everything, this is just a cast, but it's useful to use it to be
-/// explicit about where we are converting from virtual to physical address.
-pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
- vaddr.as_ptr() as _
-}
-
-/// Returns a pointer for the virtual address corresponding to the given non-zero intermediate
-/// physical address.
-///
-/// Panics if `paddr` is 0.
-pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
- NonNull::new(paddr as _).unwrap()
-}
-
/// Checks whether a PTE at given level is a page or block descriptor.
#[inline]
fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
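
The removed address-conversion helpers relied on the identity mapping, so
converting a virtual address to an IPA and back is a no-op. A minimal
sketch of that invariant, assuming the vmbase versions imported below
keep the same behaviour as the removed pvmfw ones (the helper name
roundtrip is illustrative only):

use core::ptr::NonNull;
use vmbase::memory::{phys_to_virt, virt_to_phys};

// With identity mapping, virt -> phys -> virt returns the original pointer.
fn roundtrip(vaddr: NonNull<u8>) {
    let paddr = virt_to_phys(vaddr);
    assert_eq!(phys_to_virt(paddr), vaddr);
}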
diff --git a/pvmfw/src/virtio/hal.rs b/pvmfw/src/virtio/hal.rs
index becc263..ce246b1 100644
--- a/pvmfw/src/virtio/hal.rs
+++ b/pvmfw/src/virtio/hal.rs
@@ -16,12 +16,13 @@
use super::pci::PCI_INFO;
use crate::helpers::RangeExt as _;
-use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
+use crate::memory::{alloc_shared, dealloc_shared};
use core::alloc::Layout;
use core::mem::size_of;
use core::ptr::{copy_nonoverlapping, NonNull};
use log::trace;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
+use vmbase::memory::{phys_to_virt, virt_to_phys};
/// The alignment to use for the temporary buffers allocated by `HalImpl::share`. There doesn't seem
/// to be any particular alignment required by VirtIO for these, so 16 bytes should be enough to