[pvmfw] Move MemorySharer from pvmfw to vmbase
This CL moves MemorySharer and two utility functions, virt_to_phys
and phys_to_virt, from pvmfw to vmbase so that they can be reused
in rialto later.
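
For reference, a minimal sketch of how a vmbase client such as rialto
might use the moved API. The granule, capacity, and layout values and
the init_shared_pool helper are hypothetical, for illustration only;
they are not part of this change:

    use buddy_system_allocator::FrameAllocator;
    use core::alloc::Layout;
    use vmbase::memory::MemorySharer;

    // Hypothetical values, for illustration only.
    const GRANULE: usize = 4096; // hypervisor protection granule
    const INIT_CAP: usize = 10;  // initial capacity of the region list

    fn init_shared_pool() -> (FrameAllocator<32>, MemorySharer) {
        let mut pool = FrameAllocator::<32>::new();
        let mut sharer = MemorySharer::new(GRANULE, INIT_CAP);
        // Share a zeroed, granule-aligned region with the host and
        // hand it to the frame allocator for sub-allocation.
        sharer.refill(&mut pool, Layout::from_size_align(GRANULE, GRANULE).unwrap());
        // Dropping `sharer` later unshares and frees all its regions.
        (pool, sharer)
    }
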
Bug: 284462758
Test: m pvmfw_img
Change-Id: I48a410792370beaa531ea0408670b8d831150272
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index f4fc3b1..76950a2 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -20,12 +20,9 @@
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
-use alloc::alloc::alloc_zeroed;
-use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
-use alloc::vec::Vec;
-use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
+use buddy_system_allocator::LockedFrameAllocator;
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
@@ -43,7 +40,7 @@
use tinyvec::ArrayVec;
use vmbase::{
dsb, isb, layout,
- memory::{set_dbm_enabled, PageTable, MMIO_LAZY_MAP_FLAG},
+ memory::{set_dbm_enabled, MemorySharer, PageTable, MMIO_LAZY_MAP_FLAG},
tlbi,
};
@@ -169,60 +166,6 @@
static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);
-/// Allocates memory on the heap and shares it with the host.
-///
-/// Unshares all pages when dropped.
-pub struct MemorySharer {
- granule: usize,
- shared_regions: Vec<(usize, Layout)>,
-}
-
-impl MemorySharer {
- const INIT_CAP: usize = 10;
-
- pub fn new(granule: usize) -> Self {
- assert!(granule.is_power_of_two());
- Self { granule, shared_regions: Vec::with_capacity(Self::INIT_CAP) }
- }
-
- /// Get from the global allocator a granule-aligned region that suits `hint` and share it.
- pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
- let layout = hint.align_to(self.granule).unwrap().pad_to_align();
- assert_ne!(layout.size(), 0);
- // SAFETY - layout has non-zero size.
- let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
- handle_alloc_error(layout);
- };
-
- let base = shared.as_ptr() as usize;
- let end = base.checked_add(layout.size()).unwrap();
- trace!("Sharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
- }
- self.shared_regions.push((base, layout));
-
- pool.add_frame(base, end);
- }
-}
-
-impl Drop for MemorySharer {
- fn drop(&mut self) {
- while let Some((base, layout)) = self.shared_regions.pop() {
- let end = base.checked_add(layout.size()).unwrap();
- trace!("Unsharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
- }
-
- // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
- unsafe { dealloc(base as *mut _, layout) };
- }
- }
-}
-
impl MemoryTracker {
const CAPACITY: usize = 5;
const MMIO_CAPACITY: usize = 5;
@@ -363,8 +306,10 @@
/// Initialize the shared heap to dynamically share memory from the global allocator.
pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
+ const INIT_CAP: usize = 10;
+
let granule = get_hypervisor().memory_protection_granule()?;
- let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule));
+ let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
if previous.is_some() {
return Err(MemoryTrackerError::SharedMemorySetFailure);
}
@@ -490,22 +435,6 @@
Ok(())
}
-/// Returns the intermediate physical address corresponding to the given virtual address.
-///
-/// As we use identity mapping for everything, this is just a cast, but it's useful to use it to be
-/// explicit about where we are converting from virtual to physical address.
-pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
- vaddr.as_ptr() as _
-}
-
-/// Returns a pointer for the virtual address corresponding to the given non-zero intermediate
-/// physical address.
-///
-/// Panics if `paddr` is 0.
-pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
- NonNull::new(paddr as _).unwrap()
-}
-
/// Checks whether a PTE at given level is a page or block descriptor.
#[inline]
fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
diff --git a/pvmfw/src/virtio/hal.rs b/pvmfw/src/virtio/hal.rs
index becc263..ce246b1 100644
--- a/pvmfw/src/virtio/hal.rs
+++ b/pvmfw/src/virtio/hal.rs
@@ -16,12 +16,13 @@
use super::pci::PCI_INFO;
use crate::helpers::RangeExt as _;
-use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
+use crate::memory::{alloc_shared, dealloc_shared};
use core::alloc::Layout;
use core::mem::size_of;
use core::ptr::{copy_nonoverlapping, NonNull};
use log::trace;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
+use vmbase::memory::{phys_to_virt, virt_to_phys};
/// The alignment to use for the temporary buffers allocated by `HalImpl::share`. There doesn't seem
/// to be any particular alignment required by VirtIO for these, so 16 bytes should be enough to
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index 9b246c0..72be0b0 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -64,6 +64,8 @@
srcs: ["src/lib.rs"],
rustlibs: [
"libaarch64_paging",
+ "libbuddy_system_allocator",
+ "libhyp",
"liblog_rust_nostd",
"libsmccc",
"libspin_nostd",
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index d086f1c..2541b8a 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -16,6 +16,8 @@
#![no_std]
+extern crate alloc;
+
pub mod arch;
mod bionic;
pub mod console;
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index 149fd18..5e1065a 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -16,6 +16,10 @@
mod dbm;
mod page_table;
+mod shared;
+mod util;
pub use dbm::set_dbm_enabled;
pub use page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
+pub use shared::MemorySharer;
+pub use util::{phys_to_virt, virt_to_phys};
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
new file mode 100644
index 0000000..0a2444f
--- /dev/null
+++ b/vmbase/src/memory/shared.rs
@@ -0,0 +1,78 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Shared memory management.
+
+use super::util::virt_to_phys;
+use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
+use alloc::vec::Vec;
+use buddy_system_allocator::FrameAllocator;
+use core::alloc::Layout;
+use core::ptr::NonNull;
+use hyp::get_hypervisor;
+use log::trace;
+
+/// Allocates memory on the heap and shares it with the host.
+///
+/// Unshares all pages when dropped.
+pub struct MemorySharer {
+ granule: usize,
+ shared_regions: Vec<(usize, Layout)>,
+}
+
+impl MemorySharer {
+ /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
+ /// `granule` must be a power of 2.
+ pub fn new(granule: usize, capacity: usize) -> Self {
+ assert!(granule.is_power_of_two());
+ Self { granule, shared_regions: Vec::with_capacity(capacity) }
+ }
+
+ /// Gets a granule-aligned region that suits `hint` from the global allocator and shares it.
+ pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
+ let layout = hint.align_to(self.granule).unwrap().pad_to_align();
+ assert_ne!(layout.size(), 0);
+ // SAFETY - layout has non-zero size.
+ let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
+ handle_alloc_error(layout);
+ };
+
+ let base = shared.as_ptr() as usize;
+ let end = base.checked_add(layout.size()).unwrap();
+ trace!("Sharing memory region {:#x?}", base..end);
+ for vaddr in (base..end).step_by(self.granule) {
+ let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+ get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ }
+ self.shared_regions.push((base, layout));
+
+ pool.add_frame(base, end);
+ }
+}
+
+impl Drop for MemorySharer {
+ fn drop(&mut self) {
+ while let Some((base, layout)) = self.shared_regions.pop() {
+ let end = base.checked_add(layout.size()).unwrap();
+ trace!("Unsharing memory region {:#x?}", base..end);
+ for vaddr in (base..end).step_by(self.granule) {
+ let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+ get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ }
+
+ // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
+ unsafe { dealloc(base as *mut _, layout) };
+ }
+ }
+}
diff --git a/vmbase/src/memory/util.rs b/vmbase/src/memory/util.rs
new file mode 100644
index 0000000..5b89cd1
--- /dev/null
+++ b/vmbase/src/memory/util.rs
@@ -0,0 +1,33 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Utility functions for memory management.
+
+use core::ptr::NonNull;
+
+/// Returns the intermediate physical address corresponding to the given virtual address.
+///
+/// As we use identity mapping for everything, this is just a cast, but using it makes it
+/// explicit where we convert from a virtual to a physical address.
+pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
+ vaddr.as_ptr() as _
+}
+
+/// Returns a pointer to the virtual address corresponding to the given non-zero intermediate
+/// physical address.
+///
+/// Panics if `paddr` is 0.
+pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
+ NonNull::new(paddr as _).unwrap()
+}