[rialto] Initialize the shared memory pool

Initialize rialto's shared memory pool at boot. When the hypervisor
advertises the DYNAMIC_MEM_SHARE capability, a dynamically shared pool
is built from the heap using the hypervisor's memory protection
granule; otherwise the pool is initialized from the fixed range of the
swiotlb node in the device tree.

The granule lookup moves out of init_dynamic_shared_pool() and into
its callers: rialto falls back to PAGE_SIZE when the query is not
supported (e.g. in non-protected VMs), while pvmfw keeps treating any
failure as a fatal InternalError.
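 
In outline, the pool selection added to rialto (condensed from the
diff below; error handling elided, names as in the patch):
 
    if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
        // Share heap pages on demand, at the hypervisor's granule
        // (PAGE_SIZE when the granule query is unsupported).
        let granule = memory_protection_granule()?;
        MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule)?;
    } else {
        // Reuse the fixed swiotlb range pre-shared by the host.
        let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().unwrap();
        MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range)?;
    }
 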
Test: atest rialto_test
Bug: 284462758
Change-Id: I88f04be6fcd29ade9bc5ab0af2f1e0e16d9b04d9
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 3a870ab..0d2dfda 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -116,7 +116,11 @@
})?;

if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool().map_err(|e| {
+ let granule = get_hypervisor().memory_protection_granule().map_err(|e| {
+ error!("Failed to get memory protection granule: {e}");
+ RebootReason::InternalError
+ })?;
+ MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
error!("Failed to initialize dynamically shared pool: {e}");
RebootReason::InternalError
})?;
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 578d7a0..29056f1 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -25,11 +25,14 @@
use crate::error::{Error, Result};
use buddy_system_allocator::LockedHeap;
use core::num::NonZeroUsize;
+use core::result;
use core::slice;
use fdtpci::PciInfo;
-use hyp::get_hypervisor;
+use hyp::{get_hypervisor, HypervisorCap, KvmError};
+use libfdt::FdtError;
use log::{debug, error, info};
use vmbase::{
+ fdt::SwiotlbInfo,
layout::{self, crosvm},
main,
memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE},
@@ -111,9 +114,38 @@
error!("Failed to use memory range value from DT: {memory_range:#x?}");
e
})?;
+
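+ // Prefer dynamic sharing when the hypervisor supports it; otherwise fall
+ // back to the fixed swiotlb region pre-shared by the host.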
+ if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
+ let granule = memory_protection_granule()?;
+ MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
+ error!("Failed to initialize dynamically shared pool.");
+ e
+ })?;
+ } else {
+ let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().ok_or_else(|| {
+ error!("Pre-shared pool range not specified in swiotlb node");
+ Error::from(FdtError::BadValue)
+ })?;
+ MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
+ error!("Failed to initialize pre-shared pool.");
+ e
+ })?;
+ }
Ok(())
}
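 
+/// Returns the hypervisor's memory protection granule, or PAGE_SIZE when unsupported.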
+fn memory_protection_granule() -> result::Result<usize, hyp::Error> {
+ match get_hypervisor().memory_protection_granule() {
+ Ok(granule) => Ok(granule),
+ // Fall back to the default page size when the KVM call is not supported, e.g. in non-protected VMs.
+ Err(hyp::Error::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
+ Err(e) => Err(e),
+ }
+}
+
fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
info!("Starting unsharing memory...");
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 5d60c85..61cbeb0 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -220,10 +220,9 @@
}

/// Initialize the shared heap to dynamically share memory from the global allocator.
- pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
+ pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
const INIT_CAP: usize = 10;
- let granule = get_hypervisor().memory_protection_granule()?;
let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
if previous.is_some() {
return Err(MemoryTrackerError::SharedMemorySetFailure);
}