vmbase: Add map_data_noflush()
Support use cases where clients want to map pages R/W and know that they
won't rely on the cache flushing of deactivate_dynamic_page_tables(),
such as pvmfw mapping the kernel payload R/W when booted in EFI mode.
At a low level, map_data_noflush() doesn't need to create page-level
mappings for accurate dirty-state tracking, unlike map_data(). This
reduces the total number of page tables, limiting heap pressure.
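For illustration only, a caller could look roughly like this (the
function and parameter names below are hypothetical and not part of
this change; vmbase::memory is assumed to be the usual re-export path):

    use core::num::NonZeroUsize;
    use vmbase::memory::map_data_noflush;

    fn map_payload_rw(kernel_start: usize, kernel_size: usize) {
        // Map the payload R/W; the caller does not rely on
        // deactivate_dynamic_page_tables() flushing caches for it.
        let size = NonZeroUsize::new(kernel_size).expect("empty payload");
        map_data_noflush(kernel_start, size).expect("failed to map payload R/W");
    }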
Test: m pvmfw_bin
Change-Id: I4e9e7d49e353c5d2f81834054e2a669cc4ce46c8
diff --git a/libs/libvmbase/src/memory.rs b/libs/libvmbase/src/memory.rs
index fd4706f..9153706 100644
--- a/libs/libvmbase/src/memory.rs
+++ b/libs/libvmbase/src/memory.rs
@@ -26,9 +26,9 @@
pub use page_table::PageTable;
pub use shared::MemoryRange;
pub use tracker::{
- deactivate_dynamic_page_tables, init_shared_pool, map_data, map_device, map_image_footer,
- map_rodata, map_rodata_outside_main_memory, resize_available_memory, unshare_all_memory,
- unshare_all_mmio_except_uart, unshare_uart,
+ deactivate_dynamic_page_tables, init_shared_pool, map_data, map_data_noflush, map_device,
+ map_image_footer, map_rodata, map_rodata_outside_main_memory, resize_available_memory,
+ unshare_all_memory, unshare_all_mmio_except_uart, unshare_uart,
};
pub use util::{
flush, flushed_zeroize, page_4kb_of, PAGE_SIZE, SIZE_128KB, SIZE_16KB, SIZE_2MB, SIZE_4KB,
diff --git a/libs/libvmbase/src/memory/tracker.rs b/libs/libvmbase/src/memory/tracker.rs
index f15c4db..bbff254 100644
--- a/libs/libvmbase/src/memory/tracker.rs
+++ b/libs/libvmbase/src/memory/tracker.rs
@@ -132,6 +132,18 @@
Ok(())
}
+/// Map the provided range as normal memory, with R/W permissions.
+///
+/// Unlike with `map_data()`, `deactivate_dynamic_page_tables()` won't flush caches for the range.
+///
+/// This fails if the range has already been (partially) mapped.
+pub fn map_data_noflush(addr: usize, size: NonZeroUsize) -> Result<()> {
+ let mut locked_tracker = try_lock_memory_tracker()?;
+ let tracker = locked_tracker.as_mut().ok_or(MemoryTrackerError::Unavailable)?;
+ let _ = tracker.alloc_mut_noflush(addr, size)?;
+ Ok(())
+}
+
/// Map the region potentially holding data appended to the image, with read-write permissions.
///
/// This fails if the footer has already been mapped.
@@ -294,6 +306,16 @@
self.add(region)
}
+ fn alloc_range_mut_noflush(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
+ let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
+ self.check_allocatable(&region)?;
+ self.page_table.map_data(&get_va_range(range)).map_err(|e| {
+ error!("Error during non-flushed mutable range allocation: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ self.add(region)
+ }
+
/// Maps the image footer, with read-write permissions.
fn map_image_footer(&mut self) -> Result<MemoryRange> {
if self.image_footer_mapped {
@@ -318,6 +340,10 @@
self.alloc_range_mut(&(base..(base + size.get())))
}
+ fn alloc_mut_noflush(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
+ self.alloc_range_mut_noflush(&(base..(base + size.get())))
+ }
+
/// Checks that the given range of addresses is within the MMIO region, and then maps it
/// appropriately.
fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
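A hedged sketch of the overlap behaviour documented on
map_data_noflush() (names and setup are illustrative; vmbase's memory
tracker is assumed to have been initialised by the normal boot path):

    use core::num::NonZeroUsize;
    use vmbase::memory::{map_data_noflush, SIZE_4KB};

    fn illustrate_overlap(payload_base: usize) {
        let size = NonZeroUsize::new(SIZE_4KB).unwrap();
        // The first mapping of the range is expected to succeed...
        assert!(map_data_noflush(payload_base, size).is_ok());
        // ...while mapping the same (or any overlapping) range again fails.
        assert!(map_data_noflush(payload_base, size).is_err());
    }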