[pvmfw] Move payload_range out of MemoryTracker
This simplifies the later task of moving MemoryTracker out of pvmfw
into vmbase.
Bug: 284462758
Test: m pvmfw_img
Change-Id: I95364c7ec2f48abba8484465ac3cf7a10d41f644
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 66e994b..0731fb8 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -229,6 +229,7 @@
page_table,
crosvm::MEM_START..memory::MAX_ADDR,
crosvm::MMIO_START..crosvm::MMIO_END,
+ memory::appended_payload_range(),
));
let slices = MemorySlices::new(fdt, payload, payload_size)?;
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index ffbb1ca..f259776 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -77,6 +77,7 @@
regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
mmio_range: MemoryRange,
+ payload_range: MemoryRange,
}
/// Errors for MemoryTracker operations.
@@ -149,7 +150,12 @@
const MMIO_CAPACITY: usize = 5;
/// Create a new instance from an active page table, covering the maximum RAM size.
- pub fn new(mut page_table: PageTable, total: MemoryRange, mmio_range: MemoryRange) -> Self {
+ pub fn new(
+ mut page_table: PageTable,
+ total: MemoryRange,
+ mmio_range: MemoryRange,
+ payload_range: MemoryRange,
+ ) -> Self {
assert!(
!total.overlaps(&mmio_range),
"MMIO space should not overlap with the main memory region."
@@ -172,6 +178,7 @@
regions: ArrayVec::new(),
mmio_regions: ArrayVec::new(),
mmio_range,
+ payload_range,
}
}
@@ -346,12 +353,11 @@
// Collect memory ranges for which dirty state is tracked.
let writable_regions =
self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
- let payload_range = appended_payload_range();
// Execute a barrier instruction to ensure all hardware updates to the page table have been
// observed before reading PTE flags to determine dirty state.
dsb!("ish");
// Now flush writable-dirty pages in those regions.
- for range in writable_regions.chain(once(&payload_range)) {
+ for range in writable_regions.chain(once(&self.payload_range)) {
self.page_table
.modify_range(range, &flush_dirty_range)
.map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
@@ -499,7 +505,7 @@
}
/// Returns memory range reserved for the appended payload.
-pub fn appended_payload_range() -> Range<usize> {
+pub fn appended_payload_range() -> MemoryRange {
let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
// pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
let end = align_up(start, SIZE_2MB).unwrap();
@@ -507,7 +513,7 @@
}
/// Region allocated for the stack.
-pub fn stack_range() -> Range<usize> {
+pub fn stack_range() -> MemoryRange {
const STACK_PAGES: usize = 8;
layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)