pvmfw: Map image footer after dynamic PT switch

Teach the MemoryTracker to map the optional image footer into the
dynamic PTs. As vmbase now knows internally where the footer is
located, there is no longer any need to receive the "payload range"
from the client, so refactor MemoryTracker::new() to stop taking that
argument.

As a result, pvmfw now switches to the dynamic PTs before requesting
the footer to be mapped, which prepares pvmfw (along with other vmbase
clients) for an upcoming change that will activate the dynamic PTs
before the client code is entered from vmbase.
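
A rough sketch of the new ordering in entry.rs (error handling and
logging elided):

    // Switch to the dynamic page tables first...
    MEMORY.lock().replace(MemoryTracker::new(
        page_table,
        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
        crosvm::MMIO_RANGE,
    ));
    // ...then have the MemoryTracker map the image footer.
    let appended_data = get_appended_data_slice()?;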

As the MemoryTracker keeps track of whether the footer has been
mapped, get_appended_data_slice() can now safely return a
Result<&mut [u8]>: it fails if called more than once.
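
For illustration only (not part of this change), a second mapping
attempt is now rejected instead of handing out a second aliasing
&mut slice:

    let mut memory = MEMORY.lock();
    let tracker = memory.as_mut().unwrap();
    let first = tracker.map_image_footer();  // Ok(footer range)
    let second = tracker.map_image_footer(); // Err(FooterAlreadyMapped)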

Note that the MemoryTracker uses dirty-bit tracking for the image
footer; the dirty pages are flushed when switching back to the static
PTs.

Bug: 377276983
Test: m {pvmfw,rialto,vmbase_example_{bios,kernel}}_bin
Test: atest rialto_test vmbase_example.integration_test
Change-Id: I24954b6905db0a0a499f7aa2dedb9a4e5b83d168
diff --git a/guest/pvmfw/src/entry.rs b/guest/pvmfw/src/entry.rs
index 48585f3..e55d4ca 100644
--- a/guest/pvmfw/src/entry.rs
+++ b/guest/pvmfw/src/entry.rs
@@ -31,7 +31,7 @@
     configure_heap, console_writeln,
     layout::{self, crosvm, UART_PAGE_ADDR},
     main,
-    memory::{MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
+    memory::{MemoryTracker, MemoryTrackerError, MEMORY, SIZE_128KB, SIZE_4KB},
     power::reboot,
 };
 use zeroize::Zeroize;
@@ -113,9 +113,17 @@
         RebootReason::InternalError
     })?;
 
-    // SAFETY: We only get the appended payload from here, once. The region was statically mapped,
-    // then remapped by `init_page_table()`.
-    let appended_data = unsafe { get_appended_data_slice() };
+    // Up to this point, we were using the built-in static (from .rodata) page tables.
+    MEMORY.lock().replace(MemoryTracker::new(
+        page_table,
+        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
+        crosvm::MMIO_RANGE,
+    ));
+
+    let appended_data = get_appended_data_slice().map_err(|e| {
+        error!("Failed to map the appended data: {e}");
+        RebootReason::InternalError
+    })?;
 
     let appended = AppendedPayload::new(appended_data).ok_or_else(|| {
         error!("No valid configuration found");
@@ -124,14 +132,6 @@
 
     let config_entries = appended.get_entries();
 
-    // Up to this point, we were using the built-in static (from .rodata) page tables.
-    MEMORY.lock().replace(MemoryTracker::new(
-        page_table,
-        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
-        crosvm::MMIO_RANGE,
-        Some(layout::image_footer_range()),
-    ));
-
     let slices = memory::MemorySlices::new(
         fdt,
         payload,
@@ -321,15 +321,11 @@
     };
 }
 
-/// # Safety
-///
-/// This must only be called once, since we are returning a mutable reference.
-/// The appended data region must be mapped.
-unsafe fn get_appended_data_slice() -> &'static mut [u8] {
-    let range = layout::image_footer_range();
-    // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
-    // objects.
-    unsafe { slice::from_raw_parts_mut(range.start.0 as *mut u8, range.end - range.start) }
+fn get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError> {
+    let range = MEMORY.lock().as_mut().unwrap().map_image_footer()?;
+    // SAFETY: This region was just mapped for the first time (as map_image_footer() didn't fail)
+    // and the linker script prevents it from overlapping with other objects.
+    Ok(unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) })
 }
 
 enum AppendedPayload<'a> {
diff --git a/guest/pvmfw/src/memory.rs b/guest/pvmfw/src/memory.rs
index 7d49bca..64a6850 100644
--- a/guest/pvmfw/src/memory.rs
+++ b/guest/pvmfw/src/memory.rs
@@ -50,7 +50,6 @@
     page_table.map_data(&stack_range().into())?;
     page_table.map_code(&layout::text_range().into())?;
     page_table.map_rodata(&layout::rodata_range().into())?;
-    page_table.map_data_dbm(&layout::image_footer_range().into())?;
     if let Err(e) = page_table.map_device(&layout::console_uart_page().into()) {
         error!("Failed to remap the UART as a dynamic page table entry: {e}");
         return Err(e);
diff --git a/guest/rialto/src/main.rs b/guest/rialto/src/main.rs
index 61e9948..456af7f 100644
--- a/guest/rialto/src/main.rs
+++ b/guest/rialto/src/main.rs
@@ -95,7 +95,6 @@
         page_table,
         crosvm::MEM_START..layout::MAX_VIRT_ADDR,
         crosvm::MMIO_RANGE,
-        None, // Rialto doesn't have any payload for now.
     ));
 
     let fdt_range = MEMORY
diff --git a/libs/libvmbase/src/memory/error.rs b/libs/libvmbase/src/memory/error.rs
index 870e4c9..1d42a04 100644
--- a/libs/libvmbase/src/memory/error.rs
+++ b/libs/libvmbase/src/memory/error.rs
@@ -43,6 +43,8 @@
     SharedMemorySetFailure,
     /// Failure to set `SHARED_POOL`.
     SharedPoolSetFailure,
+    /// Rejected request to map the image footer when it is already mapped.
+    FooterAlreadyMapped,
     /// Invalid page table entry.
     InvalidPte,
     /// Failed to flush memory region.
@@ -69,6 +71,7 @@
             Self::Hypervisor(e) => e.fmt(f),
             Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
             Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
+            Self::FooterAlreadyMapped => write!(f, "Refused to map image footer again"),
             Self::InvalidPte => write!(f, "Page table entry is not valid"),
             Self::FlushRegionFailed => write!(f, "Failed to flush memory region"),
             Self::SetPteDirtyFailed => write!(f, "Failed to set PTE dirty state"),
diff --git a/libs/libvmbase/src/memory/tracker.rs b/libs/libvmbase/src/memory/tracker.rs
index c1f5d54..d699c4c 100644
--- a/libs/libvmbase/src/memory/tracker.rs
+++ b/libs/libvmbase/src/memory/tracker.rs
@@ -19,6 +19,7 @@
 use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
 use super::shared::{SHARED_MEMORY, SHARED_POOL};
 use crate::dsb;
+use crate::layout;
 use crate::memory::shared::{MemoryRange, MemorySharer, MmioSharer};
 use crate::util::RangeExt as _;
 use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
@@ -62,7 +63,7 @@
     regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
     mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
     mmio_range: MemoryRange,
-    payload_range: Option<MemoryRange>,
+    image_footer_mapped: bool,
     mmio_sharer: MmioSharer,
 }
 
@@ -71,12 +72,7 @@
     const MMIO_CAPACITY: usize = 5;
 
     /// Creates a new instance from an active page table, covering the maximum RAM size.
-    pub fn new(
-        mut page_table: PageTable,
-        total: MemoryRange,
-        mmio_range: MemoryRange,
-        payload_range: Option<Range<VirtualAddress>>,
-    ) -> Self {
+    pub fn new(mut page_table: PageTable, total: MemoryRange, mmio_range: MemoryRange) -> Self {
         assert!(
             !total.overlaps(&mmio_range),
             "MMIO space should not overlap with the main memory region."
@@ -99,7 +95,7 @@
             regions: ArrayVec::new(),
             mmio_regions: ArrayVec::new(),
             mmio_range,
-            payload_range: payload_range.map(|r| r.start.0..r.end.0),
+            image_footer_mapped: false,
             mmio_sharer: MmioSharer::new().unwrap(),
         }
     }
@@ -163,6 +159,20 @@
         self.add(region)
     }
 
+    /// Maps the image footer with read-write permissions, using dirty-bit tracking.
+    pub fn map_image_footer(&mut self) -> Result<MemoryRange> {
+        if self.image_footer_mapped {
+            return Err(MemoryTrackerError::FooterAlreadyMapped);
+        }
+        let range = layout::image_footer_range();
+        self.page_table.map_data_dbm(&range.clone().into()).map_err(|e| {
+            error!("Error during image footer map: {e}");
+            MemoryTrackerError::FailedToMap
+        })?;
+        self.image_footer_mapped = true;
+        Ok(range.start.0..range.end.0)
+    }
+
     /// Allocate the address range for a const slice; returns None if failed.
     pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
         self.alloc_range(&(base..(base + size.get())))
@@ -336,11 +346,17 @@
         // observed before reading PTE flags to determine dirty state.
         dsb!("ish");
         // Now flush writable-dirty pages in those regions.
-        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
+        for range in writable_regions {
             self.page_table
                 .walk_range(&get_va_range(range), &flush_dirty_range)
                 .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
         }
+        if self.image_footer_mapped {
+            let range = layout::image_footer_range();
+            self.page_table
+                .walk_range(&range.into(), &flush_dirty_range)
+                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
+        }
         Ok(())
     }