pvmfw: Implement software dirty state handling

Flush only dirty pages when dropping RW memory regions. Implement
handling of the dirty bit in software. Mark writable regions read-only
and make the pages writable-dirty when an access causes a permission
fault.

Bug: 269738062
Test: atest MicrodroidTestApp

Change-Id: I2e73a7cc867bae8b68c2a3b68d382405327f99e8
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 4d2d696..ca74740 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -35,6 +35,7 @@
 use log::warn;
 use log::LevelFilter;
 use vmbase::{console, layout, logger, main, power::reboot};
+use zeroize::Zeroize;
 
 #[derive(Debug, Clone)]
 pub enum RebootReason {
@@ -82,18 +83,13 @@
 }
 
 impl<'a> MemorySlices<'a> {
-    fn new(
-        fdt: usize,
-        kernel: usize,
-        kernel_size: usize,
-        memory: &mut MemoryTracker,
-    ) -> Result<Self, RebootReason> {
+    fn new(fdt: usize, kernel: usize, kernel_size: usize) -> Result<Self, RebootReason> {
         // SAFETY - SIZE_2MB is non-zero.
         const FDT_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(helpers::SIZE_2MB) };
         // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
         // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
         // overwrite with the template DT and apply the DTBO.
-        let range = memory.alloc_mut(fdt, FDT_SIZE).map_err(|e| {
+        let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, FDT_SIZE).map_err(|e| {
             error!("Failed to allocate the FDT range: {e}");
             RebootReason::InternalError
         })?;
@@ -110,13 +106,13 @@
 
         let memory_range = info.memory_range;
         debug!("Resizing MemoryTracker to range {memory_range:#x?}");
-        memory.shrink(&memory_range).map_err(|_| {
-            error!("Failed to use memory range value from DT: {memory_range:#x?}");
+        MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
+            error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
             RebootReason::InvalidFdt
         })?;
 
         if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
-            memory.init_dynamic_shared_pool().map_err(|e| {
+            MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool().map_err(|e| {
                 error!("Failed to initialize dynamically shared pool: {e}");
                 RebootReason::InternalError
             })?;
@@ -126,14 +122,14 @@
                 RebootReason::InvalidFdt
             })?;
 
-            memory.init_static_shared_pool(range).map_err(|e| {
+            MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
                 error!("Failed to initialize pre-shared pool {e}");
                 RebootReason::InvalidFdt
             })?;
         }
 
         let kernel_range = if let Some(r) = info.kernel_range {
-            memory.alloc_range(&r).map_err(|e| {
+            MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
                 error!("Failed to obtain the kernel range with DT range: {e}");
                 RebootReason::InternalError
             })?
@@ -145,7 +141,7 @@
                 RebootReason::InvalidPayload
             })?;
 
-            memory.alloc(kernel, kernel_size).map_err(|e| {
+            MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
                 error!("Failed to obtain the kernel range with legacy range: {e}");
                 RebootReason::InternalError
             })?
@@ -160,7 +156,7 @@
 
         let ramdisk = if let Some(r) = info.initrd_range {
             debug!("Located ramdisk at {r:?}");
-            let r = memory.alloc_range(&r).map_err(|e| {
+            let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
                 error!("Failed to obtain the initrd range: {e}");
                 RebootReason::InvalidRamdisk
             })?;
@@ -242,7 +238,7 @@
     debug!("... Success!");
 
     MEMORY.lock().replace(MemoryTracker::new(page_table));
-    let slices = MemorySlices::new(fdt, payload, payload_size, MEMORY.lock().as_mut().unwrap())?;
+    let slices = MemorySlices::new(fdt, payload, payload_size)?;
 
     rand::init().map_err(|e| {
         error!("Failed to initialize rand: {e}");
@@ -252,7 +248,8 @@
     // This wrapper allows main() to be blissfully ignorant of platform details.
     let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
 
-    helpers::flushed_zeroize(bcc_slice);
+    // Writable-dirty regions will be flushed when MemoryTracker is dropped.
+    bcc_slice.zeroize();
 
     info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
     MEMORY.lock().as_mut().unwrap().mmio_unmap_all().map_err(|e| {
@@ -395,13 +392,10 @@
 }
 
 unsafe fn get_appended_data_slice() -> &'static mut [u8] {
-    let base = helpers::align_up(layout::binary_end(), helpers::SIZE_4KB).unwrap();
-    // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
-    let size = helpers::align_up(base, helpers::SIZE_2MB).unwrap() - base;
-
+    let range = mmu::PageTable::appended_payload_range();
     // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
     // objects.
-    unsafe { slice::from_raw_parts_mut(base as *mut u8, size) }
+    unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) }
 }
 
 enum AppendedConfigType {