[pvmfw] Move memory flush functions to vmbase

This facilitates reusing the memory management code in rialto later.

Bug: 284462758
Test: m pvmfw_img
Change-Id: I9a544c783ff322d4894f0b3d7e10c0643908d48f
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index 72be0b0..bda3796 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -69,6 +69,7 @@
         "liblog_rust_nostd",
         "libsmccc",
         "libspin_nostd",
+        "libzeroize_nostd",
     ],
     whole_static_libs: [
         "librust_baremetal",
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 3a52c2d..235c0e0 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -14,7 +14,10 @@
 
 //! Hardware management of the access flag and dirty state.
 
+use super::page_table::is_leaf_pte;
+use super::util::flush_region;
 use crate::{isb, read_sysreg, write_sysreg};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
 
 /// Sets whether the hardware management of access and dirty state is enabled with
 /// the given boolean.
@@ -45,3 +48,23 @@
     const DBM_AVAILABLE: usize = 1 << 1;
     read_sysreg!("id_aa64mmfr1_el1") & DBM_AVAILABLE != 0
 }
+
+/// Flushes the memory range that the descriptor refers to, if the descriptor is in the
+/// writable-dirty state.
+/// The `Result<(), ()>` return type is required by the `aarch64_paging` callback signature,
+/// so the `clippy::result_unit_err` lint cannot be avoided.
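+///
+/// The signature matches the `aarch64_paging` walk callback, so it can be passed straight
+/// to a range walk; a minimal sketch, assuming an `aarch64_paging` mapping named `idmap`
+/// and a `MemoryRegion` named `region`:
+///
+/// ```ignore
+/// idmap.modify_range(&region, &flush_dirty_range)?;
+/// ```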
+#[allow(clippy::result_unit_err)]
+pub fn flush_dirty_range(
+    va_range: &MemoryRegion,
+    desc: &mut Descriptor,
+    level: usize,
+) -> Result<(), ()> {
+    // Only flush ranges corresponding to dirty leaf PTEs.
+    let flags = desc.flags().ok_or(())?;
+    if !is_leaf_pte(&flags, level) {
+        return Ok(());
+    }
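+    // With hardware DBM enabled, a write to the range clears the READ_ONLY bit in the
+    // descriptor, so a writable leaf mapping is treated as (potentially) dirty.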
+    if !flags.contains(Attributes::READ_ONLY) {
+        flush_region(va_range.start().0, va_range.len());
+    }
+    Ok(())
+}
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index 3b1b384..e5e0305 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -19,7 +19,10 @@
 mod shared;
 mod util;
 
-pub use dbm::set_dbm_enabled;
-pub use page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
+pub use dbm::{flush_dirty_range, set_dbm_enabled};
+pub use page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
 pub use shared::MemorySharer;
-pub use util::{page_4kb_of, phys_to_virt, virt_to_phys, SIZE_2MB, SIZE_4KB, SIZE_4MB};
+pub use util::{
+    flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
+    SIZE_2MB, SIZE_4KB, SIZE_4MB,
+};
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index bc71e97..d3564b6 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -108,3 +108,14 @@
         self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
     }
 }
+
+/// Checks whether a PTE at the given level is a page or block descriptor, i.e. a leaf mapping.
+#[inline]
+pub fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
+    const LEAF_PTE_LEVEL: usize = 3;
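+    // In the VMSAv8-64 descriptor format, bit 1 (TABLE_OR_PAGE) means "table" at levels 0-2
+    // and "page" at level 3, so a set bit is a leaf only at level 3, while a clear bit at
+    // levels 0-2 denotes a block mapping, which is also a leaf.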
+    if flags.contains(Attributes::TABLE_OR_PAGE) {
+        level == LEAF_PTE_LEVEL
+    } else {
+        level < LEAF_PTE_LEVEL
+    }
+}
diff --git a/vmbase/src/memory/util.rs b/vmbase/src/memory/util.rs
index 3186409..3739d28 100644
--- a/vmbase/src/memory/util.rs
+++ b/vmbase/src/memory/util.rs
@@ -14,8 +14,11 @@
 
 //! Utility functions for memory management.
 
+use crate::read_sysreg;
 use crate::util::unchecked_align_down;
+use core::arch::asm;
 use core::ptr::NonNull;
+use zeroize::Zeroize;
 
 /// The size of a 4KB memory in bytes.
 pub const SIZE_4KB: usize = 4 << 10;
@@ -24,6 +27,51 @@
 /// The size of a 4MB memory in bytes.
 pub const SIZE_4MB: usize = 4 << 20;
 
+/// Reads the number of words in the smallest cache line of all the data caches and unified caches.
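+///
+/// For example, with 64-byte cache lines, `CTR_EL0.DminLine` reads as 4 and this returns
+/// `1 << 4 == 16` words, i.e. 64 bytes.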
+#[inline]
+pub fn min_dcache_line_size() -> usize {
+    const DMINLINE_SHIFT: usize = 16;
+    const DMINLINE_MASK: usize = 0xf;
+    let ctr_el0 = read_sysreg!("ctr_el0");
+
+    // DminLine: log2 of the number of words in the smallest cache line of all the data caches.
+    let dminline = (ctr_el0 >> DMINLINE_SHIFT) & DMINLINE_MASK;
+
+    1 << dminline
+}
+
+/// Flushes `size` bytes of the data cache by virtual address, cleaning each line to the
+/// point of unification.
+#[inline]
+pub(super) fn flush_region(start: usize, size: usize) {
+    let line_size = min_dcache_line_size();
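+    // Note that `min_dcache_line_size` returns a count of words; using it directly as a
+    // byte stride is conservative (a line may be cleaned more than once) but still covers
+    // the whole range.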
+    let end = start + size;
+    let start = unchecked_align_down(start, line_size);
+
+    for line in (start..end).step_by(line_size) {
+        // SAFETY - Cleaning cache lines shouldn't have Rust-visible side effects.
+        unsafe {
+            asm!(
+                "dc cvau, {x}",
+                x = in(reg) line,
+                options(nomem, nostack, preserves_flags),
+            )
+        }
+    }
+}
+
+/// Flushes the slice to the point of unification.
+#[inline]
+pub fn flush(reg: &[u8]) {
+    flush_region(reg.as_ptr() as usize, reg.len())
+}
+
+/// Overwrites the slice with zeroes and flushes it to the point of unification.
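+///
+/// The flush pushes the zeroes out of the closest data cache to at least the point of
+/// unification, rather than leaving them cached. A minimal sketch, assuming a hypothetical
+/// `secret` buffer:
+///
+/// ```ignore
+/// flushed_zeroize(&mut secret);
+/// ```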
+#[inline]
+pub fn flushed_zeroize(reg: &mut [u8]) {
+    reg.zeroize();
+    flush(reg)
+}
+
 /// Computes the address of the 4KiB page containing a given address.
 pub const fn page_4kb_of(addr: usize) -> usize {
     unchecked_align_down(addr, SIZE_4KB)