[pvmfw] Move memory flush functions to vmbase
To facilitate reusing the memory management code in rialto later.
Bug: 284462758
Test: m pvmfw_img
Change-Id: I9a544c783ff322d4894f0b3d7e10c0643908d48f
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index 0aa1273..3116456 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -15,10 +15,10 @@
//! Support for DICE derivation and BCC generation.
use crate::cstr;
-use crate::helpers::flushed_zeroize;
use core::ffi::c_void;
use core::mem::size_of;
use core::slice;
+use vmbase::memory::flushed_zeroize;
use diced_open_dice::{
bcc_format_config_descriptor, bcc_handover_main_flow, hash, Config, DiceMode, Hash,
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 6319863..a2bd156 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -18,7 +18,6 @@
use crate::crypto;
use crate::fdt;
use crate::heap;
-use crate::helpers;
use crate::helpers::RangeExt as _;
use crate::memory::{self, MemoryTracker, MEMORY};
use crate::rand;
@@ -35,7 +34,7 @@
use log::LevelFilter;
use vmbase::{
console, layout, logger, main,
- memory::{SIZE_2MB, SIZE_4KB},
+ memory::{min_dcache_line_size, SIZE_2MB, SIZE_4KB},
power::reboot,
};
use zeroize::Zeroize;
@@ -373,7 +372,7 @@
scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
stack = in(reg) u64::try_from(stack.start).unwrap(),
stack_end = in(reg) u64::try_from(stack.end).unwrap(),
- dcache_line_size = in(reg) u64::try_from(helpers::min_dcache_line_size()).unwrap(),
+ dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
in("x0") fdt_address,
in("x30") payload_start,
options(noreturn),
diff --git a/pvmfw/src/helpers.rs b/pvmfw/src/helpers.rs
index bbec7a8..0a32832 100644
--- a/pvmfw/src/helpers.rs
+++ b/pvmfw/src/helpers.rs
@@ -14,61 +14,12 @@
//! Miscellaneous helper functions.
-use core::arch::asm;
use core::ops::Range;
use vmbase::memory::SIZE_4KB;
-use vmbase::read_sysreg;
-use vmbase::util::unchecked_align_down;
-use zeroize::Zeroize;
pub const GUEST_PAGE_SIZE: usize = SIZE_4KB;
pub const PVMFW_PAGE_SIZE: usize = SIZE_4KB;
-#[inline]
-/// Read the number of words in the smallest cache line of all the data caches and unified caches.
-pub fn min_dcache_line_size() -> usize {
- const DMINLINE_SHIFT: usize = 16;
- const DMINLINE_MASK: usize = 0xf;
- let ctr_el0 = read_sysreg!("ctr_el0");
-
- // DminLine: log2 of the number of words in the smallest cache line of all the data caches.
- let dminline = (ctr_el0 >> DMINLINE_SHIFT) & DMINLINE_MASK;
-
- 1 << dminline
-}
-
-/// Flush `size` bytes of data cache by virtual address.
-#[inline]
-pub fn flush_region(start: usize, size: usize) {
- let line_size = min_dcache_line_size();
- let end = start + size;
- let start = unchecked_align_down(start, line_size);
-
- for line in (start..end).step_by(line_size) {
- // SAFETY - Clearing cache lines shouldn't have Rust-visible side effects.
- unsafe {
- asm!(
- "dc cvau, {x}",
- x = in(reg) line,
- options(nomem, nostack, preserves_flags),
- )
- }
- }
-}
-
-#[inline]
-/// Flushes the slice to the point of unification.
-pub fn flush(reg: &[u8]) {
- flush_region(reg.as_ptr() as usize, reg.len())
-}
-
-#[inline]
-/// Overwrites the slice with zeroes, to the point of unification.
-pub fn flushed_zeroize(reg: &mut [u8]) {
- reg.zeroize();
- flush(reg)
-}
-
/// Trait to check containment of one range within another.
pub(crate) trait RangeExt {
/// Returns true if `self` is contained within the `other` range.
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 5108eb4..8d48098 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -40,7 +40,6 @@
use crate::dice::PartialInputs;
use crate::entry::RebootReason;
use crate::fdt::modify_for_next_stage;
-use crate::helpers::flush;
use crate::helpers::GUEST_PAGE_SIZE;
use crate::instance::get_or_generate_instance_salt;
use crate::memory::MEMORY;
@@ -55,6 +54,7 @@
use pvmfw_avb::Capability;
use pvmfw_avb::DebugLevel;
use pvmfw_embedded_key::PUBLIC_KEY;
+use vmbase::memory::flush;
const NEXT_BCC_SIZE: usize = GUEST_PAGE_SIZE;
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 1467611..44db85f 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -16,7 +16,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
-use crate::helpers::{self, RangeExt, PVMFW_PAGE_SIZE};
+use crate::helpers::{RangeExt, PVMFW_PAGE_SIZE};
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use aarch64_paging::MapError;
@@ -41,8 +41,8 @@
use vmbase::{
dsb, isb, layout,
memory::{
- page_4kb_of, set_dbm_enabled, MemorySharer, PageTable, MMIO_LAZY_MAP_FLAG, SIZE_2MB,
- SIZE_4KB, SIZE_4MB,
+ flush_dirty_range, is_leaf_pte, page_4kb_of, set_dbm_enabled, MemorySharer, PageTable,
+ MMIO_LAZY_MAP_FLAG, SIZE_2MB, SIZE_4KB, SIZE_4MB,
},
tlbi,
util::align_up,
@@ -439,17 +439,6 @@
Ok(())
}
-/// Checks whether a PTE at given level is a page or block descriptor.
-#[inline]
-fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
- const LEAF_PTE_LEVEL: usize = 3;
- if flags.contains(Attributes::TABLE_OR_PAGE) {
- level == LEAF_PTE_LEVEL
- } else {
- level < LEAF_PTE_LEVEL
- }
-}
-
/// Checks whether block flags indicate it should be MMIO guard mapped.
fn verify_lazy_mapped_block(
_range: &VaRange,
@@ -503,23 +492,6 @@
Ok(())
}
-/// Flushes a memory range the descriptor refers to, if the descriptor is in writable-dirty state.
-fn flush_dirty_range(
- va_range: &VaRange,
- desc: &mut Descriptor,
- level: usize,
-) -> result::Result<(), ()> {
- // Only flush ranges corresponding to dirty leaf PTEs.
- let flags = desc.flags().ok_or(())?;
- if !is_leaf_pte(&flags, level) {
- return Ok(());
- }
- if !flags.contains(Attributes::READ_ONLY) {
- helpers::flush_region(va_range.start().0, va_range.len());
- }
- Ok(())
-}
-
/// Clears read-only flag on a PTE, making it writable-dirty. Used when dirty state is managed
/// in software to handle permission faults on read-only descriptors.
fn mark_dirty_block(
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index 72be0b0..bda3796 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -69,6 +69,7 @@
"liblog_rust_nostd",
"libsmccc",
"libspin_nostd",
+ "libzeroize_nostd",
],
whole_static_libs: [
"librust_baremetal",
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index 3a52c2d..235c0e0 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -14,7 +14,10 @@
//! Hardware management of the access flag and dirty state.
+use super::page_table::is_leaf_pte;
+use super::util::flush_region;
use crate::{isb, read_sysreg, write_sysreg};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion};
/// Sets whether the hardware management of access and dirty state is enabled with
/// the given boolean.
@@ -45,3 +48,23 @@
const DBM_AVAILABLE: usize = 1 << 1;
read_sysreg!("id_aa64mmfr1_el1") & DBM_AVAILABLE != 0
}
+
+/// Flushes a memory range the descriptor refers to, if the descriptor is in writable-dirty state.
+/// The `Result<(), ()>` return type is mandated by the callback signature expected by the
+/// `aarch64_paging` crate, so the `clippy::result_unit_err` lint cannot be avoided here.
+#[allow(clippy::result_unit_err)]
+pub fn flush_dirty_range(
+ va_range: &MemoryRegion,
+ desc: &mut Descriptor,
+ level: usize,
+) -> Result<(), ()> {
+ // Only flush ranges corresponding to dirty leaf PTEs.
+ let flags = desc.flags().ok_or(())?;
+ if !is_leaf_pte(&flags, level) {
+ return Ok(());
+ }
+ if !flags.contains(Attributes::READ_ONLY) {
+ flush_region(va_range.start().0, va_range.len());
+ }
+ Ok(())
+}
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index 3b1b384..e5e0305 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -19,7 +19,10 @@
mod shared;
mod util;
-pub use dbm::set_dbm_enabled;
-pub use page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
+pub use dbm::{flush_dirty_range, set_dbm_enabled};
+pub use page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
pub use shared::MemorySharer;
-pub use util::{page_4kb_of, phys_to_virt, virt_to_phys, SIZE_2MB, SIZE_4KB, SIZE_4MB};
+pub use util::{
+ flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
+ SIZE_2MB, SIZE_4KB, SIZE_4MB,
+};
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index bc71e97..d3564b6 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -108,3 +108,14 @@
self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
}
}
+
+/// Checks whether a PTE at given level is a page or block descriptor.
+#[inline]
+pub fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
+ const LEAF_PTE_LEVEL: usize = 3;
+ if flags.contains(Attributes::TABLE_OR_PAGE) {
+ level == LEAF_PTE_LEVEL
+ } else {
+ level < LEAF_PTE_LEVEL
+ }
+}
diff --git a/vmbase/src/memory/util.rs b/vmbase/src/memory/util.rs
index 3186409..3739d28 100644
--- a/vmbase/src/memory/util.rs
+++ b/vmbase/src/memory/util.rs
@@ -14,8 +14,11 @@
//! Utility functions for memory management.
+use crate::read_sysreg;
use crate::util::unchecked_align_down;
+use core::arch::asm;
use core::ptr::NonNull;
+use zeroize::Zeroize;
/// The size of a 4KB memory in bytes.
pub const SIZE_4KB: usize = 4 << 10;
@@ -24,6 +27,51 @@
/// The size of a 4MB memory in bytes.
pub const SIZE_4MB: usize = 4 << 20;
+/// Reads the number of words in the smallest cache line of all the data caches and unified caches.
+#[inline]
+pub fn min_dcache_line_size() -> usize {
+ const DMINLINE_SHIFT: usize = 16;
+ const DMINLINE_MASK: usize = 0xf;
+ let ctr_el0 = read_sysreg!("ctr_el0");
+
+ // DminLine: log2 of the number of words in the smallest cache line of all the data caches.
+ let dminline = (ctr_el0 >> DMINLINE_SHIFT) & DMINLINE_MASK;
+
+ 1 << dminline
+}
+
+/// Flushes `size` bytes of data cache by virtual address.
+#[inline]
+pub(super) fn flush_region(start: usize, size: usize) {
+ let line_size = min_dcache_line_size();
+ let end = start + size;
+ let start = unchecked_align_down(start, line_size);
+
+ for line in (start..end).step_by(line_size) {
+ // SAFETY - Clearing cache lines shouldn't have Rust-visible side effects.
+ unsafe {
+ asm!(
+ "dc cvau, {x}",
+ x = in(reg) line,
+ options(nomem, nostack, preserves_flags),
+ )
+ }
+ }
+}
+
+/// Flushes the slice to the point of unification.
+#[inline]
+pub fn flush(reg: &[u8]) {
+ flush_region(reg.as_ptr() as usize, reg.len())
+}
+
+/// Overwrites the slice with zeroes, to the point of unification.
+#[inline]
+pub fn flushed_zeroize(reg: &mut [u8]) {
+ reg.zeroize();
+ flush(reg)
+}
+
/// Computes the address of the 4KiB page containing a given address.
pub const fn page_4kb_of(addr: usize) -> usize {
unchecked_align_down(addr, SIZE_4KB)