pvmfw: Zero all scratch memory before guest runs
Zero any memory that could still hold secrets before executing the guest
OS, to minimize the risk of leaking them.
Note that this only covers memory that can't be zeroed from high-level
compiled code (i.e. the .bss and .data sections and stack regions). It
does not zero the received configuration data, which contains the
BccHandover holding the secret CDIs, as that is (and must still be)
zeroed from Rust.
Furthermore, no other region is flushed, so data such as the DT or BCC
that must be made available to the guest OS (even if it doesn't
immediately re-enable the MMU) should still be flushed from Rust.
Remove unnecessary ISB in jump_to_payload().
Bug: 270684188
Test: atest MicrodroidHostTests
Change-Id: I8e923a468d1826c00ce1d0b07e1a91f5d2909f99
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 398c8df..4889c28 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -19,11 +19,14 @@
use crate::fdt;
use crate::heap;
use crate::helpers;
+use crate::helpers::RangeExt as _;
use crate::memory::{MemoryTracker, MEMORY};
use crate::mmu;
use crate::rand;
use core::arch::asm;
+use core::mem::size_of;
use core::num::NonZeroUsize;
+use core::ops::Range;
use core::slice;
use hyp::get_hypervisor;
use log::debug;
@@ -62,7 +65,7 @@
// - can't access MMIO (therefore, no logging)
match main_wrapper(fdt_address as usize, payload_start as usize, payload_size as usize) {
- Ok(entry) => jump_to_payload(fdt_address, entry.try_into().unwrap()),
+ Ok((entry, bcc)) => jump_to_payload(fdt_address, entry.try_into().unwrap(), bcc),
Err(_) => reboot(), // TODO(b/220071963) propagate the reason back to the host.
}
@@ -158,7 +161,11 @@
///
/// Provide the abstractions necessary for start() to abort the pVM boot and for main() to run with
/// the assumption that its environment has been properly configured.
-fn main_wrapper(fdt: usize, payload: usize, payload_size: usize) -> Result<usize, RebootReason> {
+fn main_wrapper(
+ fdt: usize,
+ payload: usize,
+ payload_size: usize,
+) -> Result<(usize, Range<usize>), RebootReason> {
// Limitations in this function:
// - only access MMIO once (and while) it has been mapped and configured
// - only perform logging once the logger has been initialized
@@ -226,7 +233,7 @@
})?;
// This wrapper allows main() to be blissfully ignorant of platform details.
- crate::main(
+ let next_bcc = crate::main(
slices.fdt,
slices.kernel,
slices.ramdisk,
@@ -249,10 +256,11 @@
})?;
MEMORY.lock().take().unwrap();
- Ok(slices.kernel.as_ptr() as usize)
+ Ok((slices.kernel.as_ptr() as usize, next_bcc))
}
-fn jump_to_payload(fdt_address: u64, payload_start: u64) -> ! {
+fn jump_to_payload(fdt_address: u64, payload_start: u64, bcc: Range<usize>) -> ! {
+ const ASM_STP_ALIGN: usize = size_of::<u64>() * 2;
const SCTLR_EL1_RES1: u64 = (0b11 << 28) | (0b101 << 20) | (0b1 << 11);
// Stage 1 instruction access cacheability is unaffected.
const SCTLR_EL1_I: u64 = 0b1 << 12;
@@ -263,12 +271,67 @@
const SCTLR_EL1_VAL: u64 = SCTLR_EL1_RES1 | SCTLR_EL1_ITD | SCTLR_EL1_SED | SCTLR_EL1_I;
+ let scratch = layout::scratch_range();
+
+ assert_ne!(scratch.len(), 0, "scratch memory is empty.");
+ assert_eq!(scratch.start % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+ assert_eq!(scratch.end % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+
+ assert!(bcc.is_within(&scratch));
+ assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
+ assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
+
+ let stack = mmu::stack_range();
+
+ assert_ne!(stack.len(), 0, "stack region is empty.");
+ assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+ assert_eq!(stack.end % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+
+ // Zero all memory that could hold secrets and that can't be safely written to from Rust.
// Disable the exception vector, caches and page table and then jump to the payload at the
// given address, passing it the given FDT pointer.
//
// SAFETY - We're exiting pvmfw by passing the register values we need to a noreturn asm!().
unsafe {
asm!(
+ "cmp {scratch}, {bcc}",
+ "b.hs 1f",
+
+ // Zero .data & .bss until BCC.
+ "0: stp xzr, xzr, [{scratch}], 16",
+ "cmp {scratch}, {bcc}",
+ "b.lo 0b",
+
+ "1:",
+ // Skip BCC.
+ "mov {scratch}, {bcc_end}",
+ "cmp {scratch}, {scratch_end}",
+ "b.hs 1f",
+
+ // Keep zeroing .data & .bss.
+ "0: stp xzr, xzr, [{scratch}], 16",
+ "cmp {scratch}, {scratch_end}",
+ "b.lo 0b",
+
+ "1:",
+ // Flush d-cache over .data & .bss (including BCC).
+ "0: dc cvau, {cache_line}",
+ "add {cache_line}, {cache_line}, {dcache_line_size}",
+ "cmp {cache_line}, {scratch_end}",
+ "b.lo 0b",
+
+ "mov {cache_line}, {stack}",
+ // Zero stack region.
+ "0: stp xzr, xzr, [{stack}], 16",
+ "cmp {stack}, {stack_end}",
+ "b.lo 0b",
+
+ // Flush d-cache over stack region.
+ "0: dc cvau, {cache_line}",
+ "add {cache_line}, {cache_line}, {dcache_line_size}",
+ "cmp {cache_line}, {stack_end}",
+ "b.lo 0b",
+
"msr sctlr_el1, {sctlr_el1_val}",
"isb",
"mov x1, xzr",
@@ -301,13 +364,21 @@
"mov x28, xzr",
"mov x29, xzr",
"msr ttbr0_el1, xzr",
- "isb",
+ // Ensure that CMOs have completed before entering payload.
"dsb nsh",
"br x30",
sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
+ bcc = in(reg) u64::try_from(bcc.start).unwrap(),
+ bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
+ cache_line = in(reg) u64::try_from(scratch.start).unwrap(),
+ scratch = in(reg) u64::try_from(scratch.start).unwrap(),
+ scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
+ stack = in(reg) u64::try_from(stack.start).unwrap(),
+ stack_end = in(reg) u64::try_from(stack.end).unwrap(),
+ dcache_line_size = in(reg) u64::try_from(helpers::min_dcache_line_size()).unwrap(),
in("x0") fdt_address,
in("x30") payload_start,
- options(nomem, noreturn, nostack),
+ options(noreturn),
);
};
}
diff --git a/pvmfw/src/helpers.rs b/pvmfw/src/helpers.rs
index 933a6aa..a6f0dd5 100644
--- a/pvmfw/src/helpers.rs
+++ b/pvmfw/src/helpers.rs
@@ -112,7 +112,8 @@
}
#[inline]
-fn min_dcache_line_size() -> usize {
+/// Read the number of words in the smallest cache line of all the data caches and unified caches.
+pub fn min_dcache_line_size() -> usize {
const DMINLINE_SHIFT: usize = 16;
const DMINLINE_MASK: usize = 0xf;
let ctr_el0 = read_sysreg!("ctr_el0");
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 1c22861..96b707e 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -38,6 +38,7 @@
use alloc::boxed::Box;
use alloc::string::ToString;
+use core::ops::Range;
use crate::dice::PartialInputs;
use crate::entry::RebootReason;
@@ -80,7 +81,7 @@
current_bcc_handover: &[u8],
debug_policy: Option<&mut [u8]>,
memory: &mut MemoryTracker,
-) -> Result<(), RebootReason> {
+) -> Result<Range<usize>, RebootReason> {
info!("pVM firmware");
debug!("FDT: {:?}", fdt.as_ptr());
debug!("Signed kernel: {:?} ({:#x} bytes)", signed_kernel.as_ptr(), signed_kernel.len());
@@ -156,7 +157,13 @@
})?;
info!("Starting payload...");
- Ok(())
+
+ let bcc_range = {
+ let r = next_bcc.as_ptr_range();
+ (r.start as usize)..(r.end as usize)
+ };
+
+ Ok(bcc_range)
}
/// Logs the given PCI error and returns the appropriate `RebootReason`.
diff --git a/pvmfw/src/mmu.rs b/pvmfw/src/mmu.rs
index c21b69b..e33dcba 100644
--- a/pvmfw/src/mmu.rs
+++ b/pvmfw/src/mmu.rs
@@ -46,7 +46,7 @@
}
/// Region allocated for the stack.
-fn stack_range() -> Range<usize> {
+pub fn stack_range() -> Range<usize> {
const STACK_PAGES: usize = 8;
layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)