Merge "[avb] Reorganizing the modules in avb/descriptor"
diff --git a/libs/libfdt/Android.bp b/libs/libfdt/Android.bp
index 72399b0..5a729f1 100644
--- a/libs/libfdt/Android.bp
+++ b/libs/libfdt/Android.bp
@@ -37,6 +37,7 @@
],
rustlibs: [
"liblibfdt_bindgen",
+ "libzerocopy_nostd",
],
whole_static_libs: [
"libfdt",
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 9785941..df1058e 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -26,6 +26,7 @@
use core::fmt;
use core::mem;
use core::result;
+use zerocopy::AsBytes as _;
/// Error type corresponding to libfdt error codes.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
@@ -441,6 +442,13 @@
fdt_err_expect_zero(ret)
}
+ /// Replace the value of the given (address, size) pair property with the given value,
+ /// ensuring that the new value has the same length as the current one.
+ pub fn setprop_addrrange_inplace(&mut self, name: &CStr, addr: u64, size: u64) -> Result<()> {
+ let pair = [addr.to_be(), size.to_be()];
+ self.setprop_inplace(name, pair.as_bytes())
+ }
+
/// Create or change a flag-like empty property.
pub fn setprop_empty(&mut self, name: &CStr) -> Result<()> {
self.setprop(name, &[])
@@ -459,6 +467,17 @@
fdt_err_expect_zero(ret)
}
+ /// Overwrite the given property with FDT_NOP, effectively removing it from the DT.
+ pub fn nop_property(&mut self, name: &CStr) -> Result<()> {
+ // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) when the
+ // library locates the node's property.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_nop_property(self.fdt.as_mut_ptr(), self.offset, name.as_ptr())
+ };
+
+ fdt_err_expect_zero(ret)
+ }
+
/// Reduce the size of the given property to new_size
pub fn trimprop(&mut self, name: &CStr, new_size: usize) -> Result<()> {
let (prop, len) =
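The two helpers added above are designed to pair up: setprop_addrrange_inplace rewrites an existing (address, size) property without resizing the tree, and nop_property retires a property by overwriting it with FDT_NOP tags, which keeps all node offsets stable (unlike fdt_delprop). A minimal usage sketch, assuming the cstr! macro and FdtError are in scope as in pvmfw/src/fdt.rs; the compatible string, property names, and values are hypothetical:

    // Hypothetical: point a node's "reg" at a new base/size and retire its "status".
    fn repoint_device(fdt: &mut Fdt) -> libfdt::Result<()> {
        let mut node = fdt
            .root_mut()?
            .next_compatible(cstr!("vendor,example-device"))?
            .ok_or(FdtError::NotFound)?;
        // Succeeds only if "reg" already holds exactly 16 bytes (two big-endian u64s).
        node.setprop_addrrange_inplace(cstr!("reg"), 0x8000_0000, 0x1_0000)?;
        // Overwritten with FDT_NOP rather than deleted, so other offsets stay valid.
        node.nop_property(cstr!("status"))?;
        Ok(())
    }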
diff --git a/pvmfw/platform.dts b/pvmfw/platform.dts
index a7b1de7..74439d9 100644
--- a/pvmfw/platform.dts
+++ b/pvmfw/platform.dts
@@ -37,6 +37,7 @@
ranges;
swiotlb: restricted_dma_reserved {
compatible = "restricted-dma-pool";
+ reg = <PLACEHOLDER4>;
size = <PLACEHOLDER2>;
alignment = <PLACEHOLDER2>;
};
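The new reg placeholder is needed because the in-place FDT setters cannot grow the tree: fdt_setprop_inplace (and therefore setprop_addrrange_inplace) fails with FDT_ERR_NOTFOUND if the property is absent and FDT_ERR_NOSPACE if its length differs. Pre-allocating reg with four cells (16 bytes) lets patch_swiotlb_info, further down in this change, overwrite it in place:

    // Sketch: without the <PLACEHOLDER4> reg property in the template DT, this
    // call in patch_swiotlb_info would surface as FdtError::NotFound instead of
    // patching the property in place.
    node.setprop_addrrange_inplace(cstr!("reg"), addr, size)?;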
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index d172474..4d2d696 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -250,14 +250,7 @@
})?;
// This wrapper allows main() to be blissfully ignorant of platform details.
- let next_bcc = crate::main(
- slices.fdt,
- slices.kernel,
- slices.ramdisk,
- bcc_slice,
- debug_policy,
- MEMORY.lock().as_mut().unwrap(),
- )?;
+ let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
helpers::flushed_zeroize(bcc_slice);
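main() no longer takes the MemoryTracker as a parameter; it and the exception handler now reach it through the MEMORY global. The declaration is not part of this diff, but the call sites (MEMORY.lock().as_mut().unwrap() here and in main.rs, MEMORY.try_lock() in exceptions.rs) suggest a shape like this sketch, assuming the spin crate's SpinMutex:

    // Assumed declaration in pvmfw/src/memory.rs, inferred from the call sites:
    pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

The fault handler has to use try_lock() rather than lock(): a data abort taken while the current context already holds the lock would otherwise spin forever, whereas failing with PageTableUnavailable at least reports the problem.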
diff --git a/pvmfw/src/exceptions.rs b/pvmfw/src/exceptions.rs
index a6ac4fe..39641b0 100644
--- a/pvmfw/src/exceptions.rs
+++ b/pvmfw/src/exceptions.rs
@@ -14,6 +14,7 @@
//! Exception handlers.
+use crate::memory::{MemoryTrackerError, MEMORY};
use crate::{helpers::page_4kb_of, read_sysreg};
use core::fmt;
use vmbase::console;
@@ -24,12 +25,24 @@
#[derive(Debug)]
enum HandleExceptionError {
+ PageTableUnavailable,
+ PageTableNotInitialized,
+ InternalError(MemoryTrackerError),
UnknownException,
}
+impl From<MemoryTrackerError> for HandleExceptionError {
+ fn from(other: MemoryTrackerError) -> Self {
+ Self::InternalError(other)
+ }
+}
+
impl fmt::Display for HandleExceptionError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
+ Self::PageTableUnavailable => write!(f, "Page table is not available."),
+ Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
+ Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
}
}
@@ -76,8 +89,17 @@
}
}
-fn handle_exception(_esr: Esr, _far: usize) -> Result<(), HandleExceptionError> {
- Err(HandleExceptionError::UnknownException)
+fn handle_exception(esr: Esr, far: usize) -> Result<(), HandleExceptionError> {
+ // Handle all translation faults, on both reads and writes, by MMIO guard
+ // mapping the lazily flagged invalid page or block that caused the exception.
+ match esr {
+ Esr::DataAbortTranslationFault => {
+ let mut locked = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = locked.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_mmio_fault(far)?)
+ }
+ _ => Err(HandleExceptionError::UnknownException),
+ }
}
#[inline]
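For reference, Esr::DataAbortTranslationFault corresponds to ESR_EL1 reporting EC 0x25 (data abort without a change in exception level) with a DFSC of 0b0001xx (translation fault, levels 0 to 3). A hedged sketch of the decoding this match relies on; the real Esr enum is presumably defined earlier in this file:

    // Translation faults on pvmfw's own accesses: EC = 0b100101, DFSC = 0b0001xx.
    fn is_data_abort_translation_fault(esr: u64) -> bool {
        let ec = (esr >> 26) & 0x3f; // exception class
        let dfsc = esr & 0x3f; // data fault status code
        ec == 0b100101 && (dfsc & 0b111100) == 0b000100
    }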
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index 98802b4..c0241ca 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -440,7 +440,7 @@
pub struct SwiotlbInfo {
addr: Option<usize>,
size: usize,
- align: usize,
+ align: Option<usize>,
}
impl SwiotlbInfo {
@@ -452,17 +452,16 @@
fn read_swiotlb_info_from(fdt: &Fdt) -> libfdt::Result<SwiotlbInfo> {
let node =
fdt.compatible_nodes(cstr!("restricted-dma-pool"))?.next().ok_or(FdtError::NotFound)?;
- let align =
- node.getprop_u64(cstr!("alignment"))?.ok_or(FdtError::NotFound)?.try_into().unwrap();
- let (addr, size) = if let Some(mut reg) = node.reg()? {
+ let (addr, size, align) = if let Some(mut reg) = node.reg()? {
let reg = reg.next().ok_or(FdtError::NotFound)?;
let size = reg.size.ok_or(FdtError::NotFound)?;
reg.addr.checked_add(size).ok_or(FdtError::BadValue)?;
- (Some(reg.addr.try_into().unwrap()), size.try_into().unwrap())
+ (Some(reg.addr.try_into().unwrap()), size.try_into().unwrap(), None)
} else {
- let size = node.getprop_u64(cstr!("size"))?.ok_or(FdtError::NotFound)?.try_into().unwrap();
- (None, size)
+ let size = node.getprop_u64(cstr!("size"))?.ok_or(FdtError::NotFound)?;
+ let align = node.getprop_u64(cstr!("alignment"))?.ok_or(FdtError::NotFound)?;
+ (None, size.try_into().unwrap(), Some(align.try_into().unwrap()))
};
Ok(SwiotlbInfo { addr, size, align })
@@ -480,7 +479,7 @@
return Err(RebootReason::InvalidFdt);
}
- if (align % GUEST_PAGE_SIZE) != 0 {
+ if let Some(align) = align.filter(|&a| a % GUEST_PAGE_SIZE != 0) {
error!("Invalid swiotlb alignment {:#x}", align);
return Err(RebootReason::InvalidFdt);
}
@@ -498,16 +497,19 @@
fn patch_swiotlb_info(fdt: &mut Fdt, swiotlb_info: &SwiotlbInfo) -> libfdt::Result<()> {
let mut node =
fdt.root_mut()?.next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
- node.setprop_inplace(cstr!("alignment"), &swiotlb_info.align.to_be_bytes())?;
if let Some(range) = swiotlb_info.fixed_range() {
- node.appendprop_addrrange(
+ node.setprop_addrrange_inplace(
cstr!("reg"),
range.start.try_into().unwrap(),
range.len().try_into().unwrap(),
)?;
+ node.nop_property(cstr!("size"))?;
+ node.nop_property(cstr!("alignment"))?;
} else {
+ node.nop_property(cstr!("reg"))?;
node.setprop_inplace(cstr!("size"), &swiotlb_info.size.to_be_bytes())?;
+ node.setprop_inplace(cstr!("alignment"), &swiotlb_info.align.unwrap().to_be_bytes())?;
}
Ok(())
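After this change the two SwiotlbInfo shapes are mutually exclusive: a fixed pool carries addr with align == None, while a dynamically allocated pool carries size and align with addr == None. read_swiotlb_info_from enforces that invariant, which is why the align.unwrap() in the else branch above cannot panic. fixed_range() is not shown in this diff; from its call sites it is presumably the obvious projection:

    // Assumed shape of fixed_range(), inferred from its uses in patch_swiotlb_info:
    impl SwiotlbInfo {
        pub fn fixed_range(&self) -> Option<Range<usize>> {
            self.addr.map(|addr| addr..addr + self.size)
        }
    }

Whichever shape is unused is NOPed out of the DT, so the payload only ever sees one consistent description of the pool.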
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 3c0acc7..fdc9407 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -44,7 +44,7 @@
use crate::helpers::flush;
use crate::helpers::GUEST_PAGE_SIZE;
use crate::instance::get_or_generate_instance_salt;
-use crate::memory::MemoryTracker;
+use crate::memory::MEMORY;
use crate::virtio::pci;
use alloc::boxed::Box;
use core::ops::Range;
@@ -64,7 +64,6 @@
ramdisk: Option<&[u8]>,
current_bcc_handover: &[u8],
mut debug_policy: Option<&mut [u8]>,
- memory: &mut MemoryTracker,
) -> Result<Range<usize>, RebootReason> {
info!("pVM firmware");
debug!("FDT: {:?}", fdt.as_ptr());
@@ -99,7 +98,7 @@
// Set up PCI bus for VirtIO devices.
let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
debug!("PCI: {:#x?}", pci_info);
- let mut pci_root = pci::initialise(pci_info, memory)?;
+ let mut pci_root = pci::initialise(pci_info, MEMORY.lock().as_mut().unwrap())?;
let verified_boot_data = verify_payload(signed_kernel, ramdisk, PUBLIC_KEY).map_err(|e| {
error!("Failed to verify the payload: {e}");
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 0656321..1a2b4b7 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -16,8 +16,9 @@
#![deny(unsafe_op_in_unsafe_fn)]
-use crate::helpers::{self, page_4kb_of, RangeExt, SIZE_4KB, SIZE_4MB};
+use crate::helpers::{self, page_4kb_of, RangeExt, PVMFW_PAGE_SIZE, SIZE_4MB};
use crate::mmu;
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
@@ -110,12 +111,16 @@
Overlaps,
/// Region couldn't be mapped.
FailedToMap,
+ /// Region couldn't be unmapped.
+ FailedToUnmap,
/// Error from the interaction with the hypervisor.
Hypervisor(hyp::Error),
/// Failure to set `SHARED_MEMORY`.
SharedMemorySetFailure,
/// Failure to set `SHARED_POOL`.
SharedPoolSetFailure,
+ /// Invalid page table entry.
+ InvalidPte,
}
impl fmt::Display for MemoryTrackerError {
@@ -128,9 +133,11 @@
Self::OutOfRange => write!(f, "Region is out of the tracked memory address space"),
Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
Self::FailedToMap => write!(f, "Failed to map the new region"),
+ Self::FailedToUnmap => write!(f, "Failed to unmap the new region"),
Self::Hypervisor(e) => e.fmt(f),
Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
+ Self::InvalidPte => write!(f, "Page table entry is not valid"),
}
}
}
@@ -279,15 +286,11 @@
return Err(MemoryTrackerError::Full);
}
- self.page_table.map_device(&range).map_err(|e| {
+ self.page_table.map_device_lazy(&range).map_err(|e| {
error!("Error during MMIO device mapping: {e}");
MemoryTrackerError::FailedToMap
})?;
- for page_base in page_iterator(&range) {
- get_hypervisor().mmio_guard_map(page_base)?;
- }
-
if self.mmio_regions.try_push(range).is_some() {
return Err(MemoryTrackerError::Full);
}
@@ -322,13 +325,12 @@
/// Unmaps all tracked MMIO regions from the MMIO guard.
///
/// Note that they are not unmapped from the page table.
- pub fn mmio_unmap_all(&self) -> Result<()> {
- for region in &self.mmio_regions {
- for page_base in page_iterator(region) {
- get_hypervisor().mmio_guard_unmap(page_base)?;
- }
+ pub fn mmio_unmap_all(&mut self) -> Result<()> {
+ for range in &self.mmio_regions {
+ self.page_table
+ .modify_range(range, &mmio_guard_unmap_page)
+ .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
}
-
Ok(())
}
@@ -372,6 +374,18 @@
pub fn unshare_all_memory(&mut self) {
drop(SHARED_MEMORY.lock().take());
}
+
+ /// Handles a translation fault on a page flagged for lazy MMIO mapping by MMIO guard
+ /// mapping it and validating its page table entry. Breaks apart a block entry if required.
+ pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
+ let page_range = page_4kb_of(addr)..page_4kb_of(addr) + PVMFW_PAGE_SIZE;
+ self.page_table
+ .modify_range(&page_range, &verify_lazy_mapped_block)
+ .map_err(|_| MemoryTrackerError::InvalidPte)?;
+ get_hypervisor().mmio_guard_map(page_range.start)?;
+ // Maps a single device page, breaking up block mappings if necessary.
+ self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
+ }
}
impl Drop for MemoryTracker {
@@ -429,11 +443,6 @@
Ok(())
}
-/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
-fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
- (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
-}
-
/// Returns the intermediate physical address corresponding to the given virtual address.
///
/// As we use identity mapping for everything, this is just a cast, but it's useful to use it to be
@@ -449,3 +458,64 @@
pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
+
+/// Checks whether a PTE at given level is a page or block descriptor.
+#[inline]
+fn is_leaf_pte(flags: &Attributes, level: usize) -> bool {
+ const LEAF_PTE_LEVEL: usize = 3;
+ if flags.contains(Attributes::TABLE_OR_PAGE) {
+ level == LEAF_PTE_LEVEL
+ } else {
+ level < LEAF_PTE_LEVEL
+ }
+}
+
+/// Checks whether the PTE flags mark a lazily mapped block awaiting its MMIO guard
+/// mapping, i.e. carrying MMIO_LAZY_MAP_FLAG but not yet valid.
+fn verify_lazy_mapped_block(
+ _range: &VaRange,
+ desc: &mut Descriptor,
+ level: usize,
+) -> result::Result<(), ()> {
+ let flags = desc.flags().expect("Unsupported PTE flags set");
+ if !is_leaf_pte(&flags, level) {
+ return Ok(()); // Skip table PTEs as they aren't tagged with MMIO_LAZY_MAP_FLAG.
+ }
+ if flags.contains(mmu::MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
+ Ok(())
+ } else {
+ Err(())
+ }
+}
+
+/// MMIO guard unmaps a page, if it was ever accessed and hence MMIO guard mapped.
+fn mmio_guard_unmap_page(
+ va_range: &VaRange,
+ desc: &mut Descriptor,
+ level: usize,
+) -> result::Result<(), ()> {
+ let flags = desc.flags().expect("Unsupported PTE flags set");
+ // This function will be called on an address range that corresponds to a device. Only if a
+ // page has been accessed (read from or written to) will it carry the VALID flag and be MMIO
+ // guard mapped. Therefore, we can skip unmapping invalid pages; they were never MMIO guard
+ // mapped anyway.
+ if is_leaf_pte(&flags, level) && flags.contains(Attributes::VALID) {
+ assert!(
+ flags.contains(mmu::MMIO_LAZY_MAP_FLAG),
+ "Attempting MMIO guard unmap for non-device pages"
+ );
+ assert_eq!(
+ va_range.len(),
+ PVMFW_PAGE_SIZE,
+ "Failed to break down block mapping before MMIO guard mapping"
+ );
+ let page_base = va_range.start().0;
+ assert_eq!(page_base % PVMFW_PAGE_SIZE, 0);
+ // Since the MMIO guard hypercalls take IPAs, if pvmfw ever moves to a non-identity
+ // address mapping, page_base should be converted to an IPA first. However, since 0x0 is
+ // a valid MMIO address, we don't use virt_to_phys here and just pass page_base instead.
+ get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+ error!("Error MMIO guard unmapping: {e}");
+ })?;
+ }
+ Ok(())
+}
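Taken together, the memory.rs changes implement the lazy MMIO lifecycle end to end. A sketch of the flow, with a hypothetical device range (the registration function's name is assumed; its signature is not shown in this hunk):

    // 1. Registration: PTEs get DEVICE_LAZY, i.e. device attributes plus
    //    MMIO_LAZY_MAP_FLAG but *without* VALID; no MMIO guard hypercalls yet.
    memory.map_mmio_range(0x4000_0000..0x4001_0000)?;

    // 2. The first access to the region traps as a translation fault; the
    //    handler guard-maps and validates only the faulting 4 KiB page:
    memory.handle_mmio_fault(far)?;

    // 3. Before handing over to the payload, only pages that became VALID
    //    (i.e. were actually touched) are MMIO guard unmapped:
    memory.mmio_unmap_all()?;

Compared with the eager scheme this replaces, guard map hypercalls are only issued for pages the firmware actually touches, and the asserts in mmio_guard_unmap_page document the invariants: a VALID leaf in an MMIO region must carry MMIO_LAZY_MAP_FLAG, and block mappings must have been split by map_device before reaching it.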
diff --git a/pvmfw/src/mmu.rs b/pvmfw/src/mmu.rs
index da8c4b5..ed9b209 100644
--- a/pvmfw/src/mmu.rs
+++ b/pvmfw/src/mmu.rs
@@ -17,19 +17,22 @@
use crate::helpers;
use crate::helpers::PVMFW_PAGE_SIZE;
use aarch64_paging::idmap::IdMap;
-use aarch64_paging::paging::Attributes;
-use aarch64_paging::paging::MemoryRegion;
+use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
use aarch64_paging::MapError;
use core::ops::Range;
use vmbase::layout;
+/// Software bit used to indicate a device that should be lazily mapped.
+pub const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
+
// We assume that:
// - MAIR_EL1.Attr0 = "Device-nGnRE memory" (0b0000_0100)
// - MAIR_EL1.Attr1 = "Normal memory, Outer & Inner WB Non-transient, R/W-Allocate" (0b1111_1111)
const MEMORY: Attributes =
- Attributes::NORMAL.union(Attributes::NON_GLOBAL).union(Attributes::VALID);
-const DEVICE: Attributes =
- Attributes::DEVICE_NGNRE.union(Attributes::EXECUTE_NEVER).union(Attributes::VALID);
+ Attributes::VALID.union(Attributes::NORMAL).union(Attributes::NON_GLOBAL);
+const DEVICE_LAZY: Attributes =
+ MMIO_LAZY_MAP_FLAG.union(Attributes::DEVICE_NGNRE).union(Attributes::EXECUTE_NEVER);
+const DEVICE: Attributes = DEVICE_LAZY.union(Attributes::VALID);
const CODE: Attributes = MEMORY.union(Attributes::READ_ONLY);
const DATA: Attributes = MEMORY.union(Attributes::EXECUTE_NEVER);
const RODATA: Attributes = DATA.union(Attributes::READ_ONLY);
@@ -75,6 +78,10 @@
self.idmap.activate()
}
+ pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<(), MapError> {
+ self.map_range(range, DEVICE_LAZY)
+ }
+
pub fn map_device(&mut self, range: &Range<usize>) -> Result<(), MapError> {
self.map_range(range, DEVICE)
}
@@ -94,4 +101,8 @@
fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<(), MapError> {
self.idmap.map_range(&MemoryRegion::new(range.start, range.end), attr)
}
+
+ pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<(), MapError> {
+ self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
+ }
}
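modify_range() exposes aarch64_paging's PteUpdater hook: a plain function invoked for every descriptor the range touches, at every level, which is why callers like verify_lazy_mapped_block and mmio_guard_unmap_page must filter with is_leaf_pte. A hedged sketch of a custom updater, assuming Descriptor::modify_flags from the aarch64_paging crate; not part of this change:

    // Example updater: strip write permission from every leaf mapping in a range.
    fn make_read_only(
        _range: &VaRange,
        desc: &mut Descriptor,
        level: usize,
    ) -> Result<(), ()> {
        let flags = desc.flags().ok_or(())?; // reject unsupported flag encodings
        if is_leaf_pte(&flags, level) {
            desc.modify_flags(Attributes::READ_ONLY, Attributes::empty());
        }
        Ok(())
    }

    // Usage: page_table.modify_range(&range, &make_read_only)?;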