Merge changes from topic "microdroid-precompiled-sepolicy-on-system"
* changes:
Remove vendor partition
Move precompiled policy to /system partition
Remove vendor_manifest.xml and vendor_compatibility_matrix.xml
Move fstab.microdroid to /system partition
diff --git a/pvmfw/src/crypto.rs b/pvmfw/src/crypto.rs
index d607bee..3d9c8d1 100644
--- a/pvmfw/src/crypto.rs
+++ b/pvmfw/src/crypto.rs
@@ -21,8 +21,6 @@
use core::num::NonZeroU32;
use core::ptr;
-use crate::cstr;
-
use bssl_ffi::CRYPTO_library_init;
use bssl_ffi::ERR_get_error_line;
use bssl_ffi::ERR_lib_error_string;
@@ -37,6 +35,7 @@
use bssl_ffi::EVP_AEAD;
use bssl_ffi::EVP_AEAD_CTX;
use bssl_ffi::HKDF;
+use vmbase::cstr;
#[derive(Debug)]
pub struct Error {
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index 3116456..fbab013 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -14,17 +14,16 @@
//! Support for DICE derivation and BCC generation.
-use crate::cstr;
use core::ffi::c_void;
use core::mem::size_of;
use core::slice;
-use vmbase::memory::flushed_zeroize;
-
use diced_open_dice::{
bcc_format_config_descriptor, bcc_handover_main_flow, hash, Config, DiceMode, Hash,
InputValues, HIDDEN_SIZE,
};
use pvmfw_avb::{DebugLevel, Digest, VerifiedBootData};
+use vmbase::cstr;
+use vmbase::memory::flushed_zeroize;
fn to_dice_mode(debug_level: DebugLevel) -> DiceMode {
match debug_level {
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 3a870ab..0d2dfda 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -116,7 +116,11 @@
})?;
if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool().map_err(|e| {
+ let granule = get_hypervisor().memory_protection_granule().map_err(|e| {
+ error!("Failed to get memory protection granule: {e}");
+ RebootReason::InternalError
+ })?;
+ MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
error!("Failed to initialize dynamically shared pool: {e}");
RebootReason::InternalError
})?;
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index ab851a1..8310504 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -15,7 +15,6 @@
//! High-level FDT functions.
use crate::bootargs::BootArgsIterator;
-use crate::cstr;
use crate::helpers::GUEST_PAGE_SIZE;
use crate::Box;
use crate::RebootReason;
@@ -24,6 +23,7 @@
use core::cmp::max;
use core::cmp::min;
use core::ffi::CStr;
+use core::fmt;
use core::mem::size_of;
use core::ops::Range;
use fdtpci::PciMemoryFlags;
@@ -38,11 +38,28 @@
use log::info;
use log::warn;
use tinyvec::ArrayVec;
+use vmbase::cstr;
+use vmbase::fdt::SwiotlbInfo;
use vmbase::layout::{crosvm::MEM_START, MAX_VIRT_ADDR};
use vmbase::memory::SIZE_4KB;
use vmbase::util::flatten;
use vmbase::util::RangeExt as _;
+/// An enumeration of errors that can occur during the FDT validation.
+#[derive(Clone, Debug)]
+pub enum FdtValidationError {
+ /// Invalid CPU count.
+ InvalidCpuCount(usize),
+}
+
+impl fmt::Display for FdtValidationError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::InvalidCpuCount(num_cpus) => write!(f, "Invalid CPU count: {num_cpus}"),
+ }
+ }
+}
+
/// Extract from /config the address range containing the pre-loaded kernel. Absence of /config is
/// not an error.
fn read_kernel_range_from(fdt: &Fdt) -> libfdt::Result<Option<Range<usize>>> {
@@ -140,16 +157,12 @@
}
/// Validate number of CPUs
-fn validate_num_cpus(num_cpus: usize) -> Result<(), RebootReason> {
- if num_cpus == 0 {
- error!("Number of CPU can't be 0");
- return Err(RebootReason::InvalidFdt);
+fn validate_num_cpus(num_cpus: usize) -> Result<(), FdtValidationError> {
+ if num_cpus == 0 || DeviceTreeInfo::gic_patched_size(num_cpus).is_none() {
+ Err(FdtValidationError::InvalidCpuCount(num_cpus))
+ } else {
+ Ok(())
}
- if DeviceTreeInfo::GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus.try_into().unwrap()).is_none() {
- error!("Too many CPUs for gic: {}", num_cpus);
- return Err(RebootReason::InvalidFdt);
- }
- Ok(())
}
/// Patch DT by keeping `num_cpus` number of arm,arm-v8 compatible nodes, and pruning the rest.
@@ -430,37 +443,6 @@
Ok(())
}
-#[derive(Debug)]
-pub struct SwiotlbInfo {
- addr: Option<usize>,
- size: usize,
- align: Option<usize>,
-}
-
-impl SwiotlbInfo {
- pub fn fixed_range(&self) -> Option<Range<usize>> {
- self.addr.map(|addr| addr..addr + self.size)
- }
-}
-
-fn read_swiotlb_info_from(fdt: &Fdt) -> libfdt::Result<SwiotlbInfo> {
- let node =
- fdt.compatible_nodes(cstr!("restricted-dma-pool"))?.next().ok_or(FdtError::NotFound)?;
-
- let (addr, size, align) = if let Some(mut reg) = node.reg()? {
- let reg = reg.next().ok_or(FdtError::NotFound)?;
- let size = reg.size.ok_or(FdtError::NotFound)?;
- reg.addr.checked_add(size).ok_or(FdtError::BadValue)?;
- (Some(reg.addr.try_into().unwrap()), size.try_into().unwrap(), None)
- } else {
- let size = node.getprop_u64(cstr!("size"))?.ok_or(FdtError::NotFound)?;
- let align = node.getprop_u64(cstr!("alignment"))?.ok_or(FdtError::NotFound)?;
- (None, size.try_into().unwrap(), Some(align.try_into().unwrap()))
- };
-
- Ok(SwiotlbInfo { addr, size, align })
-}
-
fn validate_swiotlb_info(
swiotlb_info: &SwiotlbInfo,
memory: &Range<usize>,
@@ -478,6 +460,12 @@
return Err(RebootReason::InvalidFdt);
}
+ if let Some(addr) = swiotlb_info.addr {
+ if addr.checked_add(size).is_none() {
+ error!("Invalid swiotlb range: addr:{addr:#x} size:{size:#x}");
+ return Err(RebootReason::InvalidFdt);
+ }
+ }
if let Some(range) = swiotlb_info.fixed_range() {
if !range.is_within(memory) {
error!("swiotlb range {range:#x?} not part of memory range {memory:#x?}");
@@ -516,9 +504,8 @@
let mut range1 = ranges.next().ok_or(FdtError::NotFound)?;
let addr = range0.addr;
- // SAFETY - doesn't overflow. checked in validate_num_cpus
- let size: u64 =
- DeviceTreeInfo::GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus.try_into().unwrap()).unwrap();
+ // `validate_num_cpus()` checked that this wouldn't panic
+ let size = u64::try_from(DeviceTreeInfo::gic_patched_size(num_cpus).unwrap()).unwrap();
// range1 is just below range0
range1.addr = addr - size;
@@ -581,7 +568,11 @@
}
impl DeviceTreeInfo {
- const GIC_REDIST_SIZE_PER_CPU: u64 = (32 * SIZE_4KB) as u64;
+ fn gic_patched_size(num_cpus: usize) -> Option<usize> {
+ const GIC_REDIST_SIZE_PER_CPU: usize = 32 * SIZE_4KB;
+
+ GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus)
+ }
}
pub fn sanitize_device_tree(fdt: &mut Fdt) -> Result<DeviceTreeInfo, RebootReason> {
@@ -623,7 +614,10 @@
error!("Failed to read num cpus from DT: {e}");
RebootReason::InvalidFdt
})?;
- validate_num_cpus(num_cpus)?;
+ validate_num_cpus(num_cpus).map_err(|e| {
+ error!("Failed to validate num cpus from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
let pci_info = read_pci_info_from(fdt).map_err(|e| {
error!("Failed to read pci info from DT: {e}");
@@ -636,7 +630,7 @@
RebootReason::InvalidFdt
})?;
- let swiotlb_info = read_swiotlb_info_from(fdt).map_err(|e| {
+ let swiotlb_info = SwiotlbInfo::new_from_fdt(fdt).map_err(|e| {
error!("Failed to read swiotlb info from DT: {e}");
RebootReason::InvalidFdt
})?;
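
The fdt.rs hunks above replace the ad-hoc CPU-count checks with a dedicated `FdtValidationError` type and a `gic_patched_size()` helper that reports overflow through `Option`. A minimal standalone sketch of that validation logic follows; `SIZE_4KB` and the free functions here stand in for the `DeviceTreeInfo` associated items in the real code.

```rust
use core::fmt;

const SIZE_4KB: usize = 4 << 10;
const GIC_REDIST_SIZE_PER_CPU: usize = 32 * SIZE_4KB;

/// Mirrors pvmfw's FdtValidationError for this sketch.
#[derive(Clone, Debug)]
enum FdtValidationError {
    InvalidCpuCount(usize),
}

impl fmt::Display for FdtValidationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::InvalidCpuCount(num_cpus) => write!(f, "Invalid CPU count: {num_cpus}"),
        }
    }
}

/// Total GIC redistributor size needed for `num_cpus`, or None on overflow.
fn gic_patched_size(num_cpus: usize) -> Option<usize> {
    GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus)
}

fn validate_num_cpus(num_cpus: usize) -> Result<(), FdtValidationError> {
    if num_cpus == 0 || gic_patched_size(num_cpus).is_none() {
        Err(FdtValidationError::InvalidCpuCount(num_cpus))
    } else {
        Ok(())
    }
}

fn main() {
    assert!(validate_num_cpus(8).is_ok());
    assert!(matches!(validate_num_cpus(0), Err(FdtValidationError::InvalidCpuCount(0))));
    // Overflow of the redistributor size is reported as the same error variant.
    assert!(validate_num_cpus(usize::MAX).is_err());
    println!("{}", validate_num_cpus(0).unwrap_err());
}
```

The caller in `sanitize_device_tree()` then maps this error to `RebootReason::InvalidFdt`, so the reboot-reason type no longer leaks into the validation helper.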
diff --git a/pvmfw/src/helpers.rs b/pvmfw/src/helpers.rs
index 5ad721e..8981408 100644
--- a/pvmfw/src/helpers.rs
+++ b/pvmfw/src/helpers.rs
@@ -18,11 +18,3 @@
pub const GUEST_PAGE_SIZE: usize = SIZE_4KB;
pub const PVMFW_PAGE_SIZE: usize = PAGE_SIZE;
-
-/// Create &CStr out of &str literal
-#[macro_export]
-macro_rules! cstr {
- ($str:literal) => {{
- core::ffi::CStr::from_bytes_with_nul(concat!($str, "\0").as_bytes()).unwrap()
- }};
-}
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 578d7a0..29056f1 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -25,11 +25,14 @@
use crate::error::{Error, Result};
use buddy_system_allocator::LockedHeap;
use core::num::NonZeroUsize;
+use core::result;
use core::slice;
use fdtpci::PciInfo;
-use hyp::get_hypervisor;
+use hyp::{get_hypervisor, HypervisorCap, KvmError};
+use libfdt::FdtError;
use log::{debug, error, info};
use vmbase::{
+ fdt::SwiotlbInfo,
layout::{self, crosvm},
main,
memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE},
@@ -111,9 +114,35 @@
error!("Failed to use memory range value from DT: {memory_range:#x?}");
e
})?;
+
+ if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
+ let granule = memory_protection_granule()?;
+ MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
+ error!("Failed to initialize dynamically shared pool.");
+ e
+ })?;
+ } else {
+ let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().ok_or_else(|| {
+ error!("Pre-shared pool range not specified in swiotlb node");
+ Error::from(FdtError::BadValue)
+ })?;
+ MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
+ error!("Failed to initialize pre-shared pool.");
+ e
+ })?;
+ }
Ok(())
}
+fn memory_protection_granule() -> result::Result<usize, hyp::Error> {
+ match get_hypervisor().memory_protection_granule() {
+ Ok(granule) => Ok(granule),
+ // Take the default page size when KVM call is not supported in non-protected VMs.
+ Err(hyp::Error::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
+ Err(e) => Err(e),
+ }
+}
+
fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
info!("Starting unsharing memory...");
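
The new `memory_protection_granule()` helper in rialto falls back to the page size when the hypervisor does not implement the KVM granule query. Below is a rough standalone sketch of that fallback pattern; `KvmError`, `HypError`, and `query_granule()` are stand-ins for the real `hyp` crate types and call, not its actual API.

```rust
const PAGE_SIZE: usize = 4096;

// Stand-ins for hyp::KvmError and hyp::Error; the real crate has more variants.
#[allow(dead_code)]
#[derive(Debug)]
enum KvmError {
    NotSupported,
    Busy,
}

#[derive(Debug)]
enum HypError {
    KvmError(KvmError, u32),
}

/// Stand-in for get_hypervisor().memory_protection_granule(); here it always
/// reports the call as unimplemented, as a non-protected VM might.
fn query_granule() -> Result<usize, HypError> {
    Err(HypError::KvmError(KvmError::NotSupported, 0))
}

fn memory_protection_granule() -> Result<usize, HypError> {
    match query_granule() {
        Ok(granule) => Ok(granule),
        // Fall back to the page size when the KVM call is not supported.
        Err(HypError::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
        Err(e) => Err(e),
    }
}

fn main() {
    assert_eq!(memory_protection_granule().unwrap(), PAGE_SIZE);
}
```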
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index 6db9ff8..0ef47db 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -67,6 +67,7 @@
"libbuddy_system_allocator",
"libfdtpci",
"libhyp",
+ "liblibfdt",
"liblog_rust_nostd",
"libonce_cell_nostd",
"libsmccc",
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index 41a3ff4..384a9c1 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -15,7 +15,7 @@
//! Functions to scan the PCI bus for VirtIO device.
use aarch64_paging::paging::MemoryRegion;
-use alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout};
+use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};
use core::{mem::size_of, ptr::NonNull};
use fdtpci::PciInfo;
use log::{debug, info};
@@ -103,7 +103,7 @@
debug!("dma_alloc: pages={}", pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
// Safe because the layout has a non-zero size.
- let vaddr = unsafe { alloc(layout) };
+ let vaddr = unsafe { alloc_zeroed(layout) };
let vaddr =
if let Some(vaddr) = NonNull::new(vaddr) { vaddr } else { handle_alloc_error(layout) };
let paddr = virt_to_phys(vaddr);
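
The pci.rs example now allocates its DMA buffers with `alloc_zeroed` instead of `alloc`, so the buffer handed to the device starts out zeroed. A small host-side sketch of that allocation pattern is shown below; it uses `std::alloc`, whereas the real example is no_std and goes through the `alloc` crate.

```rust
use std::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};

const PAGE_SIZE: usize = 4096;

fn main() {
    let pages = 2;
    let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
    // SAFETY: the layout has a non-zero size.
    let vaddr = unsafe { alloc_zeroed(layout) };
    if vaddr.is_null() {
        handle_alloc_error(layout);
    }
    // alloc_zeroed guarantees the block reads back as zeroes, so no explicit
    // write_bytes() pass is needed before the buffer is used.
    let buf = unsafe { std::slice::from_raw_parts(vaddr, layout.size()) };
    assert!(buf.iter().all(|&b| b == 0));
    // SAFETY: vaddr was allocated with exactly this layout and is not used again.
    unsafe { dealloc(vaddr, layout) };
}
```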
diff --git a/vmbase/src/fdt.rs b/vmbase/src/fdt.rs
new file mode 100644
index 0000000..537ca03
--- /dev/null
+++ b/vmbase/src/fdt.rs
@@ -0,0 +1,54 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! High-level FDT functions.
+
+use crate::cstr;
+use core::ops::Range;
+use libfdt::{self, Fdt, FdtError};
+
+/// Represents information about a SWIOTLB buffer.
+#[derive(Debug)]
+pub struct SwiotlbInfo {
+ /// The address of the SWIOTLB buffer, if available.
+ pub addr: Option<usize>,
+ /// The size of the SWIOTLB buffer.
+ pub size: usize,
+ /// The alignment of the SWIOTLB buffer, if available.
+ pub align: Option<usize>,
+}
+
+impl SwiotlbInfo {
+ /// Creates a `SwiotlbInfo` struct from the given device tree.
+ pub fn new_from_fdt(fdt: &Fdt) -> libfdt::Result<SwiotlbInfo> {
+ let node =
+ fdt.compatible_nodes(cstr!("restricted-dma-pool"))?.next().ok_or(FdtError::NotFound)?;
+
+ let (addr, size, align) = if let Some(mut reg) = node.reg()? {
+ let reg = reg.next().ok_or(FdtError::NotFound)?;
+ let size = reg.size.ok_or(FdtError::NotFound)?;
+ (Some(reg.addr.try_into().unwrap()), size.try_into().unwrap(), None)
+ } else {
+ let size = node.getprop_u64(cstr!("size"))?.ok_or(FdtError::NotFound)?;
+ let align = node.getprop_u64(cstr!("alignment"))?.ok_or(FdtError::NotFound)?;
+ (None, size.try_into().unwrap(), Some(align.try_into().unwrap()))
+ };
+ Ok(Self { addr, size, align })
+ }
+
+ /// Returns the fixed range of memory mapped by the SWIOTLB buffer, if available.
+ pub fn fixed_range(&self) -> Option<Range<usize>> {
+ self.addr.map(|addr| addr..addr + self.size)
+ }
+}
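
`SwiotlbInfo` now lives in vmbase so that both pvmfw and rialto can parse the restricted-dma-pool node. Its main contract is how `fixed_range()` derives a range from `addr` and `size`; the sketch below copies just the struct and that method so it compiles without libfdt, with hard-coded field values standing in for what `new_from_fdt()` would parse.

```rust
use core::ops::Range;

/// Copy of the SwiotlbInfo fields for this sketch; the real type is built
/// from the device tree via SwiotlbInfo::new_from_fdt().
#[derive(Debug)]
struct SwiotlbInfo {
    addr: Option<usize>,
    size: usize,
    align: Option<usize>,
}

impl SwiotlbInfo {
    /// Returns the fixed range of the buffer, if the node carried a `reg` entry.
    fn fixed_range(&self) -> Option<Range<usize>> {
        self.addr.map(|addr| addr..addr + self.size)
    }
}

fn main() {
    // A node with a `reg` property yields a fixed range...
    let fixed = SwiotlbInfo { addr: Some(0x8000_0000), size: 0x10_0000, align: None };
    assert_eq!(fixed.fixed_range(), Some(0x8000_0000..0x8010_0000));
    // ...while a size/alignment-only node leaves placement to the VMM.
    let dynamic = SwiotlbInfo { addr: None, size: 0x10_0000, align: Some(0x1000) };
    assert_eq!(dynamic.fixed_range(), None);
    assert_eq!(dynamic.align, Some(0x1000));
}
```

Note that the `addr + size` overflow check that used to live in `read_swiotlb_info_from()` moved to pvmfw's `validate_swiotlb_info()`, which is why `new_from_fdt()` no longer performs it.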
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index ebb3707..54f3384 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -22,6 +22,7 @@
mod bionic;
pub mod console;
mod entry;
+pub mod fdt;
pub mod layout;
mod linker;
pub mod logger;
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 5d60c85..61cbeb0 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -220,10 +220,9 @@
}
/// Initialize the shared heap to dynamically share memory from the global allocator.
- pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
+ pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
const INIT_CAP: usize = 10;
- let granule = get_hypervisor().memory_protection_granule()?;
let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
if previous.is_some() {
return Err(MemoryTrackerError::SharedMemorySetFailure);
diff --git a/vmbase/src/util.rs b/vmbase/src/util.rs
index 8c230a1..7fe6015 100644
--- a/vmbase/src/util.rs
+++ b/vmbase/src/util.rs
@@ -16,6 +16,14 @@
use core::ops::Range;
+/// Create &CStr out of &str literal
+#[macro_export]
+macro_rules! cstr {
+ ($str:literal) => {{
+ core::ffi::CStr::from_bytes_with_nul(concat!($str, "\0").as_bytes()).unwrap()
+ }};
+}
+
/// Flatten [[T; N]] into &[T]
/// TODO: use slice::flatten when it graduates from experimental
pub fn flatten<T, const N: usize>(original: &[[T; N]]) -> &[T] {
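
The `cstr!` macro moves from pvmfw's helpers.rs into vmbase's util.rs and, being `#[macro_export]`, is re-exported at the crate root, which is why the callers above switch to `use vmbase::cstr;`. A self-contained sketch of what the macro does, with its body copied from the hunk above:

```rust
use core::ffi::CStr;

// Same expansion as vmbase's cstr!: append a NUL and build a &CStr at the use site.
macro_rules! cstr {
    ($str:literal) => {{
        core::ffi::CStr::from_bytes_with_nul(concat!($str, "\0").as_bytes()).unwrap()
    }};
}

fn main() {
    let compat: &CStr = cstr!("restricted-dma-pool");
    assert_eq!(compat.to_bytes(), b"restricted-dma-pool");
    // The unwrap() only panics if the literal itself contains an interior NUL.
    assert_eq!(compat.to_bytes_with_nul().last(), Some(&0u8));
}
```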
diff --git a/vmbase/src/virtio/hal.rs b/vmbase/src/virtio/hal.rs
index ac5b967..36f9e56 100644
--- a/vmbase/src/virtio/hal.rs
+++ b/vmbase/src/virtio/hal.rs
@@ -42,13 +42,14 @@
/// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
/// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
/// block of memory using `alloc_shared`, which is guaranteed to allocate valid and unique
- /// memory. We request an alignment of at least `PAGE_SIZE` from `alloc_shared`.
+ /// memory. We request an alignment of at least `PAGE_SIZE` from `alloc_shared`. We zero the
+ /// buffer before returning it.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
- let vaddr = alloc_shared(dma_layout(pages))
- .expect("Failed to allocate and share VirtIO DMA range with host");
- // TODO(ptosi): Move this zeroing to virtio_drivers, if it silently wants a zeroed region.
+ let layout = dma_layout(pages);
+ let vaddr =
+ alloc_shared(layout).expect("Failed to allocate and share VirtIO DMA range with host");
// SAFETY - vaddr points to a region allocated for the caller so is safe to access.
- unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, dma_layout(pages).size()) };
+ unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, layout.size()) };
let paddr = virt_to_phys(vaddr);
(paddr, vaddr)
}