Merge changes from topic "mount-vendor-in-microdroid" into main
* changes:
Propagate to Microdroid whether it should mount vendor partition
Add test api for setting vendor disk image
Add a way to add a vendor disk image when launching Microdroid VM
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 2532026..154a1d6 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -23,7 +23,7 @@
"liblog_rust",
"libnix",
"libopenssl",
- "libprotobuf_deprecated",
+ "libprotobuf",
"librpcbinder_rs",
"libthiserror",
],
diff --git a/compos/Android.bp b/compos/Android.bp
index c120b0f..2f6be98 100644
--- a/compos/Android.bp
+++ b/compos/Android.bp
@@ -18,7 +18,7 @@
"libminijail_rust",
"libnix",
"libodsign_proto_rust",
- "libprotobuf_deprecated",
+ "libprotobuf",
"libregex",
"librpcbinder_rs",
"librustutils",
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index f66de32..b0294dd 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -22,7 +22,7 @@
"liblibc",
"liblog_rust",
"libodsign_proto_rust",
- "libprotobuf_deprecated",
+ "libprotobuf",
"librustutils",
"libshared_child",
"libvmclient",
diff --git a/compos/src/artifact_signer.rs b/compos/src/artifact_signer.rs
index d3843fc..908e438 100644
--- a/compos/src/artifact_signer.rs
+++ b/compos/src/artifact_signer.rs
@@ -63,7 +63,7 @@
/// with accompanying signature file.
pub fn write_info_and_signature(self, info_path: &Path) -> Result<()> {
let mut info = OdsignInfo::new();
- info.mut_file_hashes().extend(self.file_digests.into_iter());
+ info.file_hashes.extend(self.file_digests.into_iter());
let bytes = info.write_to_bytes()?;
let signature = compos_key::sign(&bytes)?;
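The libprotobuf_deprecated -> libprotobuf swaps throughout this change track the protobuf-codegen v2 -> v3 migration: v3 stops generating accessor methods such as mut_file_hashes() and exposes message fields as plain public fields instead. A minimal sketch of the new calling pattern, assuming the generated OdsignInfo message is already in scope (its import and error handling via anyhow are elided, matching the rest of compos):

    use protobuf::Message; // provides write_to_bytes()

    fn serialize_digests(
        mut info: OdsignInfo,
        digests: impl IntoIterator<Item = (String, String)>,
    ) -> anyhow::Result<Vec<u8>> {
        // v2: info.mut_file_hashes().extend(digests);
        // v3: the map field is a plain pub field, so extend it directly.
        info.file_hashes.extend(digests);
        Ok(info.write_to_bytes()?)
    }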
diff --git a/libs/hyp/Android.bp b/libs/hyp/Android.bp
index 1bb8722..8baf9dd 100644
--- a/libs/hyp/Android.bp
+++ b/libs/hyp/Android.bp
@@ -8,7 +8,6 @@
srcs: ["src/lib.rs"],
prefer_rlib: true,
rustlibs: [
- "libbitflags",
"libonce_cell_nostd",
"libsmccc",
"libuuid_nostd",
diff --git a/libs/hyp/src/hypervisor/common.rs b/libs/hyp/src/hypervisor/common.rs
index ec7d168..7c030a1 100644
--- a/libs/hyp/src/hypervisor/common.rs
+++ b/libs/hyp/src/hypervisor/common.rs
@@ -16,47 +16,49 @@
use crate::error::Result;
use crate::util::SIZE_4KB;
-use bitflags::bitflags;
/// Expected MMIO guard granule size, validated during MMIO guard initialization.
pub const MMIO_GUARD_GRANULE_SIZE: usize = SIZE_4KB;
-bitflags! {
- /// Capabilities that Hypervisor backends can declare support for.
- pub struct HypervisorCap: u32 {
- /// Capability for guest to share its memory with host at runtime.
- const DYNAMIC_MEM_SHARE = 0b1;
+/// Trait for the hypervisor.
+pub trait Hypervisor {
+ /// Returns the hypervisor's MMIO_GUARD implementation, if any.
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ None
+ }
+
+ /// Returns the hypervisor's dynamic memory sharing implementation, if any.
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ None
}
}
-/// Trait for the hypervisor.
-pub trait Hypervisor {
+pub trait MmioGuardedHypervisor {
/// Initializes the hypervisor by enrolling a MMIO guard and checking the memory granule size.
/// By enrolling, all MMIO will be blocked unless allow-listed with `mmio_guard_map`.
/// Protected VMs are auto-enrolled.
- fn mmio_guard_init(&self) -> Result<()>;
+ fn init(&self) -> Result<()>;
/// Maps a page containing the given memory address to the hypervisor MMIO guard.
/// The page size corresponds to the MMIO guard granule size.
- fn mmio_guard_map(&self, addr: usize) -> Result<()>;
+ fn map(&self, addr: usize) -> Result<()>;
/// Unmaps a page containing the given memory address from the hypervisor MMIO guard.
/// The page size corresponds to the MMIO guard granule size.
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()>;
+ fn unmap(&self, addr: usize) -> Result<()>;
+}
+pub trait MemSharingHypervisor {
/// Shares a region of memory with host, granting it read, write and execute permissions.
/// The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
- fn mem_share(&self, base_ipa: u64) -> Result<()>;
+ fn share(&self, base_ipa: u64) -> Result<()>;
/// Revokes access permission from host to a memory region previously shared with
/// [`mem_share`]. The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
- fn mem_unshare(&self, base_ipa: u64) -> Result<()>;
+ fn unshare(&self, base_ipa: u64) -> Result<()>;
/// Returns the memory protection granule size in bytes.
- fn memory_protection_granule(&self) -> Result<usize>;
-
- /// Check if required capabilities are supported.
- fn has_cap(&self, cap: HypervisorCap) -> bool;
+ fn granule(&self) -> Result<usize>;
}
diff --git a/libs/hyp/src/hypervisor/geniezone.rs b/libs/hyp/src/hypervisor/geniezone.rs
index 0741978..24eb89e 100644
--- a/libs/hyp/src/hypervisor/geniezone.rs
+++ b/libs/hyp/src/hypervisor/geniezone.rs
@@ -14,7 +14,9 @@
//! Wrappers around calls to the GenieZone hypervisor.
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{
+ Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
use crate::error::{Error, Result};
use crate::util::page_address;
use core::fmt::{self, Display, Formatter};
@@ -40,7 +42,6 @@
// and share the same identification along with guest VMs.
// The previous uuid was removed due to duplication elsewhere.
pub const UUID: Uuid = uuid!("7e134ed0-3b82-488d-8cee-69c19211dbe7");
- const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
}
/// Error from a GenieZone HVC call.
@@ -85,7 +86,17 @@
}
impl Hypervisor for GeniezoneHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ Some(self)
+ }
+
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ Some(self)
+ }
+}
+
+impl MmioGuardedHypervisor for GeniezoneHypervisor {
+ fn init(&self) -> Result<()> {
mmio_guard_enroll()?;
let mmio_granule = mmio_guard_granule()?;
if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
@@ -94,43 +105,41 @@
Ok(())
}
- fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+ fn map(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_MAP_FUNC_ID, args)
}
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+ fn unmap(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_UNMAP_FUNC_ID, args)
}
+}
- fn mem_share(&self, base_ipa: u64) -> Result<()> {
+impl MemSharingHypervisor for GeniezoneHypervisor {
+ fn share(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_SHARE, args)
}
- fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+ fn unshare(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_UNSHARE, args)
}
- fn memory_protection_granule(&self) -> Result<usize> {
+ fn granule(&self) -> Result<usize> {
let args = [0u64; 17];
let granule = checked_hvc64(ARM_SMCCC_GZVM_FUNC_HYP_MEMINFO, args)?;
Ok(granule.try_into().unwrap())
}
-
- fn has_cap(&self, cap: HypervisorCap) -> bool {
- Self::CAPABILITIES.contains(cap)
- }
}
fn mmio_guard_granule() -> Result<usize> {
diff --git a/libs/hyp/src/hypervisor/gunyah.rs b/libs/hyp/src/hypervisor/gunyah.rs
index 252430f..45c01bf 100644
--- a/libs/hyp/src/hypervisor/gunyah.rs
+++ b/libs/hyp/src/hypervisor/gunyah.rs
@@ -1,5 +1,4 @@
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
-use crate::error::Result;
+use super::common::Hypervisor;
use uuid::{uuid, Uuid};
pub(super) struct GunyahHypervisor;
@@ -8,32 +7,4 @@
pub const UUID: Uuid = uuid!("c1d58fcd-a453-5fdb-9265-ce36673d5f14");
}
-impl Hypervisor for GunyahHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
- Ok(())
- }
-
- fn mmio_guard_map(&self, _addr: usize) -> Result<()> {
- Ok(())
- }
-
- fn mmio_guard_unmap(&self, _addr: usize) -> Result<()> {
- Ok(())
- }
-
- fn mem_share(&self, _base_ipa: u64) -> Result<()> {
- unimplemented!();
- }
-
- fn mem_unshare(&self, _base_ipa: u64) -> Result<()> {
- unimplemented!();
- }
-
- fn memory_protection_granule(&self) -> Result<usize> {
- Ok(MMIO_GUARD_GRANULE_SIZE)
- }
-
- fn has_cap(&self, _cap: HypervisorCap) -> bool {
- false
- }
-}
+impl Hypervisor for GunyahHypervisor {}
diff --git a/libs/hyp/src/hypervisor/kvm.rs b/libs/hyp/src/hypervisor/kvm.rs
index a89f9b8..a95b8de 100644
--- a/libs/hyp/src/hypervisor/kvm.rs
+++ b/libs/hyp/src/hypervisor/kvm.rs
@@ -14,7 +14,9 @@
//! Wrappers around calls to the KVM hypervisor.
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{
+ Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
use crate::error::{Error, Result};
use crate::util::page_address;
use core::fmt::{self, Display, Formatter};
@@ -70,17 +72,30 @@
const VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID: u32 = 0xc6000007;
const VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID: u32 = 0xc6000008;
-pub(super) struct KvmHypervisor;
+pub(super) struct RegularKvmHypervisor;
-impl KvmHypervisor {
+impl RegularKvmHypervisor {
// Based on ARM_SMCCC_VENDOR_HYP_UID_KVM_REG values listed in Linux kernel source:
// https://github.com/torvalds/linux/blob/master/include/linux/arm-smccc.h
pub(super) const UUID: Uuid = uuid!("28b46fb6-2ec5-11e9-a9ca-4b564d003a74");
- const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
}
-impl Hypervisor for KvmHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
+impl Hypervisor for RegularKvmHypervisor {}
+
+pub(super) struct ProtectedKvmHypervisor;
+
+impl Hypervisor for ProtectedKvmHypervisor {
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ Some(self)
+ }
+
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ Some(self)
+ }
+}
+
+impl MmioGuardedHypervisor for ProtectedKvmHypervisor {
+ fn init(&self) -> Result<()> {
mmio_guard_enroll()?;
let mmio_granule = mmio_guard_granule()?;
if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
@@ -89,7 +104,7 @@
Ok(())
}
- fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+ fn map(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
@@ -99,7 +114,7 @@
.map_err(|e| Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID))
}
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+ fn unmap(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
@@ -110,30 +125,28 @@
Err(e) => Err(Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID)),
}
}
+}
- fn mem_share(&self, base_ipa: u64) -> Result<()> {
+impl MemSharingHypervisor for ProtectedKvmHypervisor {
+ fn share(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_SHARE, args)
}
- fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+ fn unshare(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE, args)
}
- fn memory_protection_granule(&self) -> Result<usize> {
+ fn granule(&self) -> Result<usize> {
let args = [0u64; 17];
let granule = checked_hvc64(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO, args)?;
Ok(granule.try_into().unwrap())
}
-
- fn has_cap(&self, cap: HypervisorCap) -> bool {
- Self::CAPABILITIES.contains(cap)
- }
}
fn mmio_guard_granule() -> Result<usize> {
diff --git a/libs/hyp/src/hypervisor/mod.rs b/libs/hyp/src/hypervisor/mod.rs
index 93d53fe..bc9e406 100644
--- a/libs/hyp/src/hypervisor/mod.rs
+++ b/libs/hyp/src/hypervisor/mod.rs
@@ -23,30 +23,31 @@
use crate::error::{Error, Result};
use alloc::boxed::Box;
-pub use common::Hypervisor;
-pub use common::HypervisorCap;
-pub use common::MMIO_GUARD_GRANULE_SIZE;
+use common::Hypervisor;
+pub use common::{MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE};
pub use geniezone::GeniezoneError;
use geniezone::GeniezoneHypervisor;
use gunyah::GunyahHypervisor;
pub use kvm::KvmError;
-use kvm::KvmHypervisor;
+use kvm::{ProtectedKvmHypervisor, RegularKvmHypervisor};
use once_cell::race::OnceBox;
use smccc::hvc64;
use uuid::Uuid;
enum HypervisorBackend {
- Kvm,
+ RegularKvm,
Gunyah,
Geniezone,
+ ProtectedKvm,
}
impl HypervisorBackend {
fn get_hypervisor(&self) -> &'static dyn Hypervisor {
match self {
- Self::Kvm => &KvmHypervisor,
+ Self::RegularKvm => &RegularKvmHypervisor,
Self::Gunyah => &GunyahHypervisor,
Self::Geniezone => &GeniezoneHypervisor,
+ Self::ProtectedKvm => &ProtectedKvmHypervisor,
}
}
}
@@ -58,7 +59,16 @@
match uuid {
GeniezoneHypervisor::UUID => Ok(HypervisorBackend::Geniezone),
GunyahHypervisor::UUID => Ok(HypervisorBackend::Gunyah),
- KvmHypervisor::UUID => Ok(HypervisorBackend::Kvm),
+ RegularKvmHypervisor::UUID => {
+ // Protected KVM has the same UUID so differentiate based on MEM_SHARE.
+ match ProtectedKvmHypervisor.as_mem_sharer().unwrap().granule() {
+ Ok(_) => Ok(HypervisorBackend::ProtectedKvm),
+ Err(Error::KvmError(KvmError::NotSupported, _)) => {
+ Ok(HypervisorBackend::RegularKvm)
+ }
+ Err(e) => Err(e),
+ }
+ }
u => Err(Error::UnsupportedHypervisorUuid(u)),
}
}
@@ -95,8 +105,18 @@
}
/// Gets the hypervisor singleton.
-pub fn get_hypervisor() -> &'static dyn Hypervisor {
+fn get_hypervisor() -> &'static dyn Hypervisor {
static HYPERVISOR: OnceBox<HypervisorBackend> = OnceBox::new();
HYPERVISOR.get_or_init(|| Box::new(detect_hypervisor())).get_hypervisor()
}
+
+/// Gets the MMIO_GUARD hypervisor singleton, if any.
+pub fn get_mmio_guard() -> Option<&'static dyn MmioGuardedHypervisor> {
+ get_hypervisor().as_mmio_guard()
+}
+
+/// Gets the dynamic memory sharing hypervisor singleton, if any.
+pub fn get_mem_sharer() -> Option<&'static dyn MemSharingHypervisor> {
+ get_hypervisor().as_mem_sharer()
+}
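The net effect of the trait split: callers no longer probe HypervisorCap flags and call methods that may be stubs; each facility comes back as an Option, and the whole code path is skipped when a backend (RegularKvm, Gunyah) leaves the as_* defaults returning None. A caller-side sketch using only names introduced in this change, with the UART address passed in for illustration:

    use hyp::{get_mem_sharer, get_mmio_guard, Result};

    fn setup(uart_base: usize) -> Result<()> {
        // Enroll and map the MMIO guard only if the backend provides one.
        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.init()?;
            mmio_guard.map(uart_base)?;
        }
        // Likewise, query the sharing granule only if MEM_SHARE exists.
        if let Some(mem_sharer) = get_mem_sharer() {
            let _granule = mem_sharer.granule()?;
        }
        Ok(())
    }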
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 32a59d1..486a181 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -21,6 +21,6 @@
mod util;
pub use error::{Error, Result};
-pub use hypervisor::{get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE};
+pub use hypervisor::{get_mem_sharer, get_mmio_guard, KvmError, MMIO_GUARD_GRANULE_SIZE};
use hypervisor::GeniezoneError;
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 8e0bb65..afc36d0 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -16,6 +16,8 @@
//! to a bare-metal environment.
#![no_std]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(clippy::undocumented_unsafe_blocks)]
mod iterators;
@@ -205,7 +207,7 @@
}
/// Find parent node.
pub fn parent(&self) -> Result<Self> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
Ok(Self { fdt: self.fdt, offset: fdt_err(ret)? })
@@ -311,7 +313,7 @@
name: &CStr,
) -> Result<Option<(*const c_void, usize)>> {
let mut len: i32 = 0;
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) and the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) and the
// function respects the passed number of characters.
let prop = unsafe {
libfdt_bindgen::fdt_getprop_namelen(
@@ -342,7 +344,7 @@
}
fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe {
libfdt_bindgen::fdt_node_offset_by_compatible(
self.fdt.as_ptr(),
@@ -355,14 +357,14 @@
}
fn address_cells(&self) -> Result<AddrCells> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
unsafe { libfdt_bindgen::fdt_address_cells(self.fdt.as_ptr(), self.offset) }
.try_into()
.map_err(|_| FdtError::Internal)
}
fn size_cells(&self) -> Result<SizeCells> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
unsafe { libfdt_bindgen::fdt_size_cells(self.fdt.as_ptr(), self.offset) }
.try_into()
.map_err(|_| FdtError::Internal)
@@ -378,7 +380,7 @@
impl<'a> FdtNodeMut<'a> {
/// Append a property name-value (possibly empty) pair to the given node.
pub fn appendprop<T: AsRef<[u8]>>(&mut self, name: &CStr, value: &T) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
let ret = unsafe {
libfdt_bindgen::fdt_appendprop(
self.fdt.as_mut_ptr(),
@@ -394,7 +396,7 @@
/// Append a (address, size) pair property to the given node.
pub fn appendprop_addrrange(&mut self, name: &CStr, addr: u64, size: u64) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
let ret = unsafe {
libfdt_bindgen::fdt_appendprop_addrrange(
self.fdt.as_mut_ptr(),
@@ -411,7 +413,7 @@
/// Create or change a property name-value pair to the given node.
pub fn setprop(&mut self, name: &CStr, value: &[u8]) -> Result<()> {
- // SAFETY - New value size is constrained to the DT totalsize
+ // SAFETY: New value size is constrained to the DT totalsize
// (validated by underlying libfdt).
let ret = unsafe {
libfdt_bindgen::fdt_setprop(
@@ -429,7 +431,7 @@
/// Replace the value of the given property with the given value, and ensure that the given
/// value has the same length as the current value.
pub fn setprop_inplace(&mut self, name: &CStr, value: &[u8]) -> Result<()> {
- // SAFETY - fdt size is not altered
+ // SAFETY: fdt size is not altered
let ret = unsafe {
libfdt_bindgen::fdt_setprop_inplace(
self.fdt.as_mut_ptr(),
@@ -457,7 +459,7 @@
/// Delete the given property.
pub fn delprop(&mut self, name: &CStr) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) when the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) when the
// library locates the node's property. Removing the property may shift the offsets of
// other nodes and properties but the borrow checker should prevent this function from
// being called when FdtNode instances are in use.
@@ -470,7 +472,7 @@
/// Overwrite the given property with FDT_NOP, effectively removing it from the DT.
pub fn nop_property(&mut self, name: &CStr) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) when the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) when the
// library locates the node's property.
let ret = unsafe {
libfdt_bindgen::fdt_nop_property(self.fdt.as_mut_ptr(), self.offset, name.as_ptr())
@@ -490,7 +492,7 @@
return Err(FdtError::NoSpace);
}
- // SAFETY - new_size is smaller than the old size
+ // SAFETY: new_size is smaller than the old size
let ret = unsafe {
libfdt_bindgen::fdt_setprop(
self.fdt.as_mut_ptr(),
@@ -511,7 +513,7 @@
/// Add a new subnode to the given node and return it as a FdtNodeMut on success.
pub fn add_subnode(&'a mut self, name: &CStr) -> Result<Self> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
let ret = unsafe {
libfdt_bindgen::fdt_add_subnode(self.fdt.as_mut_ptr(), self.offset, name.as_ptr())
};
@@ -520,7 +522,7 @@
}
fn parent(&'a self) -> Result<FdtNode<'a>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
Ok(FdtNode { fdt: &*self.fdt, offset: fdt_err(ret)? })
@@ -528,7 +530,7 @@
/// Return the compatible node of the given name that is next to this node
pub fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe {
libfdt_bindgen::fdt_node_offset_by_compatible(
self.fdt.as_ptr(),
@@ -553,7 +555,7 @@
// mutable reference to DT, so we can't use current node (which also has a mutable reference to
// DT).
pub fn delete_and_next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe {
libfdt_bindgen::fdt_node_offset_by_compatible(
self.fdt.as_ptr(),
@@ -563,7 +565,7 @@
};
let next_offset = fdt_err_or_option(ret)?;
- // SAFETY - fdt_nop_node alter only the bytes in the blob which contain the node and its
+ // SAFETY: fdt_nop_node alters only the bytes in the blob which contain the node and its
// properties and subnodes, and will not alter or move any other part of the tree.
let ret = unsafe { libfdt_bindgen::fdt_nop_node(self.fdt.as_mut_ptr(), self.offset) };
fdt_err_expect_zero(ret)?;
@@ -611,7 +613,7 @@
///
/// Fails if the FDT does not pass validation.
pub fn from_slice(fdt: &[u8]) -> Result<&Self> {
- // SAFETY - The FDT will be validated before it is returned.
+ // SAFETY: The FDT will be validated before it is returned.
let fdt = unsafe { Self::unchecked_from_slice(fdt) };
fdt.check_full()?;
Ok(fdt)
@@ -621,7 +623,7 @@
///
/// Fails if the FDT does not pass validation.
pub fn from_mut_slice(fdt: &mut [u8]) -> Result<&mut Self> {
- // SAFETY - The FDT will be validated before it is returned.
+ // SAFETY: The FDT will be validated before it is returned.
let fdt = unsafe { Self::unchecked_from_mut_slice(fdt) };
fdt.check_full()?;
Ok(fdt)
@@ -629,7 +631,7 @@
/// Creates an empty Flattened Device Tree with a mutable slice.
pub fn create_empty_tree(fdt: &mut [u8]) -> Result<&mut Self> {
- // SAFETY - fdt_create_empty_tree() only write within the specified length,
+ // SAFETY: fdt_create_empty_tree() only writes within the specified length,
// and returns an error if the buffer was insufficient.
// There will be no memory write outside of the given fdt.
let ret = unsafe {
@@ -640,7 +642,7 @@
};
fdt_err_expect_zero(ret)?;
- // SAFETY - The FDT will be validated before it is returned.
+ // SAFETY: The FDT will be validated before it is returned.
let fdt = unsafe { Self::unchecked_from_mut_slice(fdt) };
fdt.check_full()?;
@@ -653,7 +655,9 @@
///
/// The returned FDT might be invalid; only use on slices containing a valid DT.
pub unsafe fn unchecked_from_slice(fdt: &[u8]) -> &Self {
- mem::transmute::<&[u8], &Self>(fdt)
+ // SAFETY: Fdt is a wrapper around a [u8], so the transmute is valid. The caller is
+ // responsible for ensuring that it is actually a valid FDT.
+ unsafe { mem::transmute::<&[u8], &Self>(fdt) }
}
/// Wraps a mutable slice containing a Flattened Device Tree.
@@ -662,7 +666,9 @@
///
/// The returned FDT might be invalid; only use on slices containing a valid DT.
pub unsafe fn unchecked_from_mut_slice(fdt: &mut [u8]) -> &mut Self {
- mem::transmute::<&mut [u8], &mut Self>(fdt)
+ // SAFETY: Fdt is a wrapper around a [u8], so the transmute is valid. The caller is
+ // responsible for ensuring that it is actually a valid FDT.
+ unsafe { mem::transmute::<&mut [u8], &mut Self>(fdt) }
}
/// Update this FDT from a slice containing another FDT
@@ -682,7 +688,7 @@
/// Make the whole slice containing the DT available to libfdt.
pub fn unpack(&mut self) -> Result<()> {
- // SAFETY - "Opens" the DT in-place (supported use-case) by updating its header and
+ // SAFETY: "Opens" the DT in-place (supported use-case) by updating its header and
// internal structures to make use of the whole self.fdt slice but performs no accesses
// outside of it and leaves the DT in a state that will be detected by other functions.
let ret = unsafe {
@@ -699,7 +705,7 @@
///
/// Doesn't shrink the underlying memory slice.
pub fn pack(&mut self) -> Result<()> {
- // SAFETY - "Closes" the DT in-place by updating its header and relocating its structs.
+ // SAFETY: "Closes" the DT in-place by updating its header and relocating its structs.
let ret = unsafe { libfdt_bindgen::fdt_pack(self.as_mut_ptr()) };
fdt_err_expect_zero(ret)
}
@@ -710,10 +716,12 @@
///
/// On failure, the library corrupts the DT and overlay so both must be discarded.
pub unsafe fn apply_overlay<'a>(&'a mut self, overlay: &'a mut Fdt) -> Result<&'a mut Self> {
- fdt_err_expect_zero(libfdt_bindgen::fdt_overlay_apply(
- self.as_mut_ptr(),
- overlay.as_mut_ptr(),
- ))?;
+ let ret =
+ // SAFETY: Both pointers are valid because they come from references, and fdt_overlay_apply
+ // doesn't keep them after it returns. It may corrupt their contents if there is an error,
+ // but that's our caller's responsibility.
+ unsafe { libfdt_bindgen::fdt_overlay_apply(self.as_mut_ptr(), overlay.as_mut_ptr()) };
+ fdt_err_expect_zero(ret)?;
Ok(self)
}
@@ -779,7 +787,7 @@
fn path_offset(&self, path: &CStr) -> Result<Option<c_int>> {
let len = path.to_bytes().len().try_into().map_err(|_| FdtError::BadPath)?;
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) and the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) and the
// function respects the passed number of characters.
let ret = unsafe {
// *_namelen functions don't include the trailing nul terminator in 'len'.
@@ -791,7 +799,7 @@
fn check_full(&self) -> Result<()> {
let len = self.buffer.len();
- // SAFETY - Only performs read accesses within the limits of the slice. If successful, this
+ // SAFETY: Only performs read accesses within the limits of the slice. If successful, this
// call guarantees to other unsafe calls that the header contains a valid totalsize (w.r.t.
// 'len' i.e. the self.fdt slice) that those C functions can use to perform bounds
// checking. The library doesn't maintain an internal state (such as pointers) between
@@ -815,7 +823,7 @@
fn header(&self) -> &libfdt_bindgen::fdt_header {
let p = self.as_ptr().cast::<_>();
- // SAFETY - A valid FDT (verified by constructor) must contain a valid fdt_header.
+ // SAFETY: A valid FDT (verified by constructor) must contain a valid fdt_header.
unsafe { &*p }
}
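The two new crate-level lints account for the mechanical churn in this file: clippy::undocumented_unsafe_blocks expects the `// SAFETY:` spelling that the comments were rewritten to, and unsafe_op_in_unsafe_fn removes the implicit unsafe context from the body of an unsafe fn, hence the new explicit unsafe blocks in unchecked_from_slice() and unchecked_from_mut_slice(). A self-contained illustration of both lints:

    #![deny(unsafe_op_in_unsafe_fn)]
    #![deny(clippy::undocumented_unsafe_blocks)]

    /// # Safety
    ///
    /// `ptr` must be non-null, aligned and valid for reads.
    unsafe fn read_u32(ptr: *const u32) -> u32 {
        // The dereference needs its own unsafe block even inside an unsafe
        // fn, and clippy requires that block to carry a SAFETY comment.
        // SAFETY: The caller guarantees `ptr` is valid for reads.
        unsafe { *ptr }
    }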
diff --git a/microdroid/payload/Android.bp b/microdroid/payload/Android.bp
index 4814a64..8225875 100644
--- a/microdroid/payload/Android.bp
+++ b/microdroid/payload/Android.bp
@@ -31,6 +31,7 @@
protos: ["metadata.proto"],
source_stem: "microdroid_metadata",
host_supported: true,
+ use_protobuf3: true,
apex_available: [
"com.android.virt",
],
diff --git a/microdroid/payload/metadata/Android.bp b/microdroid/payload/metadata/Android.bp
index e3138e8..cd182fc 100644
--- a/microdroid/payload/metadata/Android.bp
+++ b/microdroid/payload/metadata/Android.bp
@@ -12,7 +12,7 @@
rustlibs: [
"libanyhow",
"libmicrodroid_metadata_proto_rust",
- "libprotobuf_deprecated",
+ "libprotobuf",
],
apex_available: [
"com.android.virt",
diff --git a/microdroid/payload/metadata/src/lib.rs b/microdroid/payload/metadata/src/lib.rs
index bfbec60..f00391a 100644
--- a/microdroid/payload/metadata/src/lib.rs
+++ b/microdroid/payload/metadata/src/lib.rs
@@ -24,7 +24,7 @@
use std::io::Write;
pub use microdroid_metadata::metadata::{
- ApexPayload, ApkPayload, Metadata, Metadata_oneof_payload as PayloadMetadata, PayloadConfig,
+ metadata::Payload as PayloadMetadata, ApexPayload, ApkPayload, Metadata, PayloadConfig,
};
/// Reads metadata from a reader
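This re-export also absorbs the v3 oneof codegen change behind the PayloadMetadata renames below: v2 emitted a top-level Metadata_oneof_payload enum with snake_case variants (config_path, config), while v3 nests the enum in a module named after the message (metadata::Payload) with CamelCase variants and, judging by the catch-all arms added in dice.rs and main.rs, marks it non-exhaustive. A hypothetical simplification of the generated shape, not the actual codegen output:

    pub mod metadata {
        // v2: Metadata_oneof_payload::{config_path, config}
        // v3: metadata::Payload::{ConfigPath, Config}
        #[non_exhaustive]
        pub enum Payload {
            ConfigPath(String),
            Config(super::PayloadConfig),
        }
    }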
diff --git a/microdroid_manager/src/dice.rs b/microdroid_manager/src/dice.rs
index 3a2a1e6..bacefcd 100644
--- a/microdroid_manager/src/dice.rs
+++ b/microdroid_manager/src/dice.rs
@@ -170,21 +170,23 @@
/// PayloadConfig = {
/// 1: tstr // payload_binary_name
/// }
-pub fn format_payload_config_descriptor(payload_metadata: &PayloadMetadata) -> Result<Vec<u8>> {
+pub fn format_payload_config_descriptor(payload: &PayloadMetadata) -> Result<Vec<u8>> {
const MICRODROID_PAYLOAD_COMPONENT_NAME: &str = "Microdroid payload";
- let config_descriptor_cbor_value = match payload_metadata {
- PayloadMetadata::config_path(payload_config_path) => cbor!({
+ let config_descriptor_cbor_value = match payload {
+ PayloadMetadata::ConfigPath(payload_config_path) => cbor!({
-70002 => MICRODROID_PAYLOAD_COMPONENT_NAME,
-71000 => payload_config_path
}),
- PayloadMetadata::config(payload_config) => cbor!({
+ PayloadMetadata::Config(payload_config) => cbor!({
-70002 => MICRODROID_PAYLOAD_COMPONENT_NAME,
-71001 => {1 => payload_config.payload_binary_name}
}),
+ _ => bail!("Failed to match the payload against a config type: {:?}", payload),
}
.context("Failed to build a CBOR Value from payload metadata")?;
let mut config_descriptor = Vec::new();
+
ser::into_writer(&config_descriptor_cbor_value, &mut config_descriptor)?;
Ok(config_descriptor)
}
@@ -196,7 +198,7 @@
#[test]
fn payload_metadata_with_path_formats_correctly() -> Result<()> {
- let payload_metadata = PayloadMetadata::config_path("/config_path".to_string());
+ let payload_metadata = PayloadMetadata::ConfigPath("/config_path".to_string());
let config_descriptor = format_payload_config_descriptor(&payload_metadata)?;
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
@@ -214,7 +216,7 @@
payload_binary_name: "payload_binary".to_string(),
..Default::default()
};
- let payload_metadata = PayloadMetadata::config(payload_config);
+ let payload_metadata = PayloadMetadata::Config(payload_config);
let config_descriptor = format_payload_config_descriptor(&payload_metadata)?;
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 9c19feb..1cdcde1 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -228,7 +228,7 @@
load_crashkernel_if_supported().context("Failed to load crashkernel")?;
- swap::init_swap().context("Failed to initialise swap")?;
+ swap::init_swap().context("Failed to initialize swap")?;
info!("swap enabled.");
let service = get_vms_rpc_binder()
@@ -435,8 +435,9 @@
// Restricted APIs are only allowed to be used by platform or test components. Infer this from
// the use of a VM config file since those can only be used by platform and test components.
let allow_restricted_apis = match payload_metadata {
- PayloadMetadata::config_path(_) => true,
- PayloadMetadata::config(_) => false,
+ PayloadMetadata::ConfigPath(_) => true,
+ PayloadMetadata::Config(_) => false,
+ _ => false, // default is false for safety
};
let config = load_config(payload_metadata).context("Failed to load payload metadata")?;
@@ -792,14 +793,14 @@
fn load_config(payload_metadata: PayloadMetadata) -> Result<VmPayloadConfig> {
match payload_metadata {
- PayloadMetadata::config_path(path) => {
+ PayloadMetadata::ConfigPath(path) => {
let path = Path::new(&path);
info!("loading config from {:?}...", path);
let file = ioutil::wait_for_file(path, WAIT_TIMEOUT)
.with_context(|| format!("Failed to read {:?}", path))?;
Ok(serde_json::from_reader(file)?)
}
- PayloadMetadata::config(payload_config) => {
+ PayloadMetadata::Config(payload_config) => {
let task = Task {
type_: TaskType::MicrodroidLauncher,
command: payload_config.payload_binary_name,
@@ -814,6 +815,7 @@
enable_authfs: false,
})
}
+ _ => bail!("Failed to match config against a config type."),
}
}
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 6f96fc0..2d1c418 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -23,7 +23,7 @@
use core::num::NonZeroUsize;
use core::ops::Range;
use core::slice;
-use hyp::{get_hypervisor, HypervisorCap};
+use hyp::{get_mem_sharer, get_mmio_guard};
use log::debug;
use log::error;
use log::info;
@@ -33,10 +33,9 @@
use vmbase::{
configure_heap, console,
layout::{self, crosvm},
- logger, main,
+ main,
memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
power::reboot,
- rand,
};
use zeroize::Zeroize;
@@ -112,8 +111,8 @@
RebootReason::InvalidFdt
})?;
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- let granule = get_hypervisor().memory_protection_granule().map_err(|e| {
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule().map_err(|e| {
error!("Failed to get memory protection granule: {e}");
RebootReason::InternalError
})?;
@@ -192,21 +191,7 @@
// - only perform logging once the logger has been initialized
// - only access non-pvmfw memory once (and while) it has been mapped
- logger::init(LevelFilter::Info).map_err(|_| RebootReason::InternalError)?;
-
- // Use debug!() to avoid printing to the UART if we failed to configure it as only local
- // builds that have tweaked the logger::init() call will actually attempt to log the message.
-
- get_hypervisor().mmio_guard_init().map_err(|e| {
- debug!("{e}");
- RebootReason::InternalError
- })?;
-
- get_hypervisor().mmio_guard_map(console::BASE_ADDRESS).map_err(|e| {
- debug!("Failed to configure the UART: {e}");
- RebootReason::InternalError
- })?;
-
+ log::set_max_level(LevelFilter::Info);
crypto::init();
let page_table = memory::init_page_table().map_err(|e| {
@@ -235,11 +220,6 @@
let slices = MemorySlices::new(fdt, payload, payload_size)?;
- rand::init().map_err(|e| {
- error!("Failed to initialize rand: {e}");
- RebootReason::InternalError
- })?;
-
// This wrapper allows main() to be blissfully ignorant of platform details.
let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
@@ -253,10 +233,12 @@
})?;
// Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
MEMORY.lock().as_mut().unwrap().unshare_all_memory();
- get_hypervisor().mmio_guard_unmap(console::BASE_ADDRESS).map_err(|e| {
- error!("Failed to unshare the UART: {e}");
- RebootReason::InternalError
- })?;
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.unmap(console::BASE_ADDRESS).map_err(|e| {
+ error!("Failed to unshare the UART: {e}");
+ RebootReason::InternalError
+ })?;
+ }
// Drop MemoryTracker and deactivate page table.
drop(MEMORY.lock().take());
@@ -278,19 +260,19 @@
let scratch = layout::scratch_range();
- assert_ne!(scratch.len(), 0, "scratch memory is empty.");
- assert_eq!(scratch.start % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
- assert_eq!(scratch.end % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+ assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
+ assert_eq!(scratch.start.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+ assert_eq!(scratch.end.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
- assert!(bcc.is_within(&scratch));
+ assert!(bcc.is_within(&(scratch.start.0..scratch.end.0)));
assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
let stack = memory::stack_range();
- assert_ne!(stack.len(), 0, "stack region is empty.");
- assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
- assert_eq!(stack.end % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+ assert_ne!(stack.end - stack.start, 0, "stack region is empty.");
+ assert_eq!(stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+ assert_eq!(stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
// Zero all memory that could hold secrets and that can't be safely written to from Rust.
// Disable the exception vector, caches and page table and then jump to the payload at the
@@ -375,11 +357,11 @@
sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
bcc = in(reg) u64::try_from(bcc.start).unwrap(),
bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
- cache_line = in(reg) u64::try_from(scratch.start).unwrap(),
- scratch = in(reg) u64::try_from(scratch.start).unwrap(),
- scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
- stack = in(reg) u64::try_from(stack.start).unwrap(),
- stack_end = in(reg) u64::try_from(stack.end).unwrap(),
+ cache_line = in(reg) u64::try_from(scratch.start.0).unwrap(),
+ scratch = in(reg) u64::try_from(scratch.start.0).unwrap(),
+ scratch_end = in(reg) u64::try_from(scratch.end.0).unwrap(),
+ stack = in(reg) u64::try_from(stack.start.0).unwrap(),
+ stack_end = in(reg) u64::try_from(stack.end.0).unwrap(),
dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
in("x0") fdt_address,
in("x30") payload_start,
@@ -396,7 +378,7 @@
let range = memory::appended_payload_range();
// SAFETY: This region is mapped and the linker script prevents it from overlapping with other
// objects.
- unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) }
+ unsafe { slice::from_raw_parts_mut(range.start.0 as *mut u8, range.end - range.start) }
}
enum AppendedConfigType {
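The `.0` unwrapping and `end - start` arithmetic above follow from the vmbase layout ranges becoming Range<VirtualAddress> instead of Range<usize>: VirtualAddress is a usize newtype from aarch64_paging, so Range::len() is no longer available and the inner address must be extracted for alignment checks and register operands. A small sketch of the pattern, assuming the newtype's public .0 field:

    use aarch64_paging::paging::VirtualAddress;
    use core::ops::Range;

    fn region_len(r: &Range<VirtualAddress>) -> usize {
        // Range<VirtualAddress> has no len(); subtract the inner usize
        // values (the diff uses the crate's Sub impl, end - start).
        r.end.0 - r.start.0
    }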
diff --git a/pvmfw/src/exceptions.rs b/pvmfw/src/exceptions.rs
index c3f8a29..d9f0891 100644
--- a/pvmfw/src/exceptions.rs
+++ b/pvmfw/src/exceptions.rs
@@ -14,125 +14,34 @@
//! Exception handlers.
-use core::fmt;
-use vmbase::console;
-use vmbase::logger;
-use vmbase::memory::{page_4kb_of, MemoryTrackerError, MEMORY};
-use vmbase::read_sysreg;
-use vmbase::{eprintln, power::reboot};
+use vmbase::{
+ eprintln,
+ exceptions::{ArmException, Esr, HandleExceptionError},
+ logger,
+ memory::{handle_permission_fault, handle_translation_fault},
+ power::reboot,
+ read_sysreg,
+};
-const UART_PAGE: usize = page_4kb_of(console::BASE_ADDRESS);
-
-#[derive(Debug)]
-enum HandleExceptionError {
- PageTableUnavailable,
- PageTableNotInitialized,
- InternalError(MemoryTrackerError),
- UnknownException,
-}
-
-impl From<MemoryTrackerError> for HandleExceptionError {
- fn from(other: MemoryTrackerError) -> Self {
- Self::InternalError(other)
- }
-}
-
-impl fmt::Display for HandleExceptionError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::PageTableUnavailable => write!(f, "Page table is not available."),
- Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
- Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
- Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
- }
- }
-}
-
-#[derive(Debug, PartialEq, Copy, Clone)]
-enum Esr {
- DataAbortTranslationFault,
- DataAbortPermissionFault,
- DataAbortSyncExternalAbort,
- Unknown(usize),
-}
-
-impl Esr {
- const EXT_DABT_32BIT: usize = 0x96000010;
- const TRANSL_FAULT_BASE_32BIT: usize = 0x96000004;
- const TRANSL_FAULT_ISS_MASK_32BIT: usize = !0x143;
- const PERM_FAULT_BASE_32BIT: usize = 0x9600004C;
- const PERM_FAULT_ISS_MASK_32BIT: usize = !0x103;
-}
-
-impl From<usize> for Esr {
- fn from(esr: usize) -> Self {
- if esr == Self::EXT_DABT_32BIT {
- Self::DataAbortSyncExternalAbort
- } else if esr & Self::TRANSL_FAULT_ISS_MASK_32BIT == Self::TRANSL_FAULT_BASE_32BIT {
- Self::DataAbortTranslationFault
- } else if esr & Self::PERM_FAULT_ISS_MASK_32BIT == Self::PERM_FAULT_BASE_32BIT {
- Self::DataAbortPermissionFault
- } else {
- Self::Unknown(esr)
- }
- }
-}
-
-impl fmt::Display for Esr {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::DataAbortSyncExternalAbort => write!(f, "Synchronous external abort"),
- Self::DataAbortTranslationFault => write!(f, "Translation fault"),
- Self::DataAbortPermissionFault => write!(f, "Permission fault"),
- Self::Unknown(v) => write!(f, "Unknown exception esr={v:#08x}"),
- }
- }
-}
-
-#[inline]
-fn handle_translation_fault(far: usize) -> Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_mmio_fault(far)?)
-}
-
-#[inline]
-fn handle_permission_fault(far: usize) -> Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_permission_fault(far)?)
-}
-
-fn handle_exception(esr: Esr, far: usize) -> Result<(), HandleExceptionError> {
+fn handle_exception(exception: &ArmException) -> Result<(), HandleExceptionError> {
// Handle all translation faults on both read and write, and MMIO guard map
// flagged invalid pages or blocks that caused the exception.
// Handle permission faults for DBM flagged entries, and flag them as dirty on write.
- match esr {
- Esr::DataAbortTranslationFault => handle_translation_fault(far),
- Esr::DataAbortPermissionFault => handle_permission_fault(far),
+ match exception.esr {
+ Esr::DataAbortTranslationFault => handle_translation_fault(exception.far),
+ Esr::DataAbortPermissionFault => handle_permission_fault(exception.far),
_ => Err(HandleExceptionError::UnknownException),
}
}
-#[inline]
-fn handling_uart_exception(esr: Esr, far: usize) -> bool {
- esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(far) == UART_PAGE
-}
-
#[no_mangle]
extern "C" fn sync_exception_current(elr: u64, _spsr: u64) {
// Disable logging in exception handler to prevent unsafe writes to UART.
let _guard = logger::suppress();
- let esr: Esr = read_sysreg!("esr_el1").into();
- let far = read_sysreg!("far_el1");
- if let Err(e) = handle_exception(esr, far) {
- // Don't print to the UART if we are handling an exception it could raise.
- if !handling_uart_exception(esr, far) {
- eprintln!("sync_exception_current");
- eprintln!("{e}");
- eprintln!("{esr}, far={far:#08x}, elr={elr:#08x}");
- }
+ let exception = ArmException::from_el1_regs();
+ if let Err(e) = handle_exception(&exception) {
+ exception.print("sync_exception_current", e, elr);
reboot()
}
}
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index efb354c..319100f 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -124,8 +124,24 @@
node.setprop(cstr!("bootargs"), bootargs.to_bytes_with_nul())
}
-/// Check if memory range is ok
-fn validate_memory_range(range: &Range<usize>) -> Result<(), RebootReason> {
+/// Reads and validates the memory range in the DT.
+///
+/// Only one memory range is expected with the crosvm setup for now.
+fn read_and_validate_memory_range(fdt: &Fdt) -> Result<Range<usize>, RebootReason> {
+ let mut memory = fdt.memory().map_err(|e| {
+ error!("Failed to read memory range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ let range = memory.next().ok_or_else(|| {
+ error!("The /memory node in the DT contains no range.");
+ RebootReason::InvalidFdt
+ })?;
+ if memory.next().is_some() {
+ warn!(
+ "The /memory node in the DT contains more than one memory range, \
+ while only one is expected."
+ );
+ }
let base = range.start;
if base != MEM_START {
error!("Memory base address {:#x} is not {:#x}", base, MEM_START);
@@ -142,7 +158,7 @@
error!("Memory size is 0");
return Err(RebootReason::InvalidFdt);
}
- Ok(())
+ Ok(range)
}
fn patch_memory_range(fdt: &mut Fdt, memory_range: &Range<usize>) -> libfdt::Result<()> {
@@ -600,11 +616,7 @@
RebootReason::InvalidFdt
})?;
- let memory_range = fdt.first_memory_range().map_err(|e| {
- error!("Failed to read memory range from DT: {e}");
- RebootReason::InvalidFdt
- })?;
- validate_memory_range(&memory_range)?;
+ let memory_range = read_and_validate_memory_range(fdt)?;
let bootargs = read_bootargs_from(fdt).map_err(|e| {
error!("Failed to read bootargs from DT: {e}");
diff --git a/pvmfw/src/gpt.rs b/pvmfw/src/gpt.rs
index b553705..892850c 100644
--- a/pvmfw/src/gpt.rs
+++ b/pvmfw/src/gpt.rs
@@ -24,9 +24,11 @@
use uuid::Uuid;
use virtio_drivers::device::blk::SECTOR_SIZE;
use vmbase::util::ceiling_div;
-use vmbase::virtio::pci::VirtIOBlk;
+use vmbase::virtio::{pci, HalImpl};
use zerocopy::FromBytes;
+type VirtIOBlk = pci::VirtIOBlk<HalImpl>;
+
pub enum Error {
/// VirtIO error during read operation.
FailedRead(virtio_drivers::Error),
diff --git a/pvmfw/src/instance.rs b/pvmfw/src/instance.rs
index 1035559..f2b34da 100644
--- a/pvmfw/src/instance.rs
+++ b/pvmfw/src/instance.rs
@@ -32,6 +32,7 @@
use vmbase::rand;
use vmbase::util::ceiling_div;
use vmbase::virtio::pci::{PciTransportIterator, VirtIOBlk};
+use vmbase::virtio::HalImpl;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
@@ -183,10 +184,11 @@
}
fn find_instance_img(pci_root: &mut PciRoot) -> Result<Partition> {
- for transport in
- PciTransportIterator::new(pci_root).filter(|t| DeviceType::Block == t.device_type())
+ for transport in PciTransportIterator::<HalImpl>::new(pci_root)
+ .filter(|t| DeviceType::Block == t.device_type())
{
- let device = VirtIOBlk::new(transport).map_err(Error::VirtIOBlkCreationFailed)?;
+ let device =
+ VirtIOBlk::<HalImpl>::new(transport).map_err(Error::VirtIOBlkCreationFailed)?;
match Partition::get_by_name(device, "vm-instance") {
Ok(Some(p)) => return Ok(p),
Ok(None) => {}
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 61e2312..ba453e7 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -95,8 +95,8 @@
// Set up PCI bus for VirtIO devices.
let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
debug!("PCI: {:#x?}", pci_info);
- let mut pci_root = pci::initialise(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
- error!("Failed to initialise PCI: {e}");
+ let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
+ error!("Failed to initialize PCI: {e}");
RebootReason::InternalError
})?;
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 11fcd7c..27ab719 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,25 +17,27 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::helpers::PVMFW_PAGE_SIZE;
+use aarch64_paging::paging::VirtualAddress;
use aarch64_paging::MapError;
+use core::ops::Range;
use core::result;
use log::error;
use vmbase::{
layout,
- memory::{MemoryRange, PageTable, SIZE_2MB, SIZE_4KB},
+ memory::{PageTable, SIZE_2MB, SIZE_4KB},
util::align_up,
};
/// Returns memory range reserved for the appended payload.
-pub fn appended_payload_range() -> MemoryRange {
- let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
+pub fn appended_payload_range() -> Range<VirtualAddress> {
+ let start = align_up(layout::binary_end().0, SIZE_4KB).unwrap();
// pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
let end = align_up(start, SIZE_2MB).unwrap();
- start..end
+ VirtualAddress(start)..VirtualAddress(end)
}
/// Region allocated for the stack.
-pub fn stack_range() -> MemoryRange {
+pub fn stack_range() -> Range<VirtualAddress> {
const STACK_PAGES: usize = 8;
layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
@@ -46,12 +48,12 @@
// Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
// so dirty state management can be omitted.
- page_table.map_data(&layout::scratch_range())?;
- page_table.map_data(&stack_range())?;
- page_table.map_code(&layout::text_range())?;
- page_table.map_rodata(&layout::rodata_range())?;
- page_table.map_data_dbm(&appended_payload_range())?;
- if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
+ page_table.map_data(&layout::scratch_range().into())?;
+ page_table.map_data(&stack_range().into())?;
+ page_table.map_code(&layout::text_range().into())?;
+ page_table.map_rodata(&layout::rodata_range().into())?;
+ page_table.map_data_dbm(&appended_payload_range().into())?;
+ if let Err(e) = page_table.map_device(&layout::console_uart_range().into()) {
error!("Failed to remap the UART as a dynamic page table entry: {e}");
return Err(e);
}
diff --git a/rialto/Android.bp b/rialto/Android.bp
index 9aa4667..1840278 100644
--- a/rialto/Android.bp
+++ b/rialto/Android.bp
@@ -13,6 +13,7 @@
"libfdtpci",
"liblibfdt",
"liblog_rust_nostd",
+ "libvirtio_drivers",
"libvmbase",
],
}
diff --git a/rialto/src/error.rs b/rialto/src/error.rs
index 8e2991c..c326566 100644
--- a/rialto/src/error.rs
+++ b/rialto/src/error.rs
@@ -19,7 +19,7 @@
use fdtpci::PciError;
use hyp::Error as HypervisorError;
use libfdt::FdtError;
-use vmbase::memory::MemoryTrackerError;
+use vmbase::{memory::MemoryTrackerError, virtio::pci};
pub type Result<T> = result::Result<T, Error>;
@@ -29,14 +29,14 @@
Hypervisor(HypervisorError),
/// Failed when attempting to map some range in the page table.
PageTableMapping(MapError),
- /// Failed to initialize the logger.
- LoggerInit,
/// Invalid FDT.
InvalidFdt(FdtError),
/// Invalid PCI.
InvalidPci(PciError),
/// Failed memory operation.
MemoryOperationFailed(MemoryTrackerError),
+ /// Failed to initialize PCI.
+ PciInitializationFailed(pci::PciError),
}
impl fmt::Display for Error {
@@ -46,10 +46,10 @@
Self::PageTableMapping(e) => {
write!(f, "Failed when attempting to map some range in the page table: {e}.")
}
- Self::LoggerInit => write!(f, "Failed to initialize the logger."),
Self::InvalidFdt(e) => write!(f, "Invalid FDT: {e}"),
Self::InvalidPci(e) => write!(f, "Invalid PCI: {e}"),
Self::MemoryOperationFailed(e) => write!(f, "Failed memory operation: {e}"),
+ Self::PciInitializationFailed(e) => write!(f, "Failed to initialize PCI: {e}"),
}
}
}
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index ce83624..3e0485d 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -24,10 +24,9 @@
use crate::error::{Error, Result};
use core::num::NonZeroUsize;
-use core::result;
use core::slice;
use fdtpci::PciInfo;
-use hyp::{get_hypervisor, HypervisorCap, KvmError};
+use hyp::{get_mem_sharer, get_mmio_guard};
use libfdt::FdtError;
use log::{debug, error, info};
use vmbase::{
@@ -37,36 +36,21 @@
main,
memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_64KB},
power::reboot,
+ virtio::pci,
};
fn new_page_table() -> Result<PageTable> {
let mut page_table = PageTable::default();
- page_table.map_device(&crosvm::MMIO_RANGE)?;
- page_table.map_data(&layout::scratch_range())?;
- page_table.map_data(&layout::stack_range(40 * PAGE_SIZE))?;
- page_table.map_code(&layout::text_range())?;
- page_table.map_rodata(&layout::rodata_range())?;
- page_table.map_device(&layout::console_uart_range())?;
+ page_table.map_data(&layout::scratch_range().into())?;
+ page_table.map_data(&layout::stack_range(40 * PAGE_SIZE).into())?;
+ page_table.map_code(&layout::text_range().into())?;
+ page_table.map_rodata(&layout::rodata_range().into())?;
+ page_table.map_device(&layout::console_uart_range().into())?;
Ok(page_table)
}
-fn try_init_logger() -> Result<bool> {
- let mmio_guard_supported = match get_hypervisor().mmio_guard_init() {
- // pKVM blocks MMIO by default, we need to enable MMIO guard to support logging.
- Ok(()) => {
- get_hypervisor().mmio_guard_map(vmbase::console::BASE_ADDRESS)?;
- true
- }
- // MMIO guard enroll is not supported in unprotected VM.
- Err(hyp::Error::MmioGuardNotsupported) => false,
- Err(e) => return Err(e.into()),
- };
- vmbase::logger::init(log::LevelFilter::Debug).map_err(|_| Error::LoggerInit)?;
- Ok(mmio_guard_supported)
-}
-
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
@@ -91,8 +75,6 @@
let fdt = unsafe { slice::from_raw_parts(fdt_range.start as *mut u8, fdt_range.len()) };
// We do not need to validate the DT since it is already validated in pvmfw.
let fdt = libfdt::Fdt::from_slice(fdt)?;
- let pci_info = PciInfo::from_fdt(fdt)?;
- debug!("PCI: {pci_info:#x?}");
let memory_range = fdt.first_memory_range()?;
MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
@@ -100,14 +82,14 @@
e
})?;
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- let granule = memory_protection_granule()?;
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule()?;
MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
error!("Failed to initialize dynamically shared pool.");
e
})?;
- } else {
- let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().ok_or_else(|| {
+ } else if let Ok(swiotlb_info) = SwiotlbInfo::new_from_fdt(fdt) {
+ let range = swiotlb_info.fixed_range().ok_or_else(|| {
error!("Pre-shared pool range not specified in swiotlb node");
Error::from(FdtError::BadValue)
})?;
@@ -115,50 +97,50 @@
error!("Failed to initialize pre-shared pool.");
e
})?;
+ } else {
+ info!("No MEM_SHARE capability detected or swiotlb found: allocating buffers from heap.");
+ MEMORY.lock().as_mut().unwrap().init_heap_shared_pool().map_err(|e| {
+ error!("Failed to initialize heap-based pseudo-shared pool.");
+ e
+ })?;
}
+
+ let pci_info = PciInfo::from_fdt(fdt)?;
+ debug!("PCI: {pci_info:#x?}");
+ let pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap())
+ .map_err(Error::PciInitializationFailed)?;
+ debug!("PCI root: {pci_root:#x?}");
Ok(())
}
-fn memory_protection_granule() -> result::Result<usize, hyp::Error> {
- match get_hypervisor().memory_protection_granule() {
- Ok(granule) => Ok(granule),
- // Take the default page size when KVM call is not supported in non-protected VMs.
- Err(hyp::Error::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
- Err(e) => Err(e),
- }
-}
-
-fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
+fn try_unshare_all_memory() -> Result<()> {
info!("Starting unsharing memory...");
// No logging after unmapping UART.
- if mmio_guard_supported {
- get_hypervisor().mmio_guard_unmap(vmbase::console::BASE_ADDRESS)?;
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.unmap(vmbase::console::BASE_ADDRESS)?;
}
// Unshares all memory and deactivates page table.
drop(MEMORY.lock().take());
Ok(())
}
-fn unshare_all_memory(mmio_guard_supported: bool) {
- if let Err(e) = try_unshare_all_memory(mmio_guard_supported) {
+fn unshare_all_memory() {
+ if let Err(e) = try_unshare_all_memory() {
error!("Failed to unshare the memory: {e}");
}
}
/// Entry point for Rialto.
pub fn main(fdt_addr: u64, _a1: u64, _a2: u64, _a3: u64) {
- let Ok(mmio_guard_supported) = try_init_logger() else {
- // Don't log anything if the logger initialization fails.
- reboot();
- };
+ log::set_max_level(log::LevelFilter::Debug);
// SAFETY: `fdt_addr` is supposed to be a valid pointer and points to
// a valid `Fdt`.
match unsafe { try_main(fdt_addr as usize) } {
- Ok(()) => unshare_all_memory(mmio_guard_supported),
+ Ok(()) => unshare_all_memory(),
Err(e) => {
error!("Rialto failed with {e}");
- unshare_all_memory(mmio_guard_supported);
+ unshare_all_memory();
reboot()
}
}
diff --git a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
index 014f9f0..9cf28c7 100644
--- a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
@@ -115,7 +115,7 @@
mAndroidDevice.supportsMicrodroid(/* protectedVm= */ true));
assumeFalse("Test requires setprop for using custom pvmfw and adb root", isUserBuild());
- mAndroidDevice.enableAdbRoot();
+ assumeTrue("Skip if adb root fails", mAndroidDevice.enableAdbRoot());
// tradefed copies the test artifacts under /tmp when running tests,
// so we should *find* the artifacts with the file name.
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index a3b56f9..15cb450 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -555,6 +555,14 @@
.setVmOutputCaptured(true);
e = assertThrows(IllegalStateException.class, () -> captureOutputOnNonDebuggable.build());
assertThat(e).hasMessageThat().contains("debug level must be FULL to capture output");
+
+ VirtualMachineConfig.Builder captureInputOnNonDebuggable =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("binary.so")
+ .setDebugLevel(VirtualMachineConfig.DEBUG_LEVEL_NONE)
+ .setVmConsoleInputSupported(true);
+ e = assertThrows(IllegalStateException.class, () -> captureInputOnNonDebuggable.build());
+ assertThat(e).hasMessageThat().contains("debug level must be FULL to use console input");
}
@Test
@@ -593,6 +601,9 @@
newBaselineBuilder().setDebugLevel(DEBUG_LEVEL_FULL);
VirtualMachineConfig debuggable = debuggableBuilder.build();
assertConfigCompatible(debuggable, debuggableBuilder.setVmOutputCaptured(true)).isFalse();
+ assertConfigCompatible(debuggable, debuggableBuilder.setVmOutputCaptured(false)).isTrue();
+ assertConfigCompatible(debuggable, debuggableBuilder.setVmConsoleInputSupported(true))
+ .isFalse();
VirtualMachineConfig currentContextConfig =
new VirtualMachineConfig.Builder(getContext())
@@ -1582,6 +1593,7 @@
.setProtectedVm(mProtectedVm)
.setPayloadBinaryName("MicrodroidTestNativeLib.so")
.setDebugLevel(DEBUG_LEVEL_FULL)
+ .setVmConsoleInputSupported(true) // even if console input is supported
.build();
final VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_forward_log", vmConfig);
vm.run();
@@ -1596,6 +1608,28 @@
}
}
+ @Test
+ public void inputShouldBeExplicitlyAllowed() throws Exception {
+ assumeSupportedDevice();
+
+ final VirtualMachineConfig vmConfig =
+ new VirtualMachineConfig.Builder(getContext())
+ .setProtectedVm(mProtectedVm)
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .setVmOutputCaptured(true) // even if output is captured
+ .build();
+ final VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_forward_log", vmConfig);
+ vm.run();
+
+ try {
+ assertThrowsVmExceptionContaining(
+ () -> vm.getConsoleInput(), "VM console input is not supported");
+ } finally {
+ vm.stop();
+ }
+ }
+
private boolean checkVmOutputIsRedirectedToLogcat(boolean debuggable) throws Exception {
String time =
LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"));
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index 13367c3..8c412f6 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -527,7 +527,7 @@
}
}
Ok(VmResponse::Err(e)) => {
- // ENOTSUP is returned when the balloon protocol is not initialised. This
+ // ENOTSUP is returned when the balloon protocol is not initialized. This
// can occur for numerous reasons: Guest is still booting, guest doesn't
// support ballooning, host doesn't support ballooning. We don't log or
// raise an error in this case: trim is just a hint and we can ignore it.
@@ -792,7 +792,7 @@
// devices in the same PCI bus and serial devices come before the block devices. Arm crosvm
// doesn't have the issue.
// /dev/ttyS0
- command.arg(format!("--serial={}{},hardware=serial,num=1", &console_out_arg, &console_in_arg));
+ command.arg(format!("--serial={},hardware=serial,num=1", &console_out_arg));
// /dev/ttyS1
command.arg(format!("--serial=type=file,path={},hardware=serial,num=2", &failure_serial_path));
// /dev/hvc0
diff --git a/virtualizationmanager/src/payload.rs b/virtualizationmanager/src/payload.rs
index c11834d..ab6f31c 100644
--- a/virtualizationmanager/src/payload.rs
+++ b/virtualizationmanager/src/payload.rs
@@ -194,12 +194,12 @@
temporary_directory: &Path,
) -> Result<ParcelFileDescriptor> {
let payload_metadata = match &app_config.payload {
- Payload::PayloadConfig(payload_config) => PayloadMetadata::config(PayloadConfig {
+ Payload::PayloadConfig(payload_config) => PayloadMetadata::Config(PayloadConfig {
payload_binary_name: payload_config.payloadBinaryName.clone(),
..Default::default()
}),
Payload::ConfigPath(config_path) => {
- PayloadMetadata::config_path(format!("/mnt/apk/{}", config_path))
+ PayloadMetadata::ConfigPath(format!("/mnt/apk/{}", config_path))
}
};
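
With `libprotobuf` (protobuf v3 codegen) replacing `libprotobuf_deprecated`, a `oneof` field is generated as a plain Rust enum, so values are constructed and matched as ordinary variants rather than through `PayloadMetadata::config(...)`-style helpers. A hedged sketch of consuming the new shape (the wildcard arm is defensive; the generated enum may carry variants not shown here):

```rust
// Hypothetical consumer of the generated oneof enum used above.
fn describe(metadata: &PayloadMetadata) -> String {
    match metadata {
        PayloadMetadata::Config(config) => {
            format!("inline config for {}", config.payload_binary_name)
        }
        PayloadMetadata::ConfigPath(path) => format!("config file at {path}"),
        // Defensive arm in case the generated enum has further variants.
        _ => String::from("unknown payload metadata"),
    }
}
```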
diff --git a/vmbase/README.md b/vmbase/README.md
index 7f621fb..280d7e1 100644
--- a/vmbase/README.md
+++ b/vmbase/README.md
@@ -6,7 +6,7 @@
In particular it provides:
-- An [entry point](entry.S) that initialises the MMU with a hard-coded identity mapping, enables the
+- An [entry point](entry.S) that initializes the MMU with a hard-coded identity mapping, enables the
cache, prepares the image and allocates a stack.
- An [exception vector](exceptions.S) to call your exception handlers.
- A UART driver and `println!` macro for early console logging.
@@ -62,7 +62,7 @@
}
```
-vmbase adds a wrapper around your main function to initialise the console driver first (with the
+vmbase adds a wrapper around your main function to initialize the console driver first (with the
UART at base address `0x3f8`, the first UART allocated by crosvm), and make a PSCI `SYSTEM_OFF` call
to shutdown the VM if your main function ever returns.
@@ -93,7 +93,7 @@
The `println!` macro shouldn't be used in exception handlers, because it relies on a global instance
of the UART driver which might be locked when the exception happens, which would result in deadlock.
-Instead you can use `emergency_write_str` and `eprintln!`, which will re-initialise the UART every
+Instead you can use `emergency_write_str` and `eprintln!`, which will re-initialize the UART every
time to ensure that it can be used. This should still be used with care, as it may interfere with
whatever the rest of the program is doing with the UART.
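
To make that guidance concrete, a hedged sketch of an error path that avoids the global console lock; the exact import paths are assumptions, but `emergency_write_str` and `eprintln!` are the APIs the README names:

```rust
// Sketch, assuming `emergency_write_str` lives in vmbase::console and
// `eprintln!` is exported at the crate root as the README describes.
use vmbase::console::emergency_write_str;
use vmbase::eprintln;

fn report_fatal(msg: &str, elr: u64) {
    // Both calls re-initialize the UART, so they work even if the global
    // console instance is locked by the interrupted code.
    emergency_write_str("FATAL: ");
    eprintln!("{msg} (elr = {elr:#010x})");
}
```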
diff --git a/vmbase/entry.S b/vmbase/entry.S
index 9f6993a..9177a4a 100644
--- a/vmbase/entry.S
+++ b/vmbase/entry.S
@@ -63,72 +63,6 @@
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
-/* SMC function IDs */
-.set .L_SMCCC_VERSION_ID, 0x80000000
-.set .L_SMCCC_TRNG_VERSION_ID, 0x84000050
-.set .L_SMCCC_TRNG_FEATURES_ID, 0x84000051
-.set .L_SMCCC_TRNG_RND64_ID, 0xc4000053
-
-/* SMC function versions */
-.set .L_SMCCC_VERSION_1_1, 0x0101
-.set .L_SMCCC_TRNG_VERSION_1_0, 0x0100
-
-/* Bionic-compatible stack protector */
-.section .data.stack_protector, "aw"
-__bionic_tls:
- .zero 40
-.global __stack_chk_guard
-__stack_chk_guard:
- .quad 0
-
-/**
- * This macro stores a random value into a register.
- * If a TRNG backed is not present or if an error occurs, the value remains unchanged.
- */
-.macro rnd_reg reg:req
- mov x20, x0
- mov x21, x1
- mov x22, x2
- mov x23, x3
-
- /* Verify SMCCC version >=1.1 */
- hvc_call .L_SMCCC_VERSION_ID
- cmp w0, 0
- b.lt 100f
- cmp w0, .L_SMCCC_VERSION_1_1
- b.lt 100f
-
- /* Verify TRNG ABI version 1.x */
- hvc_call .L_SMCCC_TRNG_VERSION_ID
- cmp w0, 0
- b.lt 100f
- cmp w0, .L_SMCCC_TRNG_VERSION_1_0
- b.lt 100f
-
- /* Call TRNG_FEATURES, ensure TRNG_RND is implemented */
- mov_i x1, .L_SMCCC_TRNG_RND64_ID
- hvc_call .L_SMCCC_TRNG_FEATURES_ID
- cmp w0, 0
- b.lt 100f
-
- /* Call TRNG_RND, request 64 bits of entropy */
- mov x1, #64
- hvc_call .L_SMCCC_TRNG_RND64_ID
- cmp x0, 0
- b.lt 100f
-
- mov \reg, x3
- b 101f
-
-100:
- reset_or_hang
-101:
- mov x0, x20
- mov x1, x21
- mov x2, x22
- mov x3, x23
-.endm
-
/**
* This is a generic entry point for an image. It carries out the operations required to prepare the
* loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
@@ -222,18 +156,17 @@
adr x30, vector_table_el1
msr vbar_el1, x30
- /* Set up Bionic-compatible thread-local storage. */
+ /*
+ * Set up Bionic-compatible thread-local storage.
+ *
+ * Note that TPIDR_EL0 can't be configured from rust_entry because the
+ * compiler will dereference it during function entry to access
+ * __stack_chk_guard and Rust doesn't support LLVM's
+ * __attribute__((no_stack_protector)).
+ */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
- /* Randomize stack protector. */
- rnd_reg x29
- adr_l x30, __stack_chk_guard
- str x29, [x30]
-
- /* Write a null byte to the top of the stack guard to act as a string terminator. */
- strb wzr, [x30]
-
/* Call into Rust code. */
bl rust_entry
diff --git a/vmbase/example/src/layout.rs b/vmbase/example/src/layout.rs
index 2e9d27a..fc578bc 100644
--- a/vmbase/example/src/layout.rs
+++ b/vmbase/example/src/layout.rs
@@ -15,80 +15,36 @@
//! Memory layout.
use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
-use core::arch::asm;
use core::ops::Range;
use log::info;
use vmbase::layout;
-use vmbase::STACK_CHK_GUARD;
/// The first 1 GiB of memory are used for MMIO.
pub const DEVICE_REGION: MemoryRegion = MemoryRegion::new(0, 0x40000000);
-fn into_va_range(r: Range<usize>) -> Range<VirtualAddress> {
- VirtualAddress(r.start)..VirtualAddress(r.end)
-}
-
-/// Memory reserved for the DTB.
-pub fn dtb_range() -> Range<VirtualAddress> {
- into_va_range(layout::dtb_range())
-}
-
-/// Executable code.
-pub fn text_range() -> Range<VirtualAddress> {
- into_va_range(layout::text_range())
-}
-
-/// Read-only data.
-pub fn rodata_range() -> Range<VirtualAddress> {
- into_va_range(layout::rodata_range())
-}
-
-/// Initialised writable data.
-pub fn data_range() -> Range<VirtualAddress> {
- into_va_range(layout::data_range())
-}
-
-/// Zero-initialised writable data.
-pub fn bss_range() -> Range<VirtualAddress> {
- into_va_range(layout::bss_range())
-}
-
/// Writable data region for the stack.
pub fn boot_stack_range() -> Range<VirtualAddress> {
const PAGE_SIZE: usize = 4 << 10;
- into_va_range(layout::stack_range(40 * PAGE_SIZE))
-}
-
-/// Writable data region for allocations.
-pub fn scratch_range() -> Range<VirtualAddress> {
- into_va_range(layout::scratch_range())
-}
-
-fn data_load_address() -> VirtualAddress {
- VirtualAddress(layout::data_load_address())
-}
-
-fn binary_end() -> VirtualAddress {
- VirtualAddress(layout::binary_end())
+ layout::stack_range(40 * PAGE_SIZE)
}
pub fn print_addresses() {
- let dtb = dtb_range();
+ let dtb = layout::dtb_range();
info!("dtb: {}..{} ({} bytes)", dtb.start, dtb.end, dtb.end - dtb.start);
- let text = text_range();
+ let text = layout::text_range();
info!("text: {}..{} ({} bytes)", text.start, text.end, text.end - text.start);
- let rodata = rodata_range();
+ let rodata = layout::rodata_range();
info!("rodata: {}..{} ({} bytes)", rodata.start, rodata.end, rodata.end - rodata.start);
- info!("binary end: {}", binary_end());
- let data = data_range();
+ info!("binary end: {}", layout::binary_end());
+ let data = layout::data_range();
info!(
"data: {}..{} ({} bytes, loaded at {})",
data.start,
data.end,
data.end - data.start,
- data_load_address(),
+ layout::data_load_address(),
);
- let bss = bss_range();
+ let bss = layout::bss_range();
info!("bss: {}..{} ({} bytes)", bss.start, bss.end, bss.end - bss.start);
let boot_stack = boot_stack_range();
info!(
@@ -98,18 +54,3 @@
boot_stack.end - boot_stack.start
);
}
-
-/// Bionic-compatible thread-local storage entry, at the given offset from TPIDR_EL0.
-pub fn bionic_tls(off: usize) -> u64 {
- let mut base: usize;
- unsafe {
- asm!("mrs {base}, tpidr_el0", base = out(reg) base);
- let ptr = (base + off) as *const u64;
- *ptr
- }
-}
-
-/// Value of __stack_chk_guard.
-pub fn stack_chk_guard() -> u64 {
- *STACK_CHK_GUARD
-}
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index b3b5732..8086885 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -23,31 +23,53 @@
extern crate alloc;
-use crate::layout::{
- bionic_tls, boot_stack_range, dtb_range, print_addresses, rodata_range, scratch_range,
- stack_chk_guard, text_range, DEVICE_REGION,
-};
+use crate::layout::{boot_stack_range, print_addresses, DEVICE_REGION};
use crate::pci::{check_pci, get_bar_region};
-use aarch64_paging::{idmap::IdMap, paging::Attributes};
+use aarch64_paging::paging::MemoryRegion;
+use aarch64_paging::MapError;
use alloc::{vec, vec::Vec};
use fdtpci::PciInfo;
use libfdt::Fdt;
use log::{debug, error, info, trace, warn, LevelFilter};
-use vmbase::{configure_heap, cstr, logger, main, memory::SIZE_64KB};
+use vmbase::{
+ bionic, configure_heap, cstr,
+ layout::{dtb_range, rodata_range, scratch_range, text_range},
+ linker, logger, main,
+ memory::{PageTable, SIZE_64KB},
+};
static INITIALISED_DATA: [u32; 4] = [1, 2, 3, 4];
static mut ZEROED_DATA: [u32; 10] = [0; 10];
static mut MUTABLE_DATA: [u32; 4] = [1, 2, 3, 4];
-const ASID: usize = 1;
-const ROOT_LEVEL: usize = 1;
-
main!(main);
configure_heap!(SIZE_64KB);
+fn init_page_table(pci_bar_range: &MemoryRegion) -> Result<(), MapError> {
+ let mut page_table = PageTable::default();
+
+ page_table.map_device(&DEVICE_REGION)?;
+ page_table.map_code(&text_range().into())?;
+ page_table.map_rodata(&rodata_range().into())?;
+ page_table.map_data(&scratch_range().into())?;
+ page_table.map_data(&boot_stack_range().into())?;
+ page_table.map_rodata(&dtb_range().into())?;
+ page_table.map_device(pci_bar_range)?;
+
+ info!("Activating IdMap...");
+ // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
+ // aware of so activating it shouldn't have any visible effect.
+ unsafe {
+ page_table.activate();
+ }
+ info!("Activated.");
+
+ Ok(())
+}
+
/// Entry point for VM bootloader.
pub fn main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) {
- logger::init(LevelFilter::Debug).unwrap();
+ log::set_max_level(LevelFilter::Debug);
info!("Hello world");
info!("x0={:#018x}, x1={:#018x}, x2={:#018x}, x3={:#018x}", arg0, arg1, arg2, arg3);
@@ -71,68 +93,7 @@
check_alloc();
- let mut idmap = IdMap::new(ASID, ROOT_LEVEL);
- idmap
- .map_range(
- &DEVICE_REGION,
- Attributes::VALID | Attributes::DEVICE_NGNRE | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &text_range().into(),
- Attributes::VALID | Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY,
- )
- .unwrap();
- idmap
- .map_range(
- &rodata_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::READ_ONLY
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &scratch_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &boot_stack_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &dtb_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::READ_ONLY
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &get_bar_region(&pci_info),
- Attributes::VALID | Attributes::DEVICE_NGNRE | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
-
- info!("Activating IdMap...");
- trace!("{:?}", idmap);
- idmap.activate();
- info!("Activated.");
+ init_page_table(&get_bar_region(&pci_info)).unwrap();
check_data();
check_dice();
@@ -144,10 +105,20 @@
}
fn check_stack_guard() {
- const BIONIC_TLS_STACK_GRD_OFF: usize = 40;
-
info!("Testing stack guard");
- assert_eq!(bionic_tls(BIONIC_TLS_STACK_GRD_OFF), stack_chk_guard());
+ // SAFETY: No concurrency issue should occur when running these tests.
+ let stack_guard = unsafe { bionic::TLS.stack_guard };
+ assert_ne!(stack_guard, 0);
+ // Check that a NUL terminator is added for C functions consuming strings from the stack.
+ assert_eq!(stack_guard.to_ne_bytes().last(), Some(&0));
+ // Check that the TLS and guard are properly accessible from the dedicated register.
+ assert_eq!(stack_guard, bionic::__get_tls().stack_guard);
+ // Check that the LLVM __stack_chk_guard alias is also properly set up.
+ assert_eq!(
+ stack_guard,
+ // SAFETY: No concurrency issue should occur when running these tests.
+ unsafe { linker::__stack_chk_guard },
+ );
}
fn check_data() {
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index 6abe66e..6d33215 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -20,13 +20,14 @@
use fdtpci::PciInfo;
use log::{debug, info};
use virtio_drivers::{
- device::{blk::VirtIOBlk, console::VirtIOConsole},
+ device::console::VirtIOConsole,
transport::{
- pci::{bus::PciRoot, virtio_device_type, PciTransport},
+ pci::{bus::PciRoot, PciTransport},
DeviceType, Transport,
},
BufferDirection, Error, Hal, PhysAddr, PAGE_SIZE,
};
+use vmbase::virtio::pci::{self, PciTransportIterator};
/// The standard sector size of a VirtIO block device, in bytes.
const SECTOR_SIZE_BYTES: usize = 512;
@@ -37,39 +38,40 @@
pub fn check_pci(pci_root: &mut PciRoot) {
let mut checked_virtio_device_count = 0;
let mut block_device_count = 0;
- for (device_function, info) in pci_root.enumerate_bus(0) {
- let (status, command) = pci_root.get_status_command(device_function);
- info!("Found {} at {}, status {:?} command {:?}", info, device_function, status, command);
- if let Some(virtio_type) = virtio_device_type(&info) {
- info!(" VirtIO {:?}", virtio_type);
- let mut transport = PciTransport::new::<HalImpl>(pci_root, device_function).unwrap();
- info!(
- "Detected virtio PCI device with device type {:?}, features {:#018x}",
- transport.device_type(),
- transport.read_device_features(),
- );
- match virtio_type {
- DeviceType::Block => {
- check_virtio_block_device(transport, block_device_count);
- block_device_count += 1;
- checked_virtio_device_count += 1;
- }
- DeviceType::Console => {
- check_virtio_console_device(transport);
- checked_virtio_device_count += 1;
- }
- _ => {}
+ let mut socket_device_count = 0;
+ for mut transport in PciTransportIterator::<HalImpl>::new(pci_root) {
+ info!(
+ "Detected virtio PCI device with device type {:?}, features {:#018x}",
+ transport.device_type(),
+ transport.read_device_features(),
+ );
+ match transport.device_type() {
+ DeviceType::Block => {
+ check_virtio_block_device(transport, block_device_count);
+ block_device_count += 1;
+ checked_virtio_device_count += 1;
}
+ DeviceType::Console => {
+ check_virtio_console_device(transport);
+ checked_virtio_device_count += 1;
+ }
+ DeviceType::Socket => {
+ check_virtio_socket_device(transport);
+ socket_device_count += 1;
+ checked_virtio_device_count += 1;
+ }
+ _ => {}
}
}
- assert_eq!(checked_virtio_device_count, 5);
+ assert_eq!(checked_virtio_device_count, 6);
assert_eq!(block_device_count, 2);
+ assert_eq!(socket_device_count, 1);
}
/// Checks the given VirtIO block device.
-fn check_virtio_block_device(transport: impl Transport, index: usize) {
- let mut blk = VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
+fn check_virtio_block_device(transport: PciTransport, index: usize) {
+ let mut blk = pci::VirtIOBlk::<HalImpl>::new(transport).expect("failed to create blk driver");
info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
match index {
0 => {
@@ -93,9 +95,16 @@
}
}
+/// Checks the given VirtIO socket device.
+fn check_virtio_socket_device(transport: PciTransport) {
+ let socket = pci::VirtIOSocket::<HalImpl>::new(transport)
+ .expect("Failed to create VirtIO socket driver");
+ info!("Found socket device: guest_cid={}", socket.guest_cid());
+}
+
/// Checks the given VirtIO console device.
-fn check_virtio_console_device(transport: impl Transport) {
- let mut console = VirtIOConsole::<HalImpl, _>::new(transport)
+fn check_virtio_console_device(transport: PciTransport) {
+ let mut console = VirtIOConsole::<HalImpl, PciTransport>::new(transport)
.expect("Failed to create VirtIO console driver");
info!("Found console device: {:?}", console.info());
for &c in b"Hello VirtIO console\n" {
diff --git a/vmbase/sections.ld b/vmbase/sections.ld
index 5232d30..c7ef0ec 100644
--- a/vmbase/sections.ld
+++ b/vmbase/sections.ld
@@ -107,6 +107,9 @@
. = init_stack_pointer;
} >writable_data
+ /* Make our Bionic stack protector compatible with mainline LLVM */
+ __stack_chk_guard = __bionic_tls + 40;
+
/*
* Remove unused sections from the image.
*/
diff --git a/vmbase/src/arch.rs b/vmbase/src/arch.rs
index d7b63b3..d8bb8b2 100644
--- a/vmbase/src/arch.rs
+++ b/vmbase/src/arch.rs
@@ -19,8 +19,8 @@
macro_rules! read_sysreg {
($sysreg:literal) => {{
let mut r: usize;
- // Safe because it reads a system register and does not affect Rust.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Reading a system register does not affect memory.
unsafe {
core::arch::asm!(
concat!("mrs {}, ", $sysreg),
@@ -53,8 +53,8 @@
#[macro_export]
macro_rules! isb {
() => {{
- // Safe because this is just a memory barrier and does not affect Rust.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: memory barriers do not affect Rust's memory model.
unsafe {
core::arch::asm!("isb", options(nomem, nostack, preserves_flags));
}
@@ -65,8 +65,8 @@
#[macro_export]
macro_rules! dsb {
($option:literal) => {{
- // Safe because this is just a memory barrier and does not affect Rust.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: memory barriers do not affect Rust's memory model.
unsafe {
core::arch::asm!(concat!("dsb ", $option), options(nomem, nostack, preserves_flags));
}
@@ -79,9 +79,9 @@
($option:literal, $asid:expr, $addr:expr) => {{
let asid: usize = $asid;
let addr: usize = $addr;
- // Safe because it invalidates TLB and doesn't affect Rust. When the address matches a
- // block entry larger than the page size, all translations for the block are invalidated.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Invalidating the TLB doesn't affect Rust. When the address matches a
+ // block entry larger than the page size, all translations for the block are invalidated.
unsafe {
core::arch::asm!(
concat!("tlbi ", $option, ", {x}"),
diff --git a/vmbase/src/bionic.rs b/vmbase/src/bionic.rs
index 69da521..2ce0e83 100644
--- a/vmbase/src/bionic.rs
+++ b/vmbase/src/bionic.rs
@@ -23,12 +23,35 @@
use crate::console;
use crate::eprintln;
-use crate::linker;
+use crate::read_sysreg;
const EOF: c_int = -1;
-/// Reference to __stack_chk_guard.
-pub static STACK_CHK_GUARD: &u64 = unsafe { &linker::__stack_chk_guard };
+/// Bionic thread-local storage.
+#[repr(C)]
+pub struct Tls {
+ /// Unused.
+ _unused: [u8; 40],
+ /// Used by the compiler as the stack canary value.
+ pub stack_guard: u64,
+}
+
+/// Bionic TLS.
+///
+/// Provides the TLS used by Bionic code. This is unique as vmbase only supports one thread.
+///
+/// Note that the linker script re-exports __bionic_tls.stack_guard as __stack_chk_guard for
+/// compatibility with non-Bionic LLVM.
+#[link_section = ".data.stack_protector"]
+#[export_name = "__bionic_tls"]
+pub static mut TLS: Tls = Tls { _unused: [0; 40], stack_guard: 0 };
+
+/// Gets a reference to the TLS from the dedicated system register.
+pub fn __get_tls() -> &'static mut Tls {
+ let tpidr = read_sysreg!("tpidr_el0");
+ // SAFETY: The register is currently only written to once, from entry.S, with a valid value.
+ unsafe { &mut *(tpidr as *mut Tls) }
+}
#[no_mangle]
extern "C" fn __stack_chk_fail() -> ! {
@@ -46,11 +69,13 @@
#[no_mangle]
unsafe extern "C" fn __errno() -> *mut c_int {
- &mut ERRNO as *mut _
+ // SAFETY: C functions which call this are only called from the main thread, not from exception
+ // handlers.
+ unsafe { &mut ERRNO as *mut _ }
}
fn set_errno(value: c_int) {
- // SAFETY - vmbase is currently single-threaded.
+ // SAFETY: vmbase is currently single-threaded.
unsafe { ERRNO = value };
}
@@ -58,15 +83,15 @@
///
/// # Safety
///
-/// Input strings `prefix` and `format` must be properly NULL-terminated.
+/// Input strings `prefix` and `format` must be valid and properly NUL-terminated.
///
/// # Note
///
/// This Rust function is missing the last argument of its C/C++ counterpart, a va_list.
#[no_mangle]
unsafe extern "C" fn async_safe_fatal_va_list(prefix: *const c_char, format: *const c_char) {
- let prefix = CStr::from_ptr(prefix);
- let format = CStr::from_ptr(format);
+ // SAFETY: The caller guaranteed that both strings were valid and NUL-terminated.
+ let (prefix, format) = unsafe { (CStr::from_ptr(prefix), CStr::from_ptr(format)) };
if let (Ok(prefix), Ok(format)) = (prefix.to_str(), format.to_str()) {
// We don't bother with printf formatting.
@@ -100,7 +125,7 @@
#[no_mangle]
extern "C" fn fputs(c_str: *const c_char, stream: usize) -> c_int {
- // SAFETY - Just like libc, we need to assume that `s` is a valid NULL-terminated string.
+ // SAFETY: Just like libc, we need to assume that `s` is a valid NULL-terminated string.
let c_str = unsafe { CStr::from_ptr(c_str) };
if let (Ok(s), Ok(_)) = (c_str.to_str(), File::try_from(stream)) {
@@ -116,7 +141,7 @@
extern "C" fn fwrite(ptr: *const c_void, size: usize, nmemb: usize, stream: usize) -> usize {
let length = size.saturating_mul(nmemb);
- // SAFETY - Just like libc, we need to assume that `ptr` is valid.
+ // SAFETY: Just like libc, we need to assume that `ptr` is valid.
let bytes = unsafe { slice::from_raw_parts(ptr as *const u8, length) };
if let (Ok(s), Ok(_)) = (str::from_utf8(bytes), File::try_from(stream)) {
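
The `Tls` struct mirrors Bionic's layout: 40 reserved bytes followed by the guard, which is exactly why the `sections.ld` alias is `__bionic_tls + 40`. A compile-time sanity sketch (assumes a toolchain with `core::mem::offset_of!`):

```rust
use core::mem::{align_of, offset_of, size_of};
use vmbase::bionic::Tls;

// The guard must sit 40 bytes into the TLS block for the linker alias
// `__stack_chk_guard = __bionic_tls + 40` to resolve to it.
const _: () = assert!(offset_of!(Tls, stack_guard) == 40);
// repr(C): [u8; 40] followed by an 8-byte-aligned u64, hence 48 bytes total.
const _: () = assert!(size_of::<Tls>() == 48);
const _: () = assert!(align_of::<Tls>() == 8);
```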
diff --git a/vmbase/src/console.rs b/vmbase/src/console.rs
index 7c8ddf6..a7d37b4 100644
--- a/vmbase/src/console.rs
+++ b/vmbase/src/console.rs
@@ -25,7 +25,7 @@
/// Initializes a new instance of the UART driver and returns it.
fn create() -> Uart {
- // Safe because BASE_ADDRESS is the base of the MMIO region for a UART and is mapped as device
+ // SAFETY: BASE_ADDRESS is the base of the MMIO region for a UART and is mapped as device
// memory.
unsafe { Uart::new(BASE_ADDRESS) }
}
@@ -51,7 +51,7 @@
write(CONSOLE.lock().as_mut().unwrap(), format_args).unwrap();
}
-/// Reinitialises the UART driver and writes a string to it.
+/// Reinitializes the UART driver and writes a string to it.
///
/// This is intended for use in situations where the UART may be in an unknown state or the global
/// instance may be locked, such as in an exception handler or panic handler.
@@ -60,7 +60,7 @@
let _ = uart.write_str(s);
}
-/// Reinitialises the UART driver and writes a formatted string to it.
+/// Reinitializes the UART driver and writes a formatted string to it.
///
/// This is intended for use in situations where the UART may be in an unknown state or the global
/// instance may be locked, such as in an exception handler or panic handler.
@@ -71,7 +71,7 @@
/// Prints the given formatted string to the console, followed by a newline.
///
-/// Panics if the console has not yet been initialised. May hang if used in an exception context;
+/// Panics if the console has not yet been initialized. May hang if used in an exception context;
/// use `eprintln!` instead.
macro_rules! println {
() => ($crate::console::write_str("\n"));
diff --git a/vmbase/src/entry.rs b/vmbase/src/entry.rs
index df0bb7c..24b5035 100644
--- a/vmbase/src/entry.rs
+++ b/vmbase/src/entry.rs
@@ -14,14 +14,53 @@
//! Rust entry point.
-use crate::{console, heap, power::shutdown};
+use crate::{
+ bionic, console, heap, logger,
+ power::{reboot, shutdown},
+ rand,
+};
+use core::mem::size_of;
+use hyp::{self, get_mmio_guard};
+
+fn try_console_init() -> Result<(), hyp::Error> {
+ console::init();
+
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.init()?;
+ mmio_guard.map(console::BASE_ADDRESS)?;
+ }
+
+ Ok(())
+}
/// This is the entry point to the Rust code, called from the binary entry point in `entry.S`.
#[no_mangle]
extern "C" fn rust_entry(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
- // SAFETY - Only called once, from here, and inaccessible to client code.
+ // SAFETY: Only called once, from here, and inaccessible to client code.
unsafe { heap::init() };
- console::init();
+
+ if try_console_init().is_err() {
+ // Don't panic (or log) here to avoid accessing the console.
+ reboot()
+ }
+
+ logger::init().expect("Failed to initialize the logger");
+ // The logger is initialized to Off (the log crate default); clients should call log::set_max_level.
+
+ const SIZE_OF_STACK_GUARD: usize = size_of::<u64>();
+ let mut stack_guard = [0u8; SIZE_OF_STACK_GUARD];
+ // We keep a null byte at the top of the stack guard to act as a string terminator.
+ let random_guard = &mut stack_guard[..(SIZE_OF_STACK_GUARD - 1)];
+
+ rand::init().expect("Failed to initialize a source of entropy");
+ rand::fill_with_entropy(random_guard).expect("Failed to get stack canary entropy");
+ bionic::__get_tls().stack_guard = u64::from_ne_bytes(stack_guard);
+
+ // Note: If rust_entry ever returned (which it shouldn't by being -> !), the compiler-injected
+ // stack guard comparison would detect a mismatch and call __stack_chk_fail.
+
+ // SAFETY: `main` is provided by the application using the `main!` macro, and we make sure it
+ // has the right type.
unsafe {
main(x0, x1, x2, x3);
}
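
The guard built above keeps its final byte zero on purpose: stored in memory, the u64's highest-addressed byte is the array's last element, so C code scanning the guard as a string stops at that NUL. A small worked sketch:

```rust
// Sketch of the guard layout: 7 bytes of entropy plus a final zero byte, so
// the in-memory representation ends with a NUL terminator.
fn make_guard(entropy: [u8; 7]) -> u64 {
    let mut bytes = [0u8; 8];
    bytes[..7].copy_from_slice(&entropy);
    // bytes[7] stays 0: the highest-addressed byte of the stored u64.
    u64::from_ne_bytes(bytes)
}

fn main() {
    let guard = make_guard([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77]);
    assert_eq!(guard.to_ne_bytes().last(), Some(&0));
}
```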
@@ -35,16 +74,21 @@
/// Marks the main function of the binary.
///
+/// Once main is entered, it can assume that:
+/// - The panic_handler has been configured and panic!() and friends are available;
+/// - The global_allocator has been configured and heap memory is available;
+/// - The logger has been configured and the log::{info, warn, error, ...} macros are available.
+///
/// Example:
///
/// ```rust
-/// use vmbase::{logger, main};
+/// use vmbase::main;
/// use log::{info, LevelFilter};
///
/// main!(my_main);
///
/// fn my_main() {
-/// logger::init(LevelFilter::Info).unwrap();
+/// log::set_max_level(LevelFilter::Info);
/// info!("Hello world");
/// }
/// ```
diff --git a/vmbase/src/exceptions.rs b/vmbase/src/exceptions.rs
new file mode 100644
index 0000000..7833334
--- /dev/null
+++ b/vmbase/src/exceptions.rs
@@ -0,0 +1,139 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Helper functions and structs for exception handlers.
+
+use crate::{
+ console, eprintln,
+ memory::{page_4kb_of, MemoryTrackerError},
+ read_sysreg,
+};
+use aarch64_paging::paging::VirtualAddress;
+use core::fmt;
+
+const UART_PAGE: usize = page_4kb_of(console::BASE_ADDRESS);
+
+/// Represents an error that can occur while handling an exception.
+#[derive(Debug)]
+pub enum HandleExceptionError {
+ /// The page table is unavailable.
+ PageTableUnavailable,
+ /// The page table has not been initialized.
+ PageTableNotInitialized,
+ /// An internal error occurred in the memory tracker.
+ InternalError(MemoryTrackerError),
+ /// An unknown exception occurred.
+ UnknownException,
+}
+
+impl From<MemoryTrackerError> for HandleExceptionError {
+ fn from(other: MemoryTrackerError) -> Self {
+ Self::InternalError(other)
+ }
+}
+
+impl fmt::Display for HandleExceptionError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::PageTableUnavailable => write!(f, "Page table is not available."),
+ Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
+ Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
+ Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
+ }
+ }
+}
+
+/// Represents the possible types of exception syndrome register (ESR) values.
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum Esr {
+ /// Data abort due to translation fault.
+ DataAbortTranslationFault,
+ /// Data abort due to permission fault.
+ DataAbortPermissionFault,
+ /// Data abort due to a synchronous external abort.
+ DataAbortSyncExternalAbort,
+ /// An unknown ESR value.
+ Unknown(usize),
+}
+
+impl Esr {
+ const EXT_DABT_32BIT: usize = 0x96000010;
+ const TRANSL_FAULT_BASE_32BIT: usize = 0x96000004;
+ const TRANSL_FAULT_ISS_MASK_32BIT: usize = !0x143;
+ const PERM_FAULT_BASE_32BIT: usize = 0x9600004C;
+ const PERM_FAULT_ISS_MASK_32BIT: usize = !0x103;
+}
+
+impl From<usize> for Esr {
+ fn from(esr: usize) -> Self {
+ if esr == Self::EXT_DABT_32BIT {
+ Self::DataAbortSyncExternalAbort
+ } else if esr & Self::TRANSL_FAULT_ISS_MASK_32BIT == Self::TRANSL_FAULT_BASE_32BIT {
+ Self::DataAbortTranslationFault
+ } else if esr & Self::PERM_FAULT_ISS_MASK_32BIT == Self::PERM_FAULT_BASE_32BIT {
+ Self::DataAbortPermissionFault
+ } else {
+ Self::Unknown(esr)
+ }
+ }
+}
+
+impl fmt::Display for Esr {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::DataAbortSyncExternalAbort => write!(f, "Synchronous external abort"),
+ Self::DataAbortTranslationFault => write!(f, "Translation fault"),
+ Self::DataAbortPermissionFault => write!(f, "Permission fault"),
+ Self::Unknown(v) => write!(f, "Unknown exception esr={v:#08x}"),
+ }
+ }
+}
+/// A struct representing an Armv8 exception.
+pub struct ArmException {
+ /// The value of the exception syndrome register.
+ pub esr: Esr,
+ /// The faulting virtual address read from the fault address register.
+ pub far: VirtualAddress,
+}
+
+impl fmt::Display for ArmException {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ArmException: esr={}, far={}", self.esr, self.far)
+ }
+}
+
+impl ArmException {
+ /// Reads the values of the EL1 exception syndrome register (`esr_el1`)
+ /// and fault address register (`far_el1`) and returns a new instance of
+ /// `ArmException` with these values.
+ pub fn from_el1_regs() -> Self {
+ let esr: Esr = read_sysreg!("esr_el1").into();
+ let far = read_sysreg!("far_el1");
+ Self { esr, far: VirtualAddress(far) }
+ }
+
+ /// Prints the details of the given object and the exception, skipping exceptions raised by the UART.
+ pub fn print<T: fmt::Display>(&self, exception_name: &str, obj: T, elr: u64) {
+ // Don't print to the UART if we are handling an exception it could raise.
+ if !self.is_uart_exception() {
+ eprintln!("{exception_name}");
+ eprintln!("{obj}");
+ eprintln!("{}, elr={:#08x}", self, elr);
+ }
+ }
+
+ fn is_uart_exception(&self) -> bool {
+ self.esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(self.far.0) == UART_PAGE
+ }
+}
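
A worked sketch of the `From<usize>` decoding: the ISS masks strip the fault-level and write-not-read bits before comparison, so any level of a given fault class maps to the same variant. (Written as a host-side test; `vmbase` itself is `no_std`.)

```rust
use vmbase::exceptions::Esr;

fn main() {
    // 0x96000007: data abort, translation fault at level 3.
    assert_eq!(Esr::from(0x96000007), Esr::DataAbortTranslationFault);
    // 0x9600004F: data abort, permission fault at level 3.
    assert_eq!(Esr::from(0x9600004F), Esr::DataAbortPermissionFault);
    // 0x96000010: synchronous external abort.
    assert_eq!(Esr::from(0x96000010), Esr::DataAbortSyncExternalAbort);
    // Anything else is preserved verbatim.
    assert_eq!(Esr::from(0x2000000), Esr::Unknown(0x2000000));
}
```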
diff --git a/vmbase/src/heap.rs b/vmbase/src/heap.rs
index b00ca6f..c8b76ac 100644
--- a/vmbase/src/heap.rs
+++ b/vmbase/src/heap.rs
@@ -33,7 +33,7 @@
($len:expr) => {
static mut __HEAP_ARRAY: [u8; $len] = [0; $len];
#[export_name = "HEAP"]
- // SAFETY - HEAP will only be accessed once as mut, from init().
+ // SAFETY: HEAP will only be accessed once as mut, from init().
static mut __HEAP: &'static mut [u8] = unsafe { &mut __HEAP_ARRAY };
};
}
@@ -65,12 +65,12 @@
pub fn aligned_boxed_slice(size: usize, align: usize) -> Option<Box<[u8]>> {
let size = NonZeroUsize::new(size)?.get();
let layout = Layout::from_size_align(size, align).ok()?;
- // SAFETY - We verify that `size` and the returned `ptr` are non-null.
+ // SAFETY: We verify that `size` and the returned `ptr` are non-null.
let ptr = unsafe { alloc(layout) };
let ptr = NonNull::new(ptr)?.as_ptr();
let slice_ptr = ptr::slice_from_raw_parts_mut(ptr, size);
- // SAFETY - The memory was allocated using the proper layout by our global_allocator.
+ // SAFETY: The memory was allocated using the proper layout by our global_allocator.
Some(unsafe { Box::from_raw(slice_ptr) })
}
@@ -100,9 +100,9 @@
heap_range.contains(&(ptr.as_ptr() as *const u8)),
"free() called on a pointer that is not part of the HEAP: {ptr:?}"
);
+ // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
+ // usize.
let (ptr, size) = unsafe {
- // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
- // usize.
let ptr = ptr.cast::<usize>().as_ptr().offset(-1);
(ptr, *ptr)
};
diff --git a/vmbase/src/hvc.rs b/vmbase/src/hvc.rs
index 9a5e716..ebd1625 100644
--- a/vmbase/src/hvc.rs
+++ b/vmbase/src/hvc.rs
@@ -22,20 +22,19 @@
};
const ARM_SMCCC_TRNG_VERSION: u32 = 0x8400_0050;
-#[allow(dead_code)]
const ARM_SMCCC_TRNG_FEATURES: u32 = 0x8400_0051;
#[allow(dead_code)]
const ARM_SMCCC_TRNG_GET_UUID: u32 = 0x8400_0052;
#[allow(dead_code)]
const ARM_SMCCC_TRNG_RND32: u32 = 0x8400_0053;
-const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
+pub const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
/// Returns the (major, minor) version tuple, as defined by the SMCCC TRNG.
-pub fn trng_version() -> trng::Result<(u16, u16)> {
+pub fn trng_version() -> trng::Result<trng::Version> {
let args = [0u64; 17];
let version = positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_VERSION, args)[0])?;
- Ok(((version >> 16) as u16, version as u16))
+ (version as u32 as i32).try_into()
}
pub type TrngRng64Entropy = (u64, u64, u64);
@@ -49,3 +48,10 @@
Ok((regs[1], regs[2], regs[3]))
}
+
+pub fn trng_features(fid: u32) -> trng::Result<u64> {
+ let mut args = [0u64; 17];
+ args[0] = fid as u64;
+
+ positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_FEATURES, args)[0])
+}
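
Each successful `TRNG_RND64` call returns entropy in three registers, so a caller gets at most 24 bytes per HVC; `rand::fill_with_entropy` (called from `rust_entry` in `vmbase/src/entry.rs`) has to chunk requests accordingly. A hedged sketch of the per-call copy, leaving the register-to-byte ordering mandated by the TRNG spec out of scope:

```rust
// Sketch only: copies one TRNG_RND64 result into a byte buffer. The real
// driver must honor the SMCCC TRNG bit-ordering rules, elided here.
type TrngRng64Entropy = (u64, u64, u64);

fn copy_entropy(dest: &mut [u8], (r1, r2, r3): TrngRng64Entropy) {
    let bytes = [r1.to_ne_bytes(), r2.to_ne_bytes(), r3.to_ne_bytes()];
    // Stops at whichever is shorter: the 24 available bytes or `dest`.
    for (d, s) in dest.iter_mut().zip(bytes.iter().flatten()) {
        *d = *s;
    }
}
```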
diff --git a/vmbase/src/hvc/trng.rs b/vmbase/src/hvc/trng.rs
index 6331d66..efb86f6 100644
--- a/vmbase/src/hvc/trng.rs
+++ b/vmbase/src/hvc/trng.rs
@@ -16,7 +16,7 @@
use core::result;
/// Standard SMCCC TRNG error values as described in DEN 0098 1.0 REL0.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub enum Error {
/// The call is not supported by the implementation.
NotSupported,
@@ -55,3 +55,40 @@
}
pub type Result<T> = result::Result<T, Error>;
+
+/// A version of the SMCCC TRNG interface.
+#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
+pub struct Version {
+ pub major: u16,
+ pub minor: u16,
+}
+
+impl fmt::Display for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}.{}", self.major, self.minor)
+ }
+}
+
+impl fmt::Debug for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl TryFrom<i32> for Version {
+ type Error = Error;
+
+ fn try_from(value: i32) -> core::result::Result<Self, Error> {
+ if value < 0 {
+ Err((value as i64).into())
+ } else {
+ Ok(Self { major: (value >> 16) as u16, minor: value as u16 })
+ }
+ }
+}
+
+impl From<Version> for u32 {
+ fn from(version: Version) -> Self {
+ (u32::from(version.major) << 16) | u32::from(version.minor)
+ }
+}
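
A worked sketch of the 16/16-bit packing behind `TryFrom<i32>` and `From<Version>` (standalone, since the `hvc` module is crate-private):

```rust
fn main() {
    // A non-negative return value encodes major.minor as (major << 16) | minor.
    let value: i32 = 0x0001_0002;
    assert!(value >= 0); // negative values are SMCCC TRNG error codes
    let (major, minor) = ((value >> 16) as u16, value as u16);
    assert_eq!((major, minor), (1, 2));
    // Re-encoding restores the original register value.
    assert_eq!((u32::from(major) << 16) | u32::from(minor), 0x0001_0002);
}
```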
diff --git a/vmbase/src/layout/mod.rs b/vmbase/src/layout/mod.rs
index 21c113a..f7e8170 100644
--- a/vmbase/src/layout/mod.rs
+++ b/vmbase/src/layout/mod.rs
@@ -17,6 +17,8 @@
pub mod crosvm;
use crate::console::BASE_ADDRESS;
+use crate::linker::__stack_chk_guard;
+use aarch64_paging::paging::VirtualAddress;
use core::ops::Range;
use core::ptr::addr_of;
@@ -27,11 +29,14 @@
#[macro_export]
macro_rules! linker_addr {
($symbol:ident) => {{
- unsafe { addr_of!($crate::linker::$symbol) as usize }
+ // SAFETY: We're just getting the address of an extern static symbol provided by the linker,
+ // not dereferencing it.
+ let addr = unsafe { addr_of!($crate::linker::$symbol) as usize };
+ VirtualAddress(addr)
}};
}
-/// Get the address range between a pair of linker-defined symbols.
+/// Gets the virtual address range between a pair of linker-defined symbols.
#[macro_export]
macro_rules! linker_region {
($begin:ident,$end:ident) => {{
@@ -43,57 +48,65 @@
}
/// Memory reserved for the DTB.
-pub fn dtb_range() -> Range<usize> {
+pub fn dtb_range() -> Range<VirtualAddress> {
linker_region!(dtb_begin, dtb_end)
}
/// Executable code.
-pub fn text_range() -> Range<usize> {
+pub fn text_range() -> Range<VirtualAddress> {
linker_region!(text_begin, text_end)
}
/// Read-only data.
-pub fn rodata_range() -> Range<usize> {
+pub fn rodata_range() -> Range<VirtualAddress> {
linker_region!(rodata_begin, rodata_end)
}
/// Initialized writable data.
-pub fn data_range() -> Range<usize> {
+pub fn data_range() -> Range<VirtualAddress> {
linker_region!(data_begin, data_end)
}
-/// Zero-initialised writable data.
-pub fn bss_range() -> Range<usize> {
+/// Zero-initialized writable data.
+pub fn bss_range() -> Range<VirtualAddress> {
linker_region!(bss_begin, bss_end)
}
/// Writable data region for the stack.
-pub fn stack_range(stack_size: usize) -> Range<usize> {
+pub fn stack_range(stack_size: usize) -> Range<VirtualAddress> {
let end = linker_addr!(init_stack_pointer);
- let start = end.checked_sub(stack_size).unwrap();
+ let start = VirtualAddress(end.0.checked_sub(stack_size).unwrap());
assert!(start >= linker_addr!(stack_limit));
start..end
}
/// All writable sections, excluding the stack.
-pub fn scratch_range() -> Range<usize> {
+pub fn scratch_range() -> Range<VirtualAddress> {
linker_region!(eh_stack_limit, bss_end)
}
/// UART console range.
-pub fn console_uart_range() -> Range<usize> {
+pub fn console_uart_range() -> Range<VirtualAddress> {
const CONSOLE_LEN: usize = 1; // `uart::Uart` only uses one u8 register.
- BASE_ADDRESS..(BASE_ADDRESS + CONSOLE_LEN)
+ VirtualAddress(BASE_ADDRESS)..VirtualAddress(BASE_ADDRESS + CONSOLE_LEN)
}
/// Read-write data (original).
-pub fn data_load_address() -> usize {
+pub fn data_load_address() -> VirtualAddress {
linker_addr!(data_lma)
}
/// End of the binary image.
-pub fn binary_end() -> usize {
+pub fn binary_end() -> VirtualAddress {
linker_addr!(bin_end)
}
+
+/// Value of __stack_chk_guard.
+pub fn stack_chk_guard() -> u64 {
+ // SAFETY: __stack_chk_guard shouldn't have any mutable aliases unless the stack overflows. If
+ // it does, then there could be undefined behavior all over the program, but we want to at
+ // least have a chance at catching it.
+ unsafe { addr_of!(__stack_chk_guard).read_volatile() }
+}
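
Returning `Range<VirtualAddress>` lets callers feed these ranges straight into `aarch64_paging`, which provides `From<Range<VirtualAddress>>` for `MemoryRegion`; this is what the `.into()` calls in the example binary rely on. A one-liner sketch:

```rust
use aarch64_paging::paging::MemoryRegion;
use vmbase::layout;

// `Range<VirtualAddress>` converts directly into the region type that the
// page table mapping functions expect.
fn text_region() -> MemoryRegion {
    layout::text_range().into()
}
```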
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index 88bad8b..ca8756d 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -15,18 +15,21 @@
//! Basic functionality for bare-metal binaries to run in a VM under crosvm.
#![no_std]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(clippy::undocumented_unsafe_blocks)]
extern crate alloc;
pub mod arch;
-mod bionic;
+pub mod bionic;
pub mod console;
mod entry;
+pub mod exceptions;
pub mod fdt;
pub mod heap;
mod hvc;
pub mod layout;
-mod linker;
+pub mod linker;
pub mod logger;
pub mod memory;
pub mod power;
@@ -35,8 +38,6 @@
pub mod util;
pub mod virtio;
-pub use bionic::STACK_CHK_GUARD;
-
use core::panic::PanicInfo;
use power::reboot;
diff --git a/vmbase/src/logger.rs b/vmbase/src/logger.rs
index c30adad..9130918 100644
--- a/vmbase/src/logger.rs
+++ b/vmbase/src/logger.rs
@@ -20,19 +20,20 @@
use crate::console::println;
use core::sync::atomic::{AtomicBool, Ordering};
-use log::{LevelFilter, Log, Metadata, Record, SetLoggerError};
+use log::{Log, Metadata, Record, SetLoggerError};
struct Logger {
is_enabled: AtomicBool,
}
-static mut LOGGER: Logger = Logger::new();
+
+static LOGGER: Logger = Logger::new();
impl Logger {
const fn new() -> Self {
Self { is_enabled: AtomicBool::new(true) }
}
- fn swap_enabled(&mut self, enabled: bool) -> bool {
+ fn swap_enabled(&self, enabled: bool) -> bool {
self.is_enabled.swap(enabled, Ordering::Relaxed)
}
}
@@ -58,27 +59,19 @@
impl SuppressGuard {
fn new() -> Self {
- // Safe because it modifies an atomic.
- unsafe { Self { old_enabled: LOGGER.swap_enabled(false) } }
+ Self { old_enabled: LOGGER.swap_enabled(false) }
}
}
impl Drop for SuppressGuard {
fn drop(&mut self) {
- // Safe because it modifies an atomic.
- unsafe {
- LOGGER.swap_enabled(self.old_enabled);
- }
+ LOGGER.swap_enabled(self.old_enabled);
}
}
/// Initialize vmbase logger with a given max logging level.
-pub fn init(max_level: LevelFilter) -> Result<(), SetLoggerError> {
- // Safe because it only sets the global logger.
- unsafe {
- log::set_logger(&LOGGER)?;
- }
- log::set_max_level(max_level);
+pub(crate) fn init() -> Result<(), SetLoggerError> {
+ log::set_logger(&LOGGER)?;
Ok(())
}
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index d429b30..401022e 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -34,7 +34,7 @@
} else {
tcr &= !TCR_EL1_HA_HD_BITS
};
- // Safe because it writes to a system register and does not affect Rust.
+ // SAFETY: Changing this bit in TCR doesn't affect Rust's view of memory.
unsafe { write_sysreg!("tcr_el1", tcr) }
isb!();
}
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index 5e78565..898aa10 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -22,7 +22,10 @@
pub use error::MemoryTrackerError;
pub use page_table::PageTable;
-pub use shared::{alloc_shared, dealloc_shared, MemoryRange, MemoryTracker, MEMORY};
+pub use shared::{
+ alloc_shared, dealloc_shared, handle_permission_fault, handle_translation_fault, MemoryRange,
+ MemoryTracker, MEMORY,
+};
pub use util::{
flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
PAGE_SIZE, SIZE_128KB, SIZE_2MB, SIZE_4KB, SIZE_4MB, SIZE_64KB,
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index 3943b03..e067e96 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -18,7 +18,7 @@
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
use aarch64_paging::MapError;
-use core::{ops::Range, result};
+use core::result;
/// Software bit used to indicate a device that should be lazily mapped.
pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
@@ -88,50 +88,44 @@
/// Maps the given range of virtual addresses to the physical addresses as lazily mapped
/// nGnRE device memory.
- pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DEVICE_LAZY)
+ pub fn map_device_lazy(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DEVICE_LAZY)
}
/// Maps the given range of virtual addresses to the physical addresses as valid device
/// nGnRE device memory.
- pub fn map_device(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DEVICE)
+ pub fn map_device(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DEVICE)
}
/// Maps the given range of virtual addresses to the physical addresses as non-executable
/// and writable normal memory.
- pub fn map_data(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DATA)
+ pub fn map_data(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DATA)
}
/// Maps the given range of virtual addresses to the physical addresses as non-executable,
/// read-only and writable-clean normal memory.
- pub fn map_data_dbm(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DATA_DBM)
+ pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DATA_DBM)
}
/// Maps the given range of virtual addresses to the physical addresses as read-only
/// normal memory.
- pub fn map_code(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, CODE)
+ pub fn map_code(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, CODE)
}
/// Maps the given range of virtual addresses to the physical addresses as non-executable
/// and read-only normal memory.
- pub fn map_rodata(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, RODATA)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses with the given
- /// attributes.
- fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<()> {
- self.idmap.map_range(&MemoryRegion::new(range.start, range.end), attr)
+ pub fn map_rodata(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, RODATA)
}
/// Applies the provided updater function to a number of PTEs corresponding to a given memory
/// range.
- pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<()> {
- self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<()> {
+ self.idmap.modify_range(range, f)
}
}
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 61cbeb0..173c0ec 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -19,18 +19,20 @@
use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
+use crate::exceptions::HandleExceptionError;
use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
+use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
-use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
+use hyp::{get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
@@ -44,6 +46,11 @@
/// Memory range.
pub type MemoryRange = Range<usize>;
+
+fn get_va_range(range: &MemoryRange) -> VaRange {
+ VaRange::new(range.start, range.end)
+}
+
type Result<T> = result::Result<T, MemoryTrackerError>;
#[derive(Clone, Copy, Debug, Default, PartialEq)]
@@ -69,6 +76,8 @@
payload_range: Option<MemoryRange>,
}
+// TODO: Remove this once aarch64-paging crate is updated.
+// SAFETY: `PageTable` is the only field of `MemoryTracker` that isn't `Send`, but it should be.
unsafe impl Send for MemoryTracker {}
impl MemoryTracker {
@@ -80,7 +89,7 @@
mut page_table: PageTable,
total: MemoryRange,
mmio_range: MemoryRange,
- payload_range: Option<MemoryRange>,
+ payload_range: Option<Range<VirtualAddress>>,
) -> Self {
assert!(
!total.overlaps(&mmio_range),
@@ -93,7 +102,7 @@
set_dbm_enabled(true);
debug!("Activating dynamic page table...");
- // SAFETY - page_table duplicates the static mappings for everything that the Rust code is
+ // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
// aware of so activating it shouldn't have any visible effect.
unsafe { page_table.activate() }
debug!("... Success!");
@@ -104,7 +113,7 @@
regions: ArrayVec::new(),
mmio_regions: ArrayVec::new(),
mmio_range,
- payload_range,
+ payload_range: payload_range.map(|r| r.start.0..r.end.0),
}
}
@@ -130,7 +139,7 @@
pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
self.check(®ion)?;
- self.page_table.map_rodata(range).map_err(|e| {
+ self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
error!("Error during range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -141,7 +150,7 @@
pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
self.check(®ion)?;
- self.page_table.map_data_dbm(range).map_err(|e| {
+ self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
error!("Error during mutable range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -171,10 +180,17 @@
return Err(MemoryTrackerError::Full);
}
- self.page_table.map_device_lazy(&range).map_err(|e| {
- error!("Error during MMIO device mapping: {e}");
- MemoryTrackerError::FailedToMap
- })?;
+ if get_mmio_guard().is_some() {
+ self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
+ error!("Error during lazy MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ } else {
+ self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
+ error!("Error during MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ }
if self.mmio_regions.try_push(range).is_some() {
return Err(MemoryTrackerError::Full);
@@ -211,10 +227,12 @@
///
/// Note that they are not unmapped from the page table.
pub fn mmio_unmap_all(&mut self) -> Result<()> {
- for range in &self.mmio_regions {
- self.page_table
- .modify_range(range, &mmio_guard_unmap_page)
- .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+ if get_mmio_guard().is_some() {
+ for range in &self.mmio_regions {
+ self.page_table
+ .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+ .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+ }
}
Ok(())
}
@@ -256,6 +274,19 @@
Ok(())
}
+ /// Initialize the shared heap to use heap memory directly.
+ ///
+ /// When running on "non-protected" hypervisors which permit host direct accesses to guest
+ /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
+ /// dedicated region so this function instructs the shared pool to use the global allocator.
+ pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+ // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
+ // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
+ // without any actual "dynamic memory sharing" taking place and, as such, the granule may
+ // be set to that of the global_allocator, i.e. a byte.
+ self.init_dynamic_shared_pool(size_of::<u8>())
+ }
+
/// Unshares any memory that may have been shared.
pub fn unshare_all_memory(&mut self) {
drop(SHARED_MEMORY.lock().take());
@@ -263,12 +294,14 @@
/// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
/// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
- pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
- let page_range = page_4kb_of(addr)..page_4kb_of(addr) + MMIO_GUARD_GRANULE_SIZE;
+ fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
+ let page_start = VirtualAddress(page_4kb_of(addr.0));
+ let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
+ let mmio_guard = get_mmio_guard().unwrap();
self.page_table
.modify_range(&page_range, &verify_lazy_mapped_block)
.map_err(|_| MemoryTrackerError::InvalidPte)?;
- get_hypervisor().mmio_guard_map(page_range.start)?;
+ mmio_guard.map(page_start.0)?;
// Maps a single device page, breaking up block mappings if necessary.
self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
}
@@ -284,7 +317,7 @@
// Now flush writable-dirty pages in those regions.
for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
self.page_table
- .modify_range(range, &flush_dirty_range)
+ .modify_range(&get_va_range(range), &flush_dirty_range)
.map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
}
Ok(())
@@ -293,9 +326,9 @@
/// Handles permission fault for read-only blocks by setting writable-dirty state.
/// In general, this should be called from the exception handler when hardware dirty
/// state management is disabled or unavailable.
- pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
+ fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
self.page_table
- .modify_range(&(addr..addr + 1), &mark_dirty_block)
+ .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
.map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
}
}
@@ -353,7 +386,7 @@
/// Unshares all pages when dropped.
struct MemorySharer {
granule: usize,
- shared_regions: Vec<(usize, Layout)>,
+ frames: Vec<(usize, Layout)>,
}
impl MemorySharer {
@@ -361,42 +394,47 @@
/// `granule` must be a power of 2.
fn new(granule: usize, capacity: usize) -> Self {
assert!(granule.is_power_of_two());
- Self { granule, shared_regions: Vec::with_capacity(capacity) }
+ Self { granule, frames: Vec::with_capacity(capacity) }
}
/// Gets from the global allocator a granule-aligned region that suits `hint` and shares it.
fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
let layout = hint.align_to(self.granule).unwrap().pad_to_align();
assert_ne!(layout.size(), 0);
- // SAFETY - layout has non-zero size.
+ // SAFETY: layout has non-zero size.
let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
handle_alloc_error(layout);
};
let base = shared.as_ptr() as usize;
let end = base.checked_add(layout.size()).unwrap();
- trace!("Sharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
- }
- self.shared_regions.push((base, layout));
+ if let Some(mem_sharer) = get_mem_sharer() {
+ trace!("Sharing memory region {:#x?}", base..end);
+ for vaddr in (base..end).step_by(self.granule) {
+ let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+ mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ }
+ }
+
+ self.frames.push((base, layout));
pool.add_frame(base, end);
}
}
impl Drop for MemorySharer {
fn drop(&mut self) {
- while let Some((base, layout)) = self.shared_regions.pop() {
- let end = base.checked_add(layout.size()).unwrap();
- trace!("Unsharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ while let Some((base, layout)) = self.frames.pop() {
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let end = base.checked_add(layout.size()).unwrap();
+ trace!("Unsharing memory region {:#x?}", base..end);
+ for vaddr in (base..end).step_by(self.granule) {
+ let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+ mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ }
}
- // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
+ // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
unsafe { dealloc(base as *mut _, layout) };
}
}
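The `MemorySharer` change above turns unconditional hypervisor calls into calls gated on `get_mem_sharer()`, while keeping the RAII guarantee that everything shared in `refill` is unshared and deallocated on drop. Below is a minimal, self-contained sketch of that pattern, not the vmbase code: it runs on `std`, uses a hypothetical `MemSharing` trait and `MockSharer` backend in place of the hypervisor, and passes page addresses directly rather than translating with `virt_to_phys`.

```rust
use std::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout};

trait MemSharing {
    fn share(&self, addr: usize);
    fn unshare(&self, addr: usize);
}

struct MockSharer;

impl MemSharing for MockSharer {
    fn share(&self, addr: usize) {
        println!("share   {addr:#x}");
    }
    fn unshare(&self, addr: usize) {
        println!("unshare {addr:#x}");
    }
}

struct Sharer<'a> {
    granule: usize,
    // Each allocation is recorded as (base, layout) so Drop can undo it.
    frames: Vec<(usize, Layout)>,
    backend: Option<&'a dyn MemSharing>,
}

impl<'a> Sharer<'a> {
    fn refill(&mut self, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let base = unsafe { alloc_zeroed(layout) };
        if base.is_null() {
            handle_alloc_error(layout);
        }
        let base = base as usize;
        // Share every granule-sized page, but only if a backend exists; in the
        // diff above, get_mem_sharer() similarly returns None when there is
        // no memory-sharing hypervisor.
        if let Some(backend) = self.backend {
            for page in (base..base + layout.size()).step_by(self.granule) {
                backend.share(page);
            }
        }
        self.frames.push((base, layout));
    }
}

impl Drop for Sharer<'_> {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(backend) = self.backend {
                for page in (base..base + layout.size()).step_by(self.granule) {
                    backend.unshare(page);
                }
            }
            // SAFETY: the region came from alloc_zeroed() with this layout.
            unsafe { dealloc(base as *mut u8, layout) };
        }
    }
}

fn main() {
    let backend = MockSharer;
    let mut sharer = Sharer { granule: 4096, frames: Vec::new(), backend: Some(&backend) };
    sharer.refill(Layout::from_size_align(5000, 8).unwrap());
    // Dropping `sharer` unshares and frees both pages of the padded region.
}
```

Keeping the `(base, layout)` bookkeeping unconditional, and only gating the hypervisor calls, is what lets the same drop path serve both protected and non-protected VMs.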
@@ -448,9 +486,25 @@
// Since mmio_guard_map takes IPAs, page_base would need to be converted to an IPA if
// pvmfw ever moved away from identity mapping. However, since 0x0 is a valid MMIO
// address, we don't use virt_to_phys here and just pass page_base instead.
- get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+ get_mmio_guard().unwrap().unmap(page_base).map_err(|e| {
error!("Error MMIO guard unmapping: {e}");
})?;
}
Ok(())
}
+
+/// Handles a translation fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_mmio_fault(far)?)
+}
+
+/// Handles a permission fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_permission_fault(far)?)
+}
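The two new entry points wrap the `MemoryTracker` methods behind the global `MEMORY` lock, mapping a failed `try_lock` and an uninitialized tracker to distinct errors. A sketch of that shape follows, with `std::sync::Mutex` standing in for the spinlock and simplified, hypothetical types throughout:

```rust
use std::sync::Mutex;

#[derive(Debug)]
enum HandleExceptionError {
    PageTableUnavailable,
    PageTableNotInitialized,
}

// Stand-in for vmbase's MemoryTracker; only the fault hook is sketched.
struct MemoryTracker;

impl MemoryTracker {
    fn handle_permission_fault(&mut self, _far: usize) -> Result<(), HandleExceptionError> {
        // The real code marks the faulting block writable-dirty in the page table.
        Ok(())
    }
}

static MEMORY: Mutex<Option<MemoryTracker>> = Mutex::new(None);

// An exception handler must never block on the lock: the fault may have been
// taken while the lock was already held, so try_lock failure is its own error.
fn handle_permission_fault(far: usize) -> Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().map_err(|_| HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    memory.handle_permission_fault(far)
}

fn main() {
    // Before initialization, the handler reports PageTableNotInitialized.
    assert!(matches!(
        handle_permission_fault(0x8000),
        Err(HandleExceptionError::PageTableNotInitialized)
    ));
    *MEMORY.lock().unwrap() = Some(MemoryTracker);
    assert!(handle_permission_fault(0x8000).is_ok());
}
```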
diff --git a/vmbase/src/memory/util.rs b/vmbase/src/memory/util.rs
index b9ef5c9..48d4c55 100644
--- a/vmbase/src/memory/util.rs
+++ b/vmbase/src/memory/util.rs
@@ -55,7 +55,7 @@
let start = unchecked_align_down(start, line_size);
for line in (start..end).step_by(line_size) {
- // SAFETY - Clearing cache lines shouldn't have Rust-visible side effects.
+ // SAFETY: Clearing cache lines shouldn't have Rust-visible side effects.
unsafe {
asm!(
"dc cvau, {x}",
diff --git a/vmbase/src/rand.rs b/vmbase/src/rand.rs
index 00567b8..6b8d7e0 100644
--- a/vmbase/src/rand.rs
+++ b/vmbase/src/rand.rs
@@ -14,16 +14,29 @@
//! Functions and drivers for obtaining true entropy.
-use crate::hvc;
+use crate::hvc::{self, TrngRng64Entropy};
use core::fmt;
use core::mem::size_of;
+use smccc::{self, Hvc};
/// Error type for rand operations.
pub enum Error {
+ /// No source of entropy found.
+ NoEntropySource,
+ /// Error during architectural SMCCC call.
+ Smccc(smccc::arch::Error),
/// Error during SMCCC TRNG call.
Trng(hvc::trng::Error),
+ /// Unsupported SMCCC version.
+ UnsupportedSmcccVersion(smccc::arch::Version),
/// Unsupported SMCCC TRNG version.
- UnsupportedVersion((u16, u16)),
+ UnsupportedTrngVersion(hvc::trng::Version),
+}
+
+impl From<smccc::arch::Error> for Error {
+ fn from(e: smccc::arch::Error) -> Self {
+ Self::Smccc(e)
+ }
}
impl From<hvc::trng::Error> for Error {
@@ -38,10 +51,11 @@
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
+ Self::NoEntropySource => write!(f, "No source of entropy available"),
+ Self::Smccc(e) => write!(f, "Architectural SMCCC error: {e}"),
Self::Trng(e) => write!(f, "SMCCC TRNG error: {e}"),
- Self::UnsupportedVersion((x, y)) => {
- write!(f, "Unsupported SMCCC TRNG version v{x}.{y}")
- }
+ Self::UnsupportedSmcccVersion(v) => write!(f, "Unsupported SMCCC version {v}"),
+ Self::UnsupportedTrngVersion(v) => write!(f, "Unsupported SMCCC TRNG version {v}"),
}
}
}
@@ -53,15 +67,35 @@
}
/// Configures the source of entropy.
-pub fn init() -> Result<()> {
- match hvc::trng_version()? {
- (1, _) => Ok(()),
- version => Err(Error::UnsupportedVersion(version)),
+pub(crate) fn init() -> Result<()> {
+ // SMCCC TRNG requires SMCCC v1.1.
+ match smccc::arch::version::<Hvc>()? {
+ smccc::arch::Version { major: 1, minor } if minor >= 1 => (),
+ version => return Err(Error::UnsupportedSmcccVersion(version)),
}
+
+ // TRNG_RND requires SMCCC TRNG v1.0.
+ match hvc::trng_version()? {
+ hvc::trng::Version { major: 1, minor: _ } => (),
+ version => return Err(Error::UnsupportedTrngVersion(version)),
+ }
+
+ // TRNG_RND64 doesn't define any special capabilities, so ignore the successful result.
+ let _ = hvc::trng_features(hvc::ARM_SMCCC_TRNG_RND64).map_err(|e| {
+ if e == hvc::trng::Error::NotSupported {
+ // SMCCC TRNG is currently our only source of entropy.
+ Error::NoEntropySource
+ } else {
+ e.into()
+ }
+ })?;
+
+ Ok(())
}
-fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
- const MAX_BYTES_PER_CALL: usize = size_of::<hvc::TrngRng64Entropy>();
+/// Fills a slice of bytes with true entropy.
+pub fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
+ const MAX_BYTES_PER_CALL: usize = size_of::<TrngRng64Entropy>();
let (aligned, remainder) = s.split_at_mut(s.len() - s.len() % MAX_BYTES_PER_CALL);
@@ -89,13 +123,14 @@
Ok(())
}
-fn repeat_trng_rnd(n_bytes: usize) -> hvc::trng::Result<hvc::TrngRng64Entropy> {
+fn repeat_trng_rnd(n_bytes: usize) -> Result<TrngRng64Entropy> {
let bits = usize::try_from(u8::BITS).unwrap();
let n_bits = (n_bytes * bits).try_into().unwrap();
loop {
match hvc::trng_rnd64(n_bits) {
- Err(hvc::trng::Error::NoEntropy) => continue,
- res => return res,
+ Ok(entropy) => return Ok(entropy),
+ Err(hvc::trng::Error::NoEntropy) => (),
+ Err(e) => return Err(e.into()),
}
}
}
@@ -114,7 +149,7 @@
#[no_mangle]
extern "C" fn CRYPTO_sysrand(out: *mut u8, req: usize) {
- // SAFETY - We need to assume that out points to valid memory of size req.
+ // SAFETY: We need to assume that out points to valid memory of size req.
let s = unsafe { core::slice::from_raw_parts_mut(out, req) };
fill_with_entropy(s).unwrap()
}
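For reference, the chunking in `fill_with_entropy` splits the slice so the head is a whole number of maximum-size TRNG calls and the tail takes one final, shorter call. A simplified sketch of just that split, assuming 8-byte calls via a hypothetical `mock_trng_rnd64` (the real `TrngRng64Entropy` spans three registers, and `trng_rnd64` can fail or retry on `NoEntropy`):

```rust
use std::mem::size_of;

fn mock_trng_rnd64(_n_bits: usize) -> u64 {
    0x0123_4567_89ab_cdef // a real implementation retries until entropy is available
}

fn fill_with_entropy(s: &mut [u8]) {
    const MAX_BYTES_PER_CALL: usize = size_of::<u64>();
    // Split off the largest prefix that is a multiple of the call size.
    let (aligned, remainder) = s.split_at_mut(s.len() - s.len() % MAX_BYTES_PER_CALL);
    for chunk in aligned.chunks_exact_mut(MAX_BYTES_PER_CALL) {
        let entropy = mock_trng_rnd64(MAX_BYTES_PER_CALL * 8);
        chunk.copy_from_slice(&entropy.to_ne_bytes());
    }
    if !remainder.is_empty() {
        // One last call for the short tail: request only the bits needed and
        // keep the corresponding bytes.
        let entropy = mock_trng_rnd64(remainder.len() * 8);
        remainder.copy_from_slice(&entropy.to_ne_bytes()[..remainder.len()]);
    }
}

fn main() {
    let mut buf = [0u8; 13]; // 8 aligned bytes + a 5-byte tail
    fill_with_entropy(&mut buf);
    println!("{buf:02x?}");
}
```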
diff --git a/vmbase/src/uart.rs b/vmbase/src/uart.rs
index 0fc2494..09d747f 100644
--- a/vmbase/src/uart.rs
+++ b/vmbase/src/uart.rs
@@ -38,8 +38,8 @@
/// Writes a single byte to the UART.
pub fn write_byte(&self, byte: u8) {
- // Safe because we know that the base address points to the control registers of an UART
- // device which is appropriately mapped.
+ // SAFETY: We know that the base address points to the control registers of a UART device
+ // which is appropriately mapped.
unsafe {
write_volatile(self.base_address, byte);
}
@@ -55,5 +55,5 @@
}
}
-// Safe because it just contains a pointer to device memory, which can be accessed from any context.
+// SAFETY: `Uart` just contains a pointer to device memory, which can be accessed from any context.
unsafe impl Send for Uart {}
diff --git a/vmbase/src/virtio/hal.rs b/vmbase/src/virtio/hal.rs
index 36f9e56..0d3f445 100644
--- a/vmbase/src/virtio/hal.rs
+++ b/vmbase/src/virtio/hal.rs
@@ -32,10 +32,8 @@
/// HAL implementation for the virtio_drivers crate.
pub struct HalImpl;
-/// # Safety
-///
-/// See the 'Implementation Safety' comments on methods below for how they fulfill the safety
-/// requirements of the unsafe `Hal` trait.
+// SAFETY: See the 'Implementation Safety' comments on methods below for how they fulfill the
+// safety requirements of the unsafe `Hal` trait.
unsafe impl Hal for HalImpl {
/// # Implementation Safety
///
@@ -48,14 +46,14 @@
let layout = dma_layout(pages);
let vaddr =
alloc_shared(layout).expect("Failed to allocate and share VirtIO DMA range with host");
- // SAFETY - vaddr points to a region allocated for the caller so is safe to access.
+ // SAFETY: vaddr points to a region allocated for the caller so is safe to access.
unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, layout.size()) };
let paddr = virt_to_phys(vaddr);
(paddr, vaddr)
}
unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
- // SAFETY - Memory was allocated by `dma_alloc` using `alloc_shared` with the same layout.
+ // SAFETY: Memory was allocated by `dma_alloc` using `alloc_shared` with the same layout.
unsafe { dealloc_shared(vaddr, dma_layout(pages)) }
.expect("Failed to unshare VirtIO DMA range with host");
0
@@ -68,7 +66,7 @@
/// range. It can't alias any other allocations because we previously validated in
/// `map_mmio_range` that the PCI MMIO range didn't overlap with any other memory ranges.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
- let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
+ let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialized");
let bar_range = {
let start = pci_info.bar_range.start.try_into().unwrap();
let end = pci_info.bar_range.end.try_into().unwrap();
@@ -96,7 +94,7 @@
if direction == BufferDirection::DriverToDevice {
let src = buffer.cast::<u8>().as_ptr().cast_const();
trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) initialized from {src:?}");
- // SAFETY - Both regions are valid, properly aligned, and don't overlap.
+ // SAFETY: Both regions are valid, properly aligned, and don't overlap.
unsafe { copy_nonoverlapping(src, bounce.as_ptr(), size) };
}
@@ -109,11 +107,11 @@
if direction == BufferDirection::DeviceToDriver {
let dest = buffer.cast::<u8>().as_ptr();
trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) copied back to {dest:?}");
- // SAFETY - Both regions are valid, properly aligned, and don't overlap.
+ // SAFETY: Both regions are valid, properly aligned, and don't overlap.
unsafe { copy_nonoverlapping(bounce.as_ptr(), dest, size) };
}
- // SAFETY - Memory was allocated by `share` using `alloc_shared` with the same layout.
+ // SAFETY: Memory was allocated by `share` using `alloc_shared` with the same layout.
unsafe { dealloc_shared(bounce, bb_layout(size)) }
.expect("Failed to unshare and deallocate VirtIO bounce buffer");
}
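The `share`/`unshare` pair above implements classic bounce buffering: device-bound data is copied into the shared buffer before the transfer, and driver-bound data is copied back out just before the buffer is unshared and freed. A sketch of only the copy policy, with `Vec<u8>` standing in for the shared allocation and no actual sharing or physical addresses:

```rust
#[derive(Clone, Copy, PartialEq)]
enum BufferDirection {
    DriverToDevice,
    DeviceToDriver,
}

// share(): allocate the bounce buffer and, for device-bound data, populate it.
fn share(buffer: &[u8], direction: BufferDirection) -> Vec<u8> {
    let mut bounce = vec![0u8; buffer.len()];
    if direction == BufferDirection::DriverToDevice {
        // The device will read this memory, so copy the payload in up front.
        bounce.copy_from_slice(buffer);
    }
    bounce
}

// unshare(): for driver-bound data, copy the device's writes back out, then
// release the bounce buffer (the real code unshares it with the host first).
fn unshare(bounce: Vec<u8>, buffer: &mut [u8], direction: BufferDirection) {
    if direction == BufferDirection::DeviceToDriver {
        buffer.copy_from_slice(&bounce);
    }
    drop(bounce);
}

fn main() {
    let request = [1u8, 2, 3, 4];
    let bounce = share(&request, BufferDirection::DriverToDevice);
    assert_eq!(bounce, request);

    let mut response = [0u8; 4];
    let from_device = vec![9u8, 9, 9, 9]; // pretend the device wrote this
    unshare(from_device, &mut response, BufferDirection::DeviceToDriver);
    assert_eq!(response, [9, 9, 9, 9]);
}
```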
diff --git a/vmbase/src/virtio/mod.rs b/vmbase/src/virtio/mod.rs
index df916bc..fbe41e3 100644
--- a/vmbase/src/virtio/mod.rs
+++ b/vmbase/src/virtio/mod.rs
@@ -16,3 +16,5 @@
mod hal;
pub mod pci;
+
+pub use hal::HalImpl;
diff --git a/vmbase/src/virtio/pci.rs b/vmbase/src/virtio/pci.rs
index cbb4d26..1d05c18 100644
--- a/vmbase/src/virtio/pci.rs
+++ b/vmbase/src/virtio/pci.rs
@@ -14,19 +14,20 @@
//! Functions to scan the PCI bus for VirtIO devices.
-use super::hal::HalImpl;
use crate::memory::{MemoryTracker, MemoryTrackerError};
use alloc::boxed::Box;
use core::fmt;
+use core::marker::PhantomData;
use fdtpci::PciInfo;
use log::debug;
use once_cell::race::OnceBox;
use virtio_drivers::{
- device::blk,
+ device::{blk, socket},
transport::pci::{
bus::{BusDeviceIterator, PciRoot},
virtio_device_type, PciTransport,
},
+ Hal,
};
pub(super) static PCI_INFO: OnceBox<PciInfo> = OnceBox::new();
@@ -63,7 +64,7 @@
/// 3. Creates and returns a `PciRoot`.
///
/// This must only be called once; it will panic if it is called a second time.
-pub fn initialise(pci_info: PciInfo, memory: &mut MemoryTracker) -> Result<PciRoot, PciError> {
+pub fn initialize(pci_info: PciInfo, memory: &mut MemoryTracker) -> Result<PciRoot, PciError> {
PCI_INFO.set(Box::new(pci_info.clone())).map_err(|_| PciError::DuplicateInitialization)?;
memory.map_mmio_range(pci_info.cam_range.clone()).map_err(PciError::CamMapFailed)?;
@@ -76,23 +77,29 @@
}
/// Virtio Block device.
-pub type VirtIOBlk = blk::VirtIOBlk<HalImpl, PciTransport>;
+pub type VirtIOBlk<T> = blk::VirtIOBlk<T, PciTransport>;
+
+/// Virtio Socket device.
+///
+/// Spec: https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html 5.10
+pub type VirtIOSocket<T> = socket::VirtIOSocket<T, PciTransport>;
/// An iterator that yields the PCI transport for each device.
-pub struct PciTransportIterator<'a> {
+pub struct PciTransportIterator<'a, T: Hal> {
pci_root: &'a mut PciRoot,
bus: BusDeviceIterator,
+ _hal: PhantomData<T>,
}
-impl<'a> PciTransportIterator<'a> {
+impl<'a, T: Hal> PciTransportIterator<'a, T> {
/// Creates a new iterator.
pub fn new(pci_root: &'a mut PciRoot) -> Self {
let bus = pci_root.enumerate_bus(0);
- Self { pci_root, bus }
+ Self { pci_root, bus, _hal: PhantomData }
}
}
-impl<'a> Iterator for PciTransportIterator<'a> {
+impl<'a, T: Hal> Iterator for PciTransportIterator<'a, T> {
type Item = PciTransport;
fn next(&mut self) -> Option<Self::Item> {
@@ -109,7 +116,7 @@
};
debug!(" VirtIO {:?}", virtio_type);
- return PciTransport::new::<HalImpl>(self.pci_root, device_function).ok();
+ return PciTransport::new::<T>(self.pci_root, device_function).ok();
}
}
}
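Making `PciTransportIterator` generic over `Hal` relies on `PhantomData`: the type parameter is consumed only when constructing items (`PciTransport::new::<T>`), never stored in a field, so a zero-sized marker is needed to tie `T` to the struct. A self-contained sketch of the pattern, with hypothetical stand-ins for the `Hal` trait and transport type:

```rust
use std::marker::PhantomData;

// Stand-ins for virtio_drivers' Hal trait and PciTransport.
trait Hal {
    fn name() -> &'static str;
}

struct Transport(&'static str);

struct TransportIterator<T: Hal> {
    remaining: u32,
    // Zero-sized marker: T appears in no other field, only in the items we build.
    _hal: PhantomData<T>,
}

impl<T: Hal> TransportIterator<T> {
    fn new(count: u32) -> Self {
        Self { remaining: count, _hal: PhantomData }
    }
}

impl<T: Hal> Iterator for TransportIterator<T> {
    type Item = Transport;

    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        // T is only consumed here, as in PciTransport::new::<T>(...).
        Some(Transport(T::name()))
    }
}

struct MyHal;

impl Hal for MyHal {
    fn name() -> &'static str {
        "MyHal"
    }
}

fn main() {
    for t in TransportIterator::<MyHal>::new(2) {
        println!("transport created via {}", t.0);
    }
}
```

Pushing the `Hal` choice out to the caller this way is what lets pvmfw and other vmbase clients supply their own `HalImpl` (now re-exported from `vmbase::virtio`) instead of the module hard-wiring one.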