Merge changes I73e522f9,Ifc92a85b,I9b918dd9,I6f84eff9 into main
* changes:
doc: add debug/README.md
doc: major refactoring of the getting started doc
doc: move the getting started page
doc: rewrite the front page
diff --git a/javalib/api/test-current.txt b/javalib/api/test-current.txt
index 1298000..cf95770 100644
--- a/javalib/api/test-current.txt
+++ b/javalib/api/test-current.txt
@@ -13,6 +13,7 @@
public static final class VirtualMachineConfig.Builder {
method @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setPayloadConfigPath(@NonNull String);
+ method @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setVendorDiskImage(@NonNull java.io.File);
method @NonNull public android.system.virtualmachine.VirtualMachineConfig.Builder setVmConsoleInputSupported(boolean);
}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
index b400eeb..4cad2e3 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -76,6 +76,7 @@
private static final String KEY_ENCRYPTED_STORAGE_BYTES = "encryptedStorageBytes";
private static final String KEY_VM_OUTPUT_CAPTURED = "vmOutputCaptured";
private static final String KEY_VM_CONSOLE_INPUT_SUPPORTED = "vmConsoleInputSupported";
+ private static final String KEY_VENDOR_DISK_IMAGE_PATH = "vendorDiskImagePath";
/** @hide */
@Retention(RetentionPolicy.SOURCE)
@@ -167,6 +168,8 @@
/** Whether the app can write console input to the VM */
private final boolean mVmConsoleInputSupported;
+ @Nullable private final File mVendorDiskImage;
+
private VirtualMachineConfig(
@Nullable String packageName,
@Nullable String apkPath,
@@ -178,7 +181,8 @@
@CpuTopology int cpuTopology,
long encryptedStorageBytes,
boolean vmOutputCaptured,
- boolean vmConsoleInputSupported) {
+ boolean vmConsoleInputSupported,
+ @Nullable File vendorDiskImage) {
// This is only called from Builder.build(); the builder handles parameter validation.
mPackageName = packageName;
mApkPath = apkPath;
@@ -191,6 +195,7 @@
mEncryptedStorageBytes = encryptedStorageBytes;
mVmOutputCaptured = vmOutputCaptured;
mVmConsoleInputSupported = vmConsoleInputSupported;
+ mVendorDiskImage = vendorDiskImage;
}
/** Loads a config from a file. */
@@ -267,6 +272,11 @@
builder.setVmOutputCaptured(b.getBoolean(KEY_VM_OUTPUT_CAPTURED));
builder.setVmConsoleInputSupported(b.getBoolean(KEY_VM_CONSOLE_INPUT_SUPPORTED));
+ String vendorDiskImagePath = b.getString(KEY_VENDOR_DISK_IMAGE_PATH);
+ if (vendorDiskImagePath != null) {
+ builder.setVendorDiskImage(new File(vendorDiskImagePath));
+ }
+
return builder.build();
}
@@ -302,6 +312,9 @@
}
b.putBoolean(KEY_VM_OUTPUT_CAPTURED, mVmOutputCaptured);
b.putBoolean(KEY_VM_CONSOLE_INPUT_SUPPORTED, mVmConsoleInputSupported);
+ if (mVendorDiskImage != null) {
+ b.putString(KEY_VENDOR_DISK_IMAGE_PATH, mVendorDiskImage.getAbsolutePath());
+ }
b.writeToStream(output);
}
@@ -501,6 +514,20 @@
vsConfig.cpuTopology = android.system.virtualizationservice.CpuTopology.ONE_CPU;
break;
}
+ if (mVendorDiskImage != null) {
+ VirtualMachineAppConfig.CustomConfig customConfig =
+ new VirtualMachineAppConfig.CustomConfig();
+ customConfig.taskProfiles = new String[0];
+ try {
+ customConfig.vendorImage =
+ ParcelFileDescriptor.open(mVendorDiskImage, MODE_READ_ONLY);
+ } catch (FileNotFoundException e) {
+ throw new VirtualMachineException(
+ "Failed to open vendor disk image " + mVendorDiskImage.getAbsolutePath(),
+ e);
+ }
+ vsConfig.customConfig = customConfig;
+ }
return vsConfig;
}
@@ -572,6 +599,7 @@
private long mEncryptedStorageBytes;
private boolean mVmOutputCaptured = false;
private boolean mVmConsoleInputSupported = false;
+ @Nullable private File mVendorDiskImage;
/**
* Creates a builder for the given context.
@@ -645,7 +673,8 @@
mCpuTopology,
mEncryptedStorageBytes,
mVmOutputCaptured,
- mVmConsoleInputSupported);
+ mVmConsoleInputSupported,
+ mVendorDiskImage);
}
/**
@@ -863,5 +892,18 @@
mVmConsoleInputSupported = supported;
return this;
}
+
+ /**
+ * Sets the path to the disk image with vendor-specific modules.
+ *
+ * @hide
+ */
+ @TestApi
+ @RequiresPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION)
+ @NonNull
+ public Builder setVendorDiskImage(@NonNull File vendorDiskImage) {
+ mVendorDiskImage = vendorDiskImage;
+ return this;
+ }
}
}
diff --git a/libs/hyp/Android.bp b/libs/hyp/Android.bp
index 1bb8722..8baf9dd 100644
--- a/libs/hyp/Android.bp
+++ b/libs/hyp/Android.bp
@@ -8,7 +8,6 @@
srcs: ["src/lib.rs"],
prefer_rlib: true,
rustlibs: [
- "libbitflags",
"libonce_cell_nostd",
"libsmccc",
"libuuid_nostd",
diff --git a/libs/hyp/src/hypervisor/common.rs b/libs/hyp/src/hypervisor/common.rs
index ec7d168..7c030a1 100644
--- a/libs/hyp/src/hypervisor/common.rs
+++ b/libs/hyp/src/hypervisor/common.rs
@@ -16,47 +16,49 @@
use crate::error::Result;
use crate::util::SIZE_4KB;
-use bitflags::bitflags;
/// Expected MMIO guard granule size, validated during MMIO guard initialization.
pub const MMIO_GUARD_GRANULE_SIZE: usize = SIZE_4KB;
-bitflags! {
- /// Capabilities that Hypervisor backends can declare support for.
- pub struct HypervisorCap: u32 {
- /// Capability for guest to share its memory with host at runtime.
- const DYNAMIC_MEM_SHARE = 0b1;
+/// Trait for the hypervisor.
+pub trait Hypervisor {
+ /// Returns the hypervisor's MMIO_GUARD implementation, if any.
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ None
+ }
+
+ /// Returns the hypervisor's dynamic memory sharing implementation, if any.
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ None
}
}
-/// Trait for the hypervisor.
-pub trait Hypervisor {
+pub trait MmioGuardedHypervisor {
/// Initializes the hypervisor by enrolling a MMIO guard and checking the memory granule size.
/// By enrolling, all MMIO will be blocked unless allow-listed with `mmio_guard_map`.
/// Protected VMs are auto-enrolled.
- fn mmio_guard_init(&self) -> Result<()>;
+ fn init(&self) -> Result<()>;
/// Maps a page containing the given memory address to the hypervisor MMIO guard.
/// The page size corresponds to the MMIO guard granule size.
- fn mmio_guard_map(&self, addr: usize) -> Result<()>;
+ fn map(&self, addr: usize) -> Result<()>;
/// Unmaps a page containing the given memory address from the hypervisor MMIO guard.
/// The page size corresponds to the MMIO guard granule size.
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()>;
+ fn unmap(&self, addr: usize) -> Result<()>;
+}
+pub trait MemSharingHypervisor {
/// Shares a region of memory with host, granting it read, write and execute permissions.
/// The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
- fn mem_share(&self, base_ipa: u64) -> Result<()>;
+ fn share(&self, base_ipa: u64) -> Result<()>;
/// Revokes access permission from host to a memory region previously shared with
/// [`mem_share`]. The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
- fn mem_unshare(&self, base_ipa: u64) -> Result<()>;
+ fn unshare(&self, base_ipa: u64) -> Result<()>;
/// Returns the memory protection granule size in bytes.
- fn memory_protection_granule(&self) -> Result<usize>;
-
- /// Check if required capabilities are supported.
- fn has_cap(&self, cap: HypervisorCap) -> bool;
+ fn granule(&self) -> Result<usize>;
}
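
The hunk above replaces the HypervisorCap bitflags with per-capability traits: a backend advertises support by overriding as_mmio_guard()/as_mem_sharer() to return Some(self), and the default of None means "not supported". A minimal sketch of a hypothetical backend that only supports memory sharing, assuming nothing beyond the trait definitions in common.rs above (ExampleHypervisor is made up):

    // Hypothetical backend, shown only to illustrate the new trait split.
    struct ExampleHypervisor;

    impl MemSharingHypervisor for ExampleHypervisor {
        fn share(&self, _base_ipa: u64) -> Result<()> {
            Ok(())
        }

        fn unshare(&self, _base_ipa: u64) -> Result<()> {
            Ok(())
        }

        fn granule(&self) -> Result<usize> {
            Ok(SIZE_4KB)
        }
    }

    impl Hypervisor for ExampleHypervisor {
        // Only the supported capability is overridden; as_mmio_guard() keeps
        // its default of None, so callers treat MMIO guard as unsupported.
        fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
            Some(self)
        }
    }

GunyahHypervisor's empty `impl Hypervisor for GunyahHypervisor {}` later in this change is the all-defaults case of the same pattern.
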
diff --git a/libs/hyp/src/hypervisor/geniezone.rs b/libs/hyp/src/hypervisor/geniezone.rs
index 0741978..24eb89e 100644
--- a/libs/hyp/src/hypervisor/geniezone.rs
+++ b/libs/hyp/src/hypervisor/geniezone.rs
@@ -14,7 +14,9 @@
//! Wrappers around calls to the GenieZone hypervisor.
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{
+ Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
use crate::error::{Error, Result};
use crate::util::page_address;
use core::fmt::{self, Display, Formatter};
@@ -40,7 +42,6 @@
// and share the same identification along with guest VMs.
// The previous uuid was removed due to duplication elsewhere.
pub const UUID: Uuid = uuid!("7e134ed0-3b82-488d-8cee-69c19211dbe7");
- const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
}
/// Error from a GenieZone HVC call.
@@ -85,7 +86,17 @@
}
impl Hypervisor for GeniezoneHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ Some(self)
+ }
+
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ Some(self)
+ }
+}
+
+impl MmioGuardedHypervisor for GeniezoneHypervisor {
+ fn init(&self) -> Result<()> {
mmio_guard_enroll()?;
let mmio_granule = mmio_guard_granule()?;
if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
@@ -94,43 +105,41 @@
Ok(())
}
- fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+ fn map(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_MAP_FUNC_ID, args)
}
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+ fn unmap(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_UNMAP_FUNC_ID, args)
}
+}
- fn mem_share(&self, base_ipa: u64) -> Result<()> {
+impl MemSharingHypervisor for GeniezoneHypervisor {
+ fn share(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_SHARE, args)
}
- fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+ fn unshare(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_UNSHARE, args)
}
- fn memory_protection_granule(&self) -> Result<usize> {
+ fn granule(&self) -> Result<usize> {
let args = [0u64; 17];
let granule = checked_hvc64(ARM_SMCCC_GZVM_FUNC_HYP_MEMINFO, args)?;
Ok(granule.try_into().unwrap())
}
-
- fn has_cap(&self, cap: HypervisorCap) -> bool {
- Self::CAPABILITIES.contains(cap)
- }
}
fn mmio_guard_granule() -> Result<usize> {
diff --git a/libs/hyp/src/hypervisor/gunyah.rs b/libs/hyp/src/hypervisor/gunyah.rs
index 252430f..45c01bf 100644
--- a/libs/hyp/src/hypervisor/gunyah.rs
+++ b/libs/hyp/src/hypervisor/gunyah.rs
@@ -1,5 +1,4 @@
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
-use crate::error::Result;
+use super::common::Hypervisor;
use uuid::{uuid, Uuid};
pub(super) struct GunyahHypervisor;
@@ -8,32 +7,4 @@
pub const UUID: Uuid = uuid!("c1d58fcd-a453-5fdb-9265-ce36673d5f14");
}
-impl Hypervisor for GunyahHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
- Ok(())
- }
-
- fn mmio_guard_map(&self, _addr: usize) -> Result<()> {
- Ok(())
- }
-
- fn mmio_guard_unmap(&self, _addr: usize) -> Result<()> {
- Ok(())
- }
-
- fn mem_share(&self, _base_ipa: u64) -> Result<()> {
- unimplemented!();
- }
-
- fn mem_unshare(&self, _base_ipa: u64) -> Result<()> {
- unimplemented!();
- }
-
- fn memory_protection_granule(&self) -> Result<usize> {
- Ok(MMIO_GUARD_GRANULE_SIZE)
- }
-
- fn has_cap(&self, _cap: HypervisorCap) -> bool {
- false
- }
-}
+impl Hypervisor for GunyahHypervisor {}
diff --git a/libs/hyp/src/hypervisor/kvm.rs b/libs/hyp/src/hypervisor/kvm.rs
index a89f9b8..a95b8de 100644
--- a/libs/hyp/src/hypervisor/kvm.rs
+++ b/libs/hyp/src/hypervisor/kvm.rs
@@ -14,7 +14,9 @@
//! Wrappers around calls to the KVM hypervisor.
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{
+ Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
use crate::error::{Error, Result};
use crate::util::page_address;
use core::fmt::{self, Display, Formatter};
@@ -70,17 +72,30 @@
const VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID: u32 = 0xc6000007;
const VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID: u32 = 0xc6000008;
-pub(super) struct KvmHypervisor;
+pub(super) struct RegularKvmHypervisor;
-impl KvmHypervisor {
+impl RegularKvmHypervisor {
// Based on ARM_SMCCC_VENDOR_HYP_UID_KVM_REG values listed in Linux kernel source:
// https://github.com/torvalds/linux/blob/master/include/linux/arm-smccc.h
pub(super) const UUID: Uuid = uuid!("28b46fb6-2ec5-11e9-a9ca-4b564d003a74");
- const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
}
-impl Hypervisor for KvmHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
+impl Hypervisor for RegularKvmHypervisor {}
+
+pub(super) struct ProtectedKvmHypervisor;
+
+impl Hypervisor for ProtectedKvmHypervisor {
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ Some(self)
+ }
+
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ Some(self)
+ }
+}
+
+impl MmioGuardedHypervisor for ProtectedKvmHypervisor {
+ fn init(&self) -> Result<()> {
mmio_guard_enroll()?;
let mmio_granule = mmio_guard_granule()?;
if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
@@ -89,7 +104,7 @@
Ok(())
}
- fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+ fn map(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
@@ -99,7 +114,7 @@
.map_err(|e| Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID))
}
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+ fn unmap(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
@@ -110,30 +125,28 @@
Err(e) => Err(Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID)),
}
}
+}
- fn mem_share(&self, base_ipa: u64) -> Result<()> {
+impl MemSharingHypervisor for ProtectedKvmHypervisor {
+ fn share(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_SHARE, args)
}
- fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+ fn unshare(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE, args)
}
- fn memory_protection_granule(&self) -> Result<usize> {
+ fn granule(&self) -> Result<usize> {
let args = [0u64; 17];
let granule = checked_hvc64(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO, args)?;
Ok(granule.try_into().unwrap())
}
-
- fn has_cap(&self, cap: HypervisorCap) -> bool {
- Self::CAPABILITIES.contains(cap)
- }
}
fn mmio_guard_granule() -> Result<usize> {
diff --git a/libs/hyp/src/hypervisor/mod.rs b/libs/hyp/src/hypervisor/mod.rs
index 93d53fe..bc9e406 100644
--- a/libs/hyp/src/hypervisor/mod.rs
+++ b/libs/hyp/src/hypervisor/mod.rs
@@ -23,30 +23,31 @@
use crate::error::{Error, Result};
use alloc::boxed::Box;
-pub use common::Hypervisor;
-pub use common::HypervisorCap;
-pub use common::MMIO_GUARD_GRANULE_SIZE;
+use common::Hypervisor;
+pub use common::{MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE};
pub use geniezone::GeniezoneError;
use geniezone::GeniezoneHypervisor;
use gunyah::GunyahHypervisor;
pub use kvm::KvmError;
-use kvm::KvmHypervisor;
+use kvm::{ProtectedKvmHypervisor, RegularKvmHypervisor};
use once_cell::race::OnceBox;
use smccc::hvc64;
use uuid::Uuid;
enum HypervisorBackend {
- Kvm,
+ RegularKvm,
Gunyah,
Geniezone,
+ ProtectedKvm,
}
impl HypervisorBackend {
fn get_hypervisor(&self) -> &'static dyn Hypervisor {
match self {
- Self::Kvm => &KvmHypervisor,
+ Self::RegularKvm => &RegularKvmHypervisor,
Self::Gunyah => &GunyahHypervisor,
Self::Geniezone => &GeniezoneHypervisor,
+ Self::ProtectedKvm => &ProtectedKvmHypervisor,
}
}
}
@@ -58,7 +59,16 @@
match uuid {
GeniezoneHypervisor::UUID => Ok(HypervisorBackend::Geniezone),
GunyahHypervisor::UUID => Ok(HypervisorBackend::Gunyah),
- KvmHypervisor::UUID => Ok(HypervisorBackend::Kvm),
+ RegularKvmHypervisor::UUID => {
+ // Protected KVM has the same UUID so differentiate based on MEM_SHARE.
+ match ProtectedKvmHypervisor.as_mem_sharer().unwrap().granule() {
+ Ok(_) => Ok(HypervisorBackend::ProtectedKvm),
+ Err(Error::KvmError(KvmError::NotSupported, _)) => {
+ Ok(HypervisorBackend::RegularKvm)
+ }
+ Err(e) => Err(e),
+ }
+ }
u => Err(Error::UnsupportedHypervisorUuid(u)),
}
}
@@ -95,8 +105,18 @@
}
/// Gets the hypervisor singleton.
-pub fn get_hypervisor() -> &'static dyn Hypervisor {
+fn get_hypervisor() -> &'static dyn Hypervisor {
static HYPERVISOR: OnceBox<HypervisorBackend> = OnceBox::new();
HYPERVISOR.get_or_init(|| Box::new(detect_hypervisor())).get_hypervisor()
}
+
+/// Gets the MMIO_GUARD hypervisor singleton, if any.
+pub fn get_mmio_guard() -> Option<&'static dyn MmioGuardedHypervisor> {
+ get_hypervisor().as_mmio_guard()
+}
+
+/// Gets the dynamic memory sharing hypervisor singleton, if any.
+pub fn get_mem_sharer() -> Option<&'static dyn MemSharingHypervisor> {
+ get_hypervisor().as_mem_sharer()
+}
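
Since get_hypervisor() is now private, clients query capabilities through the new accessors instead of testing HypervisorCap. A caller-side sketch using only the API re-exported from this crate; the function name and uart_base parameter are illustrative, and the init/map sequence is the one pvmfw's entry code previously performed inline (see the lines removed from pvmfw/src/entry.rs below):

    use hyp::{get_mem_sharer, get_mmio_guard};

    fn set_up_guards(uart_base: usize) -> hyp::Result<()> {
        // MMIO guard is optional: backends that don't expose it are skipped.
        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.init()?;
            mmio_guard.map(uart_base)?;
        }
        // Dynamic memory sharing is likewise only used when the backend offers it.
        if let Some(mem_sharer) = get_mem_sharer() {
            let _granule = mem_sharer.granule()?;
        }
        Ok(())
    }
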
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 3b3b30a..486a181 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -21,8 +21,6 @@
mod util;
pub use error::{Error, Result};
-pub use hypervisor::{
- get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE,
-};
+pub use hypervisor::{get_mem_sharer, get_mmio_guard, KvmError, MMIO_GUARD_GRANULE_SIZE};
use hypervisor::GeniezoneError;
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 5440695..2d3f084 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -23,6 +23,10 @@
"apex",
"linkerconfig",
"second_stage_resources",
+
+ // Ideally we should only create /vendor for Microdroid VMs that will mount it, but
+ // for the time being we will just create it unconditionally.
+ "vendor",
]
microdroid_symlinks = [
diff --git a/microdroid/bootconfig.x86_64 b/microdroid/bootconfig.x86_64
index 6076889..eed9212 100644
--- a/microdroid/bootconfig.x86_64
+++ b/microdroid/bootconfig.x86_64
@@ -1 +1 @@
-androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0
+androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0,pci0000:00/0000:00:07.0
diff --git a/microdroid/fstab.microdroid b/microdroid/fstab.microdroid
index 9478c7c..da000b9 100644
--- a/microdroid/fstab.microdroid
+++ b/microdroid/fstab.microdroid
@@ -1 +1,7 @@
system /system ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta,first_stage_mount,logical
+# This is a temporary solution to unblock other devs that depend on the /vendor partition in Microdroid.
+# The /vendor partition will only be mounted if the kernel cmdline contains
+# androidboot.microdroid.mount_vendor=1.
+# TODO(b/285855430): this should probably be defined in the DT
+# TODO(b/285855436): should be mounted on top of dm-verity device
+/dev/block/by-name/microdroid-vendor /vendor ext4 noatime,ro,errors=panic wait,first_stage_mount
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 3d2fea8..f3bd637 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -23,7 +23,7 @@
use core::num::NonZeroUsize;
use core::ops::Range;
use core::slice;
-use hyp::{get_hypervisor, HypervisorCap};
+use hyp::{get_mem_sharer, get_mmio_guard};
use log::debug;
use log::error;
use log::info;
@@ -33,10 +33,9 @@
use vmbase::{
configure_heap, console,
layout::{self, crosvm},
- logger, main,
+ main,
memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
power::reboot,
- rand,
};
use zeroize::Zeroize;
@@ -85,12 +84,11 @@
impl<'a> MemorySlices<'a> {
fn new(fdt: usize, kernel: usize, kernel_size: usize) -> Result<Self, RebootReason> {
- // SAFETY - SIZE_2MB is non-zero.
- const FDT_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(crosvm::FDT_MAX_SIZE) };
+ let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
// TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
// e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
// overwrite with the template DT and apply the DTBO.
- let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, FDT_SIZE).map_err(|e| {
+ let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
error!("Failed to allocate the FDT range: {e}");
RebootReason::InternalError
})?;
@@ -112,8 +110,8 @@
RebootReason::InvalidFdt
})?;
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- let granule = get_hypervisor().memory_protection_granule().map_err(|e| {
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule().map_err(|e| {
error!("Failed to get memory protection granule: {e}");
RebootReason::InternalError
})?;
@@ -192,21 +190,7 @@
// - only perform logging once the logger has been initialized
// - only access non-pvmfw memory once (and while) it has been mapped
- logger::init(LevelFilter::Info).map_err(|_| RebootReason::InternalError)?;
-
- // Use debug!() to avoid printing to the UART if we failed to configure it as only local
- // builds that have tweaked the logger::init() call will actually attempt to log the message.
-
- get_hypervisor().mmio_guard_init().map_err(|e| {
- debug!("{e}");
- RebootReason::InternalError
- })?;
-
- get_hypervisor().mmio_guard_map(console::BASE_ADDRESS).map_err(|e| {
- debug!("Failed to configure the UART: {e}");
- RebootReason::InternalError
- })?;
-
+ log::set_max_level(LevelFilter::Info);
crypto::init();
let page_table = memory::init_page_table().map_err(|e| {
@@ -235,11 +219,6 @@
let slices = MemorySlices::new(fdt, payload, payload_size)?;
- rand::init().map_err(|e| {
- error!("Failed to initialize rand: {e}");
- RebootReason::InternalError
- })?;
-
// This wrapper allows main() to be blissfully ignorant of platform details.
let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
@@ -253,10 +232,12 @@
})?;
// Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
MEMORY.lock().as_mut().unwrap().unshare_all_memory();
- get_hypervisor().mmio_guard_unmap(console::BASE_ADDRESS).map_err(|e| {
- error!("Failed to unshare the UART: {e}");
- RebootReason::InternalError
- })?;
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.unmap(console::BASE_ADDRESS).map_err(|e| {
+ error!("Failed to unshare the UART: {e}");
+ RebootReason::InternalError
+ })?;
+ }
// Drop MemoryTracker and deactivate page table.
drop(MEMORY.lock().take());
diff --git a/pvmfw/src/exceptions.rs b/pvmfw/src/exceptions.rs
index 802ebbb..d9f0891 100644
--- a/pvmfw/src/exceptions.rs
+++ b/pvmfw/src/exceptions.rs
@@ -14,95 +14,14 @@
//! Exception handlers.
-use aarch64_paging::paging::VirtualAddress;
-use core::fmt;
-use vmbase::console;
-use vmbase::logger;
-use vmbase::memory::{page_4kb_of, MemoryTrackerError, MEMORY};
-use vmbase::read_sysreg;
-use vmbase::{eprintln, power::reboot};
-
-const UART_PAGE: usize = page_4kb_of(console::BASE_ADDRESS);
-
-#[derive(Debug)]
-enum HandleExceptionError {
- PageTableUnavailable,
- PageTableNotInitialized,
- InternalError(MemoryTrackerError),
- UnknownException,
-}
-
-impl From<MemoryTrackerError> for HandleExceptionError {
- fn from(other: MemoryTrackerError) -> Self {
- Self::InternalError(other)
- }
-}
-
-impl fmt::Display for HandleExceptionError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::PageTableUnavailable => write!(f, "Page table is not available."),
- Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
- Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
- Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
- }
- }
-}
-
-#[derive(Debug, PartialEq, Copy, Clone)]
-enum Esr {
- DataAbortTranslationFault,
- DataAbortPermissionFault,
- DataAbortSyncExternalAbort,
- Unknown(usize),
-}
-
-impl Esr {
- const EXT_DABT_32BIT: usize = 0x96000010;
- const TRANSL_FAULT_BASE_32BIT: usize = 0x96000004;
- const TRANSL_FAULT_ISS_MASK_32BIT: usize = !0x143;
- const PERM_FAULT_BASE_32BIT: usize = 0x9600004C;
- const PERM_FAULT_ISS_MASK_32BIT: usize = !0x103;
-}
-
-impl From<usize> for Esr {
- fn from(esr: usize) -> Self {
- if esr == Self::EXT_DABT_32BIT {
- Self::DataAbortSyncExternalAbort
- } else if esr & Self::TRANSL_FAULT_ISS_MASK_32BIT == Self::TRANSL_FAULT_BASE_32BIT {
- Self::DataAbortTranslationFault
- } else if esr & Self::PERM_FAULT_ISS_MASK_32BIT == Self::PERM_FAULT_BASE_32BIT {
- Self::DataAbortPermissionFault
- } else {
- Self::Unknown(esr)
- }
- }
-}
-
-impl fmt::Display for Esr {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::DataAbortSyncExternalAbort => write!(f, "Synchronous external abort"),
- Self::DataAbortTranslationFault => write!(f, "Translation fault"),
- Self::DataAbortPermissionFault => write!(f, "Permission fault"),
- Self::Unknown(v) => write!(f, "Unknown exception esr={v:#08x}"),
- }
- }
-}
-
-#[inline]
-fn handle_translation_fault(far: VirtualAddress) -> Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_mmio_fault(far)?)
-}
-
-#[inline]
-fn handle_permission_fault(far: VirtualAddress) -> Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_permission_fault(far)?)
-}
+use vmbase::{
+ eprintln,
+ exceptions::{ArmException, Esr, HandleExceptionError},
+ logger,
+ memory::{handle_permission_fault, handle_translation_fault},
+ power::reboot,
+ read_sysreg,
+};
fn handle_exception(exception: &ArmException) -> Result<(), HandleExceptionError> {
// Handle all translation faults on both read and write, and MMIO guard map
@@ -115,45 +34,6 @@
}
}
-/// A struct representing an Armv8 exception.
-struct ArmException {
- /// The value of the exception syndrome register.
- esr: Esr,
- /// The faulting virtual address read from the fault address register.
- far: VirtualAddress,
-}
-
-impl fmt::Display for ArmException {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "ArmException: esr={}, far={}", self.esr, self.far)
- }
-}
-
-impl ArmException {
- /// Reads the values of the EL1 exception syndrome register (`esr_el1`)
- /// and fault address register (`far_el1`) and returns a new instance of
- /// `ArmException` with these values.
- fn from_el1_regs() -> Self {
- let esr: Esr = read_sysreg!("esr_el1").into();
- let far = read_sysreg!("far_el1");
- Self { esr, far: VirtualAddress(far) }
- }
-
- /// Prints the details of an obj and the exception, excluding UART exceptions.
- fn print<T: fmt::Display>(&self, exception_name: &str, obj: T, elr: u64) {
- // Don't print to the UART if we are handling an exception it could raise.
- if !self.is_uart_exception() {
- eprintln!("{exception_name}");
- eprintln!("{obj}");
- eprintln!("{}, elr={:#08x}", self, elr);
- }
- }
-
- fn is_uart_exception(&self) -> bool {
- self.esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(self.far.0) == UART_PAGE
- }
-}
-
#[no_mangle]
extern "C" fn sync_exception_current(elr: u64, _spsr: u64) {
// Disable logging in exception handler to prevent unsafe writes to UART.
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index 319100f..2382b50 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -813,7 +813,7 @@
}
}
-fn read_common_debug_policy(fdt: &Fdt, debug_feature_name: &CStr) -> libfdt::Result<bool> {
+fn has_common_debug_policy(fdt: &Fdt, debug_feature_name: &CStr) -> libfdt::Result<bool> {
if let Some(node) = fdt.node(cstr!("/avf/guest/common"))? {
if let Some(value) = node.getprop_u32(debug_feature_name)? {
return Ok(value == 1);
@@ -823,8 +823,8 @@
}
fn filter_out_dangerous_bootargs(fdt: &mut Fdt, bootargs: &CStr) -> libfdt::Result<()> {
- let has_crashkernel = read_common_debug_policy(fdt, cstr!("ramdump"))?;
- let has_console = read_common_debug_policy(fdt, cstr!("log"))?;
+ let has_crashkernel = has_common_debug_policy(fdt, cstr!("ramdump"))?;
+ let has_console = has_common_debug_policy(fdt, cstr!("log"))?;
let accepted: &[(&str, Box<dyn Fn(Option<&str>) -> bool>)] = &[
("panic", Box::new(|v| if let Some(v) = v { v == "=-1" } else { false })),
diff --git a/rialto/src/error.rs b/rialto/src/error.rs
index 84228c4..0c1e25d 100644
--- a/rialto/src/error.rs
+++ b/rialto/src/error.rs
@@ -29,8 +29,6 @@
Hypervisor(HypervisorError),
/// Failed when attempting to map some range in the page table.
PageTableMapping(MapError),
- /// Failed to initialize the logger.
- LoggerInit,
/// Invalid FDT.
InvalidFdt(FdtError),
/// Invalid PCI.
@@ -39,6 +37,10 @@
MemoryOperationFailed(MemoryTrackerError),
/// Failed to initialize PCI.
PciInitializationFailed(pci::PciError),
+ /// Failed to create VirtIO Socket device.
+ VirtIOSocketCreationFailed(virtio_drivers::Error),
+ /// Missing socket device.
+ MissingVirtIOSocketDevice,
}
impl fmt::Display for Error {
@@ -48,11 +50,14 @@
Self::PageTableMapping(e) => {
write!(f, "Failed when attempting to map some range in the page table: {e}.")
}
- Self::LoggerInit => write!(f, "Failed to initialize the logger."),
Self::InvalidFdt(e) => write!(f, "Invalid FDT: {e}"),
Self::InvalidPci(e) => write!(f, "Invalid PCI: {e}"),
Self::MemoryOperationFailed(e) => write!(f, "Failed memory operation: {e}"),
Self::PciInitializationFailed(e) => write!(f, "Failed to initialize PCI: {e}"),
+ Self::VirtIOSocketCreationFailed(e) => {
+ write!(f, "Failed to create VirtIO Socket device: {e}")
+ }
+ Self::MissingVirtIOSocketDevice => write!(f, "Missing VirtIO Socket device."),
}
}
}
diff --git a/rialto/src/exceptions.rs b/rialto/src/exceptions.rs
index 61f7846..b806b08 100644
--- a/rialto/src/exceptions.rs
+++ b/rialto/src/exceptions.rs
@@ -14,14 +14,37 @@
//! Exception handlers.
-use core::arch::asm;
-use vmbase::{console::emergency_write_str, eprintln, power::reboot};
+use vmbase::{
+ console::emergency_write_str,
+ eprintln,
+ exceptions::{ArmException, Esr, HandleExceptionError},
+ logger,
+ memory::{handle_permission_fault, handle_translation_fault},
+ power::reboot,
+ read_sysreg,
+};
+
+fn handle_exception(exception: &ArmException) -> Result<(), HandleExceptionError> {
+ // Handle all translation faults on both read and write, and MMIO guard map
+ // flagged invalid pages or blocks that caused the exception.
+ // Handle permission faults for DBM flagged entries, and flag them as dirty on write.
+ match exception.esr {
+ Esr::DataAbortTranslationFault => handle_translation_fault(exception.far),
+ Esr::DataAbortPermissionFault => handle_permission_fault(exception.far),
+ _ => Err(HandleExceptionError::UnknownException),
+ }
+}
#[no_mangle]
-extern "C" fn sync_exception_current() {
- emergency_write_str("sync_exception_current\n");
- print_esr();
- reboot();
+extern "C" fn sync_exception_current(elr: u64, _spsr: u64) {
+ // Disable logging in exception handler to prevent unsafe writes to UART.
+ let _guard = logger::suppress();
+
+ let exception = ArmException::from_el1_regs();
+ if let Err(e) = handle_exception(&exception) {
+ exception.print("sync_exception_current", e, elr);
+ reboot()
+ }
}
#[no_mangle]
@@ -71,9 +94,6 @@
#[inline]
fn print_esr() {
- let mut esr: u64;
- unsafe {
- asm!("mrs {esr}, esr_el1", esr = out(reg) esr);
- }
+ let esr = read_sysreg!("esr_el1");
eprintln!("esr={:#08x}", esr);
}
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 5e693c8..bbc9997 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -24,20 +24,26 @@
use crate::error::{Error, Result};
use core::num::NonZeroUsize;
-use core::result;
use core::slice;
use fdtpci::PciInfo;
-use hyp::{get_hypervisor, HypervisorCap, KvmError};
+use hyp::{get_mem_sharer, get_mmio_guard};
use libfdt::FdtError;
use log::{debug, error, info};
+use virtio_drivers::{
+ transport::{pci::bus::PciRoot, DeviceType, Transport},
+ Hal,
+};
use vmbase::{
configure_heap,
fdt::SwiotlbInfo,
layout::{self, crosvm},
main,
- memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_64KB},
+ memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_128KB},
power::reboot,
- virtio::pci,
+ virtio::{
+ pci::{self, PciTransportIterator, VirtIOSocket},
+ HalImpl,
+ },
};
fn new_page_table() -> Result<PageTable> {
@@ -52,21 +58,6 @@
Ok(page_table)
}
-fn try_init_logger() -> Result<bool> {
- let mmio_guard_supported = match get_hypervisor().mmio_guard_init() {
- // pKVM blocks MMIO by default, we need to enable MMIO guard to support logging.
- Ok(()) => {
- get_hypervisor().mmio_guard_map(vmbase::console::BASE_ADDRESS)?;
- true
- }
- // MMIO guard enroll is not supported in unprotected VM.
- Err(hyp::Error::MmioGuardNotsupported) => false,
- Err(e) => return Err(e.into()),
- };
- vmbase::logger::init(log::LevelFilter::Debug).map_err(|_| Error::LoggerInit)?;
- Ok(mmio_guard_supported)
-}
-
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
@@ -98,14 +89,14 @@
e
})?;
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- let granule = memory_protection_granule()?;
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule()?;
MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
error!("Failed to initialize dynamically shared pool.");
e
})?;
- } else {
- let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().ok_or_else(|| {
+ } else if let Ok(swiotlb_info) = SwiotlbInfo::new_from_fdt(fdt) {
+ let range = swiotlb_info.fixed_range().ok_or_else(|| {
error!("Pre-shared pool range not specified in swiotlb node");
Error::from(FdtError::BadValue)
})?;
@@ -113,60 +104,65 @@
error!("Failed to initialize pre-shared pool.");
e
})?;
+ } else {
+ info!("No MEM_SHARE capability detected or swiotlb found: allocating buffers from heap.");
+ MEMORY.lock().as_mut().unwrap().init_heap_shared_pool().map_err(|e| {
+ error!("Failed to initialize heap-based pseudo-shared pool.");
+ e
+ })?;
}
let pci_info = PciInfo::from_fdt(fdt)?;
debug!("PCI: {pci_info:#x?}");
- let pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap())
+ let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap())
.map_err(Error::PciInitializationFailed)?;
debug!("PCI root: {pci_root:#x?}");
+ let socket_device = find_socket_device::<HalImpl>(&mut pci_root)?;
+ debug!("Found socket device: guest cid = {:?}", socket_device.guest_cid());
Ok(())
}
-fn memory_protection_granule() -> result::Result<usize, hyp::Error> {
- match get_hypervisor().memory_protection_granule() {
- Ok(granule) => Ok(granule),
- // Take the default page size when KVM call is not supported in non-protected VMs.
- Err(hyp::Error::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
- Err(e) => Err(e),
- }
+fn find_socket_device<T: Hal>(pci_root: &mut PciRoot) -> Result<VirtIOSocket<T>> {
+ PciTransportIterator::<T>::new(pci_root)
+ .find(|t| DeviceType::Socket == t.device_type())
+ .map(VirtIOSocket::<T>::new)
+ .transpose()
+ .map_err(Error::VirtIOSocketCreationFailed)?
+ .ok_or(Error::MissingVirtIOSocketDevice)
}
-fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
+fn try_unshare_all_memory() -> Result<()> {
info!("Starting unsharing memory...");
// No logging after unmapping UART.
- if mmio_guard_supported {
- get_hypervisor().mmio_guard_unmap(vmbase::console::BASE_ADDRESS)?;
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.unmap(vmbase::console::BASE_ADDRESS)?;
}
// Unshares all memory and deactivates page table.
drop(MEMORY.lock().take());
Ok(())
}
-fn unshare_all_memory(mmio_guard_supported: bool) {
- if let Err(e) = try_unshare_all_memory(mmio_guard_supported) {
+fn unshare_all_memory() {
+ if let Err(e) = try_unshare_all_memory() {
error!("Failed to unshare the memory: {e}");
}
}
/// Entry point for Rialto.
pub fn main(fdt_addr: u64, _a1: u64, _a2: u64, _a3: u64) {
- let Ok(mmio_guard_supported) = try_init_logger() else {
- // Don't log anything if the logger initialization fails.
- reboot();
- };
+ log::set_max_level(log::LevelFilter::Debug);
// SAFETY: `fdt_addr` is supposed to be a valid pointer and points to
// a valid `Fdt`.
match unsafe { try_main(fdt_addr as usize) } {
- Ok(()) => unshare_all_memory(mmio_guard_supported),
+ Ok(()) => unshare_all_memory(),
Err(e) => {
error!("Rialto failed with {e}");
- unshare_all_memory(mmio_guard_supported);
+ unshare_all_memory();
reboot()
}
}
}
main!(main);
-configure_heap!(SIZE_64KB);
+configure_heap!(SIZE_128KB);
diff --git a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
index 014f9f0..9cf28c7 100644
--- a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
@@ -115,7 +115,7 @@
mAndroidDevice.supportsMicrodroid(/* protectedVm= */ true));
assumeFalse("Test requires setprop for using custom pvmfw and adb root", isUserBuild());
- mAndroidDevice.enableAdbRoot();
+ assumeTrue("Skip if adb root fails", mAndroidDevice.enableAdbRoot());
// tradefed copies the test artifacts under /tmp when running tests,
// so we should *find* the artifacts with the file name.
diff --git a/tests/testapk/Android.bp b/tests/testapk/Android.bp
index fe8f5c9..8a31c21 100644
--- a/tests/testapk/Android.bp
+++ b/tests/testapk/Android.bp
@@ -43,7 +43,10 @@
],
min_sdk_version: "33",
// Defined in ../vmshareapp/Android.bp
- data: [":MicrodroidVmShareApp"],
+ data: [
+ ":MicrodroidVmShareApp",
+ ":test_microdroid_vendor_image",
+ ],
}
// Defaults shared between MicrodroidTestNativeLib and MicrodroidPayloadInOtherAppNativeLib shared
diff --git a/tests/testapk/AndroidManifest.xml b/tests/testapk/AndroidManifest.xml
index 2ea3f6c..d6e6004 100644
--- a/tests/testapk/AndroidManifest.xml
+++ b/tests/testapk/AndroidManifest.xml
@@ -22,8 +22,7 @@
<queries>
<package android:name="com.android.microdroid.vmshare_app" />
</queries>
- <application>
- </application>
+ <application />
<instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
android:targetPackage="com.android.microdroid.test"
android:label="Microdroid Test" />
diff --git a/tests/testapk/AndroidTest.xml b/tests/testapk/AndroidTest.xml
index 929dd31..e72a2e3 100644
--- a/tests/testapk/AndroidTest.xml
+++ b/tests/testapk/AndroidTest.xml
@@ -23,6 +23,14 @@
<option name="test-file-name" value="MicrodroidTestApp.apk" />
<option name="test-file-name" value="MicrodroidVmShareApp.apk" />
</target_preparer>
+ <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+ <option name="run-command" value="mkdir -p /data/local/tmp/cts/microdroid" />
+ <option name="teardown-command" value="rm -rf /data/local/tmp/cts/microdroid" />
+ </target_preparer>
+ <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
+ <option name="cleanup" value="true" />
+ <option name="push" value="test_microdroid_vendor_image.img->/data/local/tmp/cts/microdroid/test_microdroid_vendor_image.img" />
+ </target_preparer>
<test class="com.android.tradefed.testtype.AndroidJUnitTest" >
<option name="package" value="com.android.microdroid.test" />
<option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 5ec4ca8..f6dc1b8 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -1975,8 +1975,10 @@
.isEqualTo(OsConstants.S_IRUSR | OsConstants.S_IXUSR);
}
- // Taken from bionic/libs/kernel/uapi/linux/mounth.h.
+ // Taken from bionic/libc/kernel/uapi/linux/mount.h
+ private static final int MS_RDONLY = 1;
private static final int MS_NOEXEC = 8;
+ private static final int MS_NOATIME = 1024;
@Test
@CddTest(requirements = {"9.17/C-1-5"})
@@ -2050,6 +2052,85 @@
}
}
+ @Test
+ public void configuringVendorDiskImageRequiresCustomPermission() throws Exception {
+ assumeSupportedDevice();
+
+ File vendorDiskImage =
+ new File("/data/local/tmp/cts/microdroid/test_microdroid_vendor_image.img");
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setVendorDiskImage(vendorDiskImage)
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+
+ VirtualMachine vm =
+ forceCreateNewVirtualMachine("test_vendor_image_req_custom_permission", config);
+
+ SecurityException e =
+ assertThrows(
+ SecurityException.class, () -> runVmTestService(TAG, vm, (ts, tr) -> {}));
+ assertThat(e)
+ .hasMessageThat()
+ .contains("android.permission.USE_CUSTOM_VIRTUAL_MACHINE permission");
+ }
+
+ @Test
+ public void bootsWithVendorPartition() throws Exception {
+ assumeSupportedDevice();
+
+ grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
+
+ File vendorDiskImage =
+ new File("/data/local/tmp/cts/microdroid/test_microdroid_vendor_image.img");
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setVendorDiskImage(vendorDiskImage)
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_boot_with_vendor", config);
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mMountFlags = ts.getMountFlags("/vendor");
+ });
+
+ assertThat(testResults.mException).isNull();
+ int expectedFlags = MS_NOATIME | MS_RDONLY;
+ assertThat(testResults.mMountFlags & expectedFlags).isEqualTo(expectedFlags);
+ }
+
+ @Test
+ public void systemPartitionMountFlags() throws Exception {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_system_mount_flags", config);
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mMountFlags = ts.getMountFlags("/");
+ });
+
+ assertThat(testResults.mException).isNull();
+ int expectedFlags = MS_NOATIME | MS_RDONLY;
+ assertThat(testResults.mMountFlags & expectedFlags).isEqualTo(expectedFlags);
+ }
+
private static class VmShareServiceConnection implements ServiceConnection {
private final CountDownLatch mLatch = new CountDownLatch(1);
diff --git a/tests/vendor_images/Android.bp b/tests/vendor_images/Android.bp
new file mode 100644
index 0000000..09c657c
--- /dev/null
+++ b/tests/vendor_images/Android.bp
@@ -0,0 +1,9 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+android_filesystem {
+ name: "test_microdroid_vendor_image",
+ type: "ext4",
+ file_contexts: ":microdroid_vendor_file_contexts.gen",
+}
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index dd74d55..d0a8e85 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -20,7 +20,7 @@
use crate::composite::make_composite_image;
use crate::crosvm::{CrosvmConfig, DiskFile, PayloadState, VmContext, VmInstance, VmState};
use crate::debug_config::DebugConfig;
-use crate::payload::{add_microdroid_payload_images, add_microdroid_system_images};
+use crate::payload::{add_microdroid_payload_images, add_microdroid_system_images, add_microdroid_vendor_image};
use crate::selinux::{getfilecon, SeContext};
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_system_virtualizationcommon::aidl::android::system::virtualizationcommon::{
@@ -579,6 +579,15 @@
Ok(DiskFile { image, writable: disk.writable })
}
+fn append_kernel_param(param: &str, vm_config: &mut VirtualMachineRawConfig) {
+ if let Some(ref mut params) = vm_config.params {
+ params.push(' ');
+ params.push_str(param)
+ } else {
+ vm_config.params = Some(param.to_owned())
+ }
+}
+
fn load_app_config(
config: &VirtualMachineAppConfig,
debug_config: &DebugConfig,
@@ -620,6 +629,11 @@
}
vm_config.taskProfiles = custom_config.taskProfiles.clone();
vm_config.gdbPort = custom_config.gdbPort;
+
+ if let Some(file) = custom_config.vendorImage.as_ref() {
+ add_microdroid_vendor_image(clone_file(file)?, &mut vm_config);
+ append_kernel_param("androidboot.microdroid.mount_vendor=1", &mut vm_config)
+ }
}
if config.memoryMib > 0 {
@@ -1349,4 +1363,19 @@
assert!(modified_orig == modified_new, "idsig file was updated unnecessarily");
Ok(())
}
+
+ #[test]
+ fn test_append_kernel_param_first_param() {
+ let mut vm_config = VirtualMachineRawConfig { ..Default::default() };
+ append_kernel_param("foo=1", &mut vm_config);
+ assert_eq!(vm_config.params, Some("foo=1".to_owned()))
+ }
+
+ #[test]
+ fn test_append_kernel_param() {
+ let mut vm_config =
+ VirtualMachineRawConfig { params: Some("foo=5".to_owned()), ..Default::default() };
+ append_kernel_param("bar=42", &mut vm_config);
+ assert_eq!(vm_config.params, Some("foo=5 bar=42".to_owned()))
+ }
}
diff --git a/virtualizationmanager/src/payload.rs b/virtualizationmanager/src/payload.rs
index 733add6..ab6f31c 100644
--- a/virtualizationmanager/src/payload.rs
+++ b/virtualizationmanager/src/payload.rs
@@ -398,6 +398,18 @@
.collect()
}
+pub fn add_microdroid_vendor_image(vendor_image: File, vm_config: &mut VirtualMachineRawConfig) {
+ vm_config.disks.push(DiskImage {
+ image: None,
+ writable: false,
+ partitions: vec![Partition {
+ label: "microdroid-vendor".to_owned(),
+ image: Some(ParcelFileDescriptor::new(vendor_image)),
+ writable: false,
+ }],
+ })
+}
+
pub fn add_microdroid_system_images(
config: &VirtualMachineAppConfig,
instance_file: File,
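
To tie the new helper back to the fstab.microdroid entry earlier in this change: add_microdroid_vendor_image() attaches the image as a read-only partition labelled "microdroid-vendor", i.e. the /dev/block/by-name/microdroid-vendor device that Microdroid now mounts on /vendor. A test-style sketch under that assumption (the test name and the /dev/null stand-in file are illustrative):

    #[test]
    fn vendor_image_is_attached_as_microdroid_vendor_partition() -> std::io::Result<()> {
        // Any readable file works as a stand-in for a real vendor image here.
        let vendor_image = File::open("/dev/null")?;
        let mut vm_config = VirtualMachineRawConfig { ..Default::default() };

        add_microdroid_vendor_image(vendor_image, &mut vm_config);

        let disk = &vm_config.disks[0];
        assert!(!disk.writable);
        assert_eq!(disk.partitions[0].label, "microdroid-vendor");
        assert!(!disk.partitions[0].writable);
        Ok(())
    }
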
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
index 6a0bf7c..2b762c4 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
@@ -105,6 +105,9 @@
* List of task profile names to apply for the VM
*/
String[] taskProfiles;
+
+ /** A disk image containing vendor-specific modules. */
+ @nullable ParcelFileDescriptor vendorImage;
}
/** Configuration parameters guarded by android.permission.USE_CUSTOM_VIRTUAL_MACHINE */
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 0800f57..d7c2c4d 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -115,6 +115,10 @@
/// Path to custom kernel image to use when booting Microdroid.
#[clap(long)]
kernel: Option<PathBuf>,
+
+ /// Path to disk image containing vendor-specific modules.
+ #[clap(long)]
+ vendor: Option<PathBuf>,
},
/// Run a virtual machine with Microdroid inside
RunMicrodroid {
@@ -179,6 +183,10 @@
/// Path to custom kernel image to use when booting Microdroid.
#[clap(long)]
kernel: Option<PathBuf>,
+
+ /// Path to disk image containing vendor-specific modules.
+ #[clap(long)]
+ vendor: Option<PathBuf>,
},
/// Run a virtual machine
Run {
@@ -299,6 +307,7 @@
extra_idsigs,
gdb,
kernel,
+ vendor,
} => command_run_app(
name,
get_service()?.as_ref(),
@@ -320,6 +329,7 @@
&extra_idsigs,
gdb,
kernel.as_deref(),
+ vendor.as_deref(),
),
Opt::RunMicrodroid {
name,
@@ -336,6 +346,7 @@
task_profiles,
gdb,
kernel,
+ vendor,
} => command_run_microdroid(
name,
get_service()?.as_ref(),
@@ -352,6 +363,7 @@
task_profiles,
gdb,
kernel.as_deref(),
+ vendor.as_deref(),
),
Opt::Run { name, config, cpu_topology, task_profiles, console, console_in, log, gdb } => {
command_run(
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 84072ca..64da2d9 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -65,6 +65,7 @@
extra_idsigs: &[PathBuf],
gdb: Option<NonZeroU16>,
kernel: Option<&Path>,
+ vendor: Option<&Path>,
) -> Result<(), Error> {
let apk_file = File::open(apk).context("Failed to open APK file")?;
@@ -122,6 +123,8 @@
let kernel = kernel.map(|p| open_parcel_file(p, false)).transpose()?;
+ let vendor = vendor.map(|p| open_parcel_file(p, false)).transpose()?;
+
let extra_idsig_files: Result<Vec<File>, _> = extra_idsigs.iter().map(File::open).collect();
let extra_idsig_fds = extra_idsig_files?.into_iter().map(ParcelFileDescriptor::new).collect();
@@ -144,6 +147,7 @@
customKernelImage: kernel,
gdbPort: gdb.map(u16::from).unwrap_or(0) as i32, // 0 means no gdb
taskProfiles: task_profiles,
+ vendorImage: vendor,
};
let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
@@ -203,6 +207,7 @@
task_profiles: Vec<String>,
gdb: Option<NonZeroU16>,
kernel: Option<&Path>,
+ vendor: Option<&Path>,
) -> Result<(), Error> {
let apk = find_empty_payload_apk_path()?;
println!("found path {}", apk.display());
@@ -236,6 +241,7 @@
&extra_sig,
gdb,
kernel,
+ vendor,
)
}
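
Together with the clap options added in vm/src/main.rs above, this plumbs a vendor image from the command line down to virtualizationmanager: assuming clap's default kebab-case subcommand naming, an invocation along the lines of `vm run-microdroid --vendor /path/to/microdroid_vendor.img` opens the file non-writable (open_parcel_file(p, false)) and forwards it as the vendorImage ParcelFileDescriptor in the app config.
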
diff --git a/vmbase/entry.S b/vmbase/entry.S
index 9f6993a..9177a4a 100644
--- a/vmbase/entry.S
+++ b/vmbase/entry.S
@@ -63,72 +63,6 @@
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
-/* SMC function IDs */
-.set .L_SMCCC_VERSION_ID, 0x80000000
-.set .L_SMCCC_TRNG_VERSION_ID, 0x84000050
-.set .L_SMCCC_TRNG_FEATURES_ID, 0x84000051
-.set .L_SMCCC_TRNG_RND64_ID, 0xc4000053
-
-/* SMC function versions */
-.set .L_SMCCC_VERSION_1_1, 0x0101
-.set .L_SMCCC_TRNG_VERSION_1_0, 0x0100
-
-/* Bionic-compatible stack protector */
-.section .data.stack_protector, "aw"
-__bionic_tls:
- .zero 40
-.global __stack_chk_guard
-__stack_chk_guard:
- .quad 0
-
-/**
- * This macro stores a random value into a register.
- * If a TRNG backed is not present or if an error occurs, the value remains unchanged.
- */
-.macro rnd_reg reg:req
- mov x20, x0
- mov x21, x1
- mov x22, x2
- mov x23, x3
-
- /* Verify SMCCC version >=1.1 */
- hvc_call .L_SMCCC_VERSION_ID
- cmp w0, 0
- b.lt 100f
- cmp w0, .L_SMCCC_VERSION_1_1
- b.lt 100f
-
- /* Verify TRNG ABI version 1.x */
- hvc_call .L_SMCCC_TRNG_VERSION_ID
- cmp w0, 0
- b.lt 100f
- cmp w0, .L_SMCCC_TRNG_VERSION_1_0
- b.lt 100f
-
- /* Call TRNG_FEATURES, ensure TRNG_RND is implemented */
- mov_i x1, .L_SMCCC_TRNG_RND64_ID
- hvc_call .L_SMCCC_TRNG_FEATURES_ID
- cmp w0, 0
- b.lt 100f
-
- /* Call TRNG_RND, request 64 bits of entropy */
- mov x1, #64
- hvc_call .L_SMCCC_TRNG_RND64_ID
- cmp x0, 0
- b.lt 100f
-
- mov \reg, x3
- b 101f
-
-100:
- reset_or_hang
-101:
- mov x0, x20
- mov x1, x21
- mov x2, x22
- mov x3, x23
-.endm
-
/**
* This is a generic entry point for an image. It carries out the operations required to prepare the
* loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
@@ -222,18 +156,17 @@
adr x30, vector_table_el1
msr vbar_el1, x30
- /* Set up Bionic-compatible thread-local storage. */
+ /*
+ * Set up Bionic-compatible thread-local storage.
+ *
+ * Note that TPIDR_EL0 can't be configured from rust_entry because the
+ * compiler will dereference it during function entry to access
+ * __stack_chk_guard and Rust doesn't support LLVM's
+ * __attribute__((no_stack_protector)).
+ */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
- /* Randomize stack protector. */
- rnd_reg x29
- adr_l x30, __stack_chk_guard
- str x29, [x30]
-
- /* Write a null byte to the top of the stack guard to act as a string terminator. */
- strb wzr, [x30]
-
/* Call into Rust code. */
bl rust_entry
diff --git a/vmbase/example/src/exceptions.rs b/vmbase/example/src/exceptions.rs
index 0522013..5d7768a 100644
--- a/vmbase/example/src/exceptions.rs
+++ b/vmbase/example/src/exceptions.rs
@@ -14,8 +14,7 @@
//! Exception handlers.
-use core::arch::asm;
-use vmbase::{eprintln, power::reboot};
+use vmbase::{eprintln, power::reboot, read_sysreg};
#[no_mangle]
extern "C" fn sync_exception_current(_elr: u64, _spsr: u64) {
@@ -71,9 +70,6 @@
#[inline]
fn print_esr() {
- let mut esr: u64;
- unsafe {
- asm!("mrs {esr}, esr_el1", esr = out(reg) esr);
- }
+ let esr = read_sysreg!("esr_el1");
eprintln!("esr={:#08x}", esr);
}
diff --git a/vmbase/example/src/layout.rs b/vmbase/example/src/layout.rs
index b032a30..fc578bc 100644
--- a/vmbase/example/src/layout.rs
+++ b/vmbase/example/src/layout.rs
@@ -15,7 +15,6 @@
//! Memory layout.
use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
-use core::arch::asm;
use core::ops::Range;
use log::info;
use vmbase::layout;
@@ -55,13 +54,3 @@
boot_stack.end - boot_stack.start
);
}
-
-/// Bionic-compatible thread-local storage entry, at the given offset from TPIDR_EL0.
-pub fn bionic_tls(off: usize) -> u64 {
- let mut base: usize;
- unsafe {
- asm!("mrs {base}, tpidr_el0", base = out(reg) base);
- let ptr = (base + off) as *const u64;
- *ptr
- }
-}
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index 4176626..a6f3bfa 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -16,6 +16,8 @@
#![no_main]
#![no_std]
+#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(clippy::undocumented_unsafe_blocks)]
mod exceptions;
mod layout;
@@ -23,7 +25,7 @@
extern crate alloc;
-use crate::layout::{bionic_tls, boot_stack_range, print_addresses, DEVICE_REGION};
+use crate::layout::{boot_stack_range, print_addresses, DEVICE_REGION};
use crate::pci::{check_pci, get_bar_region};
use aarch64_paging::paging::MemoryRegion;
use aarch64_paging::MapError;
@@ -32,9 +34,9 @@
use libfdt::Fdt;
use log::{debug, error, info, trace, warn, LevelFilter};
use vmbase::{
- configure_heap, cstr,
- layout::{dtb_range, rodata_range, scratch_range, stack_chk_guard, text_range},
- logger, main,
+ bionic, configure_heap, cstr,
+ layout::{dtb_range, rodata_range, scratch_range, text_range},
+ linker, logger, main,
memory::{PageTable, SIZE_64KB},
};
@@ -69,7 +71,7 @@
/// Entry point for VM bootloader.
pub fn main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) {
- logger::init(LevelFilter::Debug).unwrap();
+ log::set_max_level(LevelFilter::Debug);
info!("Hello world");
info!("x0={:#018x}, x1={:#018x}, x2={:#018x}, x3={:#018x}", arg0, arg1, arg2, arg3);
@@ -80,8 +82,9 @@
info!("Checking FDT...");
let fdt = dtb_range();
- let fdt =
- unsafe { core::slice::from_raw_parts_mut(fdt.start.0 as *mut u8, fdt.end.0 - fdt.start.0) };
+ let fdt_size = fdt.end.0 - fdt.start.0;
+ // SAFETY: The DTB range is valid, writable memory, and we don't construct any aliases to it.
+ let fdt = unsafe { core::slice::from_raw_parts_mut(fdt.start.0 as *mut u8, fdt_size) };
let fdt = Fdt::from_mut_slice(fdt).unwrap();
info!("FDT passed verification.");
check_fdt(fdt);
@@ -98,6 +101,7 @@
check_data();
check_dice();
+ // SAFETY: This is the only place where `make_pci_root` is called.
let mut pci_root = unsafe { pci_info.make_pci_root() };
check_pci(&mut pci_root);
@@ -105,42 +109,58 @@
}
fn check_stack_guard() {
- const BIONIC_TLS_STACK_GRD_OFF: usize = 40;
-
info!("Testing stack guard");
- assert_eq!(bionic_tls(BIONIC_TLS_STACK_GRD_OFF), stack_chk_guard());
+ // SAFETY: No concurrency issue should occur when running these tests.
+ let stack_guard = unsafe { bionic::TLS.stack_guard };
+ assert_ne!(stack_guard, 0);
+ // Check that a NULL-terminating value is added for C functions consuming strings from stack.
+ assert_eq!(stack_guard.to_ne_bytes().last(), Some(&0));
+ // Check that the TLS and guard are properly accessible from the dedicated register.
+ assert_eq!(stack_guard, bionic::__get_tls().stack_guard);
+ // Check that the LLVM __stack_chk_guard alias is also properly set up.
+ assert_eq!(
+ stack_guard,
+ // SAFETY: No concurrency issue should occur when running these tests.
+ unsafe { linker::__stack_chk_guard },
+ );
}
fn check_data() {
info!("INITIALISED_DATA: {:?}", INITIALISED_DATA.as_ptr());
- unsafe {
- info!("ZEROED_DATA: {:?}", ZEROED_DATA.as_ptr());
- info!("MUTABLE_DATA: {:?}", MUTABLE_DATA.as_ptr());
- }
+ // SAFETY: We only print the addresses of the static mutable variable, not actually access it.
+ info!("ZEROED_DATA: {:?}", unsafe { ZEROED_DATA.as_ptr() });
+ // SAFETY: We only print the addresses of the static mutable variable, not actually access it.
+ info!("MUTABLE_DATA: {:?}", unsafe { MUTABLE_DATA.as_ptr() });
assert_eq!(INITIALISED_DATA[0], 1);
assert_eq!(INITIALISED_DATA[1], 2);
assert_eq!(INITIALISED_DATA[2], 3);
assert_eq!(INITIALISED_DATA[3], 4);
- unsafe {
- for element in ZEROED_DATA.iter() {
- assert_eq!(*element, 0);
- }
- ZEROED_DATA[0] = 13;
- assert_eq!(ZEROED_DATA[0], 13);
- ZEROED_DATA[0] = 0;
- assert_eq!(ZEROED_DATA[0], 0);
+    // SAFETY: Nothing else in the program accesses this static mutable variable, so there is no
+    // chance of concurrent access.
+    let zeroed_data = unsafe { &mut ZEROED_DATA };
+    // SAFETY: Nothing else in the program accesses this static mutable variable, so there is no
+    // chance of concurrent access.
+    let mutable_data = unsafe { &mut MUTABLE_DATA };
- assert_eq!(MUTABLE_DATA[0], 1);
- assert_eq!(MUTABLE_DATA[1], 2);
- assert_eq!(MUTABLE_DATA[2], 3);
- assert_eq!(MUTABLE_DATA[3], 4);
- MUTABLE_DATA[0] += 41;
- assert_eq!(MUTABLE_DATA[0], 42);
- MUTABLE_DATA[0] -= 41;
- assert_eq!(MUTABLE_DATA[0], 1);
+ for element in zeroed_data.iter() {
+ assert_eq!(*element, 0);
}
+ zeroed_data[0] = 13;
+ assert_eq!(zeroed_data[0], 13);
+ zeroed_data[0] = 0;
+ assert_eq!(zeroed_data[0], 0);
+
+ assert_eq!(mutable_data[0], 1);
+ assert_eq!(mutable_data[1], 2);
+ assert_eq!(mutable_data[2], 3);
+ assert_eq!(mutable_data[3], 4);
+ mutable_data[0] += 41;
+ assert_eq!(mutable_data[0], 42);
+ mutable_data[0] -= 41;
+ assert_eq!(mutable_data[0], 1);
+
info!("Data looks good");
}
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index 6d33215..26bc29b 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -120,11 +120,21 @@
struct HalImpl;
+/// SAFETY: See the 'Implementation Safety' comments on methods below for how they fulfill the
+/// safety requirements of the unsafe `Hal` trait.
unsafe impl Hal for HalImpl {
+ /// # Implementation Safety
+ ///
+    /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
+    /// reference in the program until it is deallocated by `dma_dealloc`: it allocates a unique
+    /// block of memory using `alloc_zeroed`, which is guaranteed to return valid, unique and
+    /// zeroed memory. We request an alignment of at least `PAGE_SIZE` from `alloc_zeroed`.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
- let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
- // Safe because the layout has a non-zero size.
+ let layout =
+ Layout::from_size_align(pages.checked_mul(PAGE_SIZE).unwrap(), PAGE_SIZE).unwrap();
+ assert_ne!(layout.size(), 0);
+ // SAFETY: We just checked that the layout has a non-zero size.
let vaddr = unsafe { alloc_zeroed(layout) };
let vaddr =
if let Some(vaddr) = NonNull::new(vaddr) { vaddr } else { handle_alloc_error(layout) };
@@ -135,14 +145,19 @@
unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
- // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
- // the layout is the same as was used then.
+ // SAFETY: The memory was allocated by `dma_alloc` above using the same allocator, and the
+ // layout is the same as was used then.
unsafe {
dealloc(vaddr.as_ptr(), layout);
}
0
}
+ /// # Implementation Safety
+ ///
+ /// The returned pointer must be valid because the `paddr` describes a valid MMIO region, and we
+ /// previously mapped the entire PCI MMIO range. It can't alias any other allocations because
+ /// the PCI MMIO range doesn't overlap with any other memory ranges.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
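Note on the tightened allocation path above: replacing `pages * PAGE_SIZE` with `checked_mul` turns a pathological page count into a loud unwrap panic instead of a silently wrapped, undersized layout. A minimal sketch of the difference, with a stand-in `PAGE_SIZE` (the real constant comes from vmbase):

    use core::alloc::Layout;

    const PAGE_SIZE: usize = 4096; // stand-in for vmbase's PAGE_SIZE

    // A request this large would wrap with plain multiplication in release builds;
    // `checked_mul` reports it as `None`, so the caller's `unwrap()` panics instead.
    let pages = usize::MAX / PAGE_SIZE + 1;
    assert!(pages.checked_mul(PAGE_SIZE).is_none());

    // The normal path builds the same layout as before.
    let layout = Layout::from_size_align(8usize.checked_mul(PAGE_SIZE).unwrap(), PAGE_SIZE).unwrap();
    assert_eq!(layout.size(), 8 * PAGE_SIZE);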
diff --git a/vmbase/sections.ld b/vmbase/sections.ld
index 5232d30..c7ef0ec 100644
--- a/vmbase/sections.ld
+++ b/vmbase/sections.ld
@@ -107,6 +107,9 @@
. = init_stack_pointer;
} >writable_data
+ /* Make our Bionic stack protector compatible with mainline LLVM */
+ __stack_chk_guard = __bionic_tls + 40;
+
/*
* Remove unused sections from the image.
*/
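The `+ 40` above is the Bionic TLS slot reserved for the stack guard, the same offset the removed `BIONIC_TLS_STACK_GRD_OFF` constant encoded in the example's main.rs. A hypothetical compile-time check against the `Tls` struct introduced in bionic.rs below (assuming a toolchain where `core::mem::offset_of!` is available), not part of this change:

    // `stack_guard` must sit 40 bytes into the TLS block so that the linker alias
    // `__stack_chk_guard = __bionic_tls + 40` points at it.
    const _: () = assert!(core::mem::offset_of!(Tls, stack_guard) == 40);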
diff --git a/vmbase/src/bionic.rs b/vmbase/src/bionic.rs
index 5af9ebc..2ce0e83 100644
--- a/vmbase/src/bionic.rs
+++ b/vmbase/src/bionic.rs
@@ -23,9 +23,36 @@
use crate::console;
use crate::eprintln;
+use crate::read_sysreg;
const EOF: c_int = -1;
+/// Bionic thread-local storage.
+#[repr(C)]
+pub struct Tls {
+ /// Unused.
+ _unused: [u8; 40],
+    /// Used by the compiler as the stack canary value.
+ pub stack_guard: u64,
+}
+
+/// Bionic TLS.
+///
+/// Provides the TLS used by Bionic code. This is unique as vmbase only supports one thread.
+///
+/// Note that the linker script re-exports __bionic_tls.stack_guard as __stack_chk_guard for
+/// compatibility with non-Bionic LLVM.
+#[link_section = ".data.stack_protector"]
+#[export_name = "__bionic_tls"]
+pub static mut TLS: Tls = Tls { _unused: [0; 40], stack_guard: 0 };
+
+/// Gets a reference to the TLS from the dedicated system register.
+pub fn __get_tls() -> &'static mut Tls {
+ let tpidr = read_sysreg!("tpidr_el0");
+ // SAFETY: The register is currently only written to once, from entry.S, with a valid value.
+ unsafe { &mut *(tpidr as *mut Tls) }
+}
+
#[no_mangle]
extern "C" fn __stack_chk_fail() -> ! {
panic!("stack guard check failed");
diff --git a/vmbase/src/entry.rs b/vmbase/src/entry.rs
index 0a96d86..24b5035 100644
--- a/vmbase/src/entry.rs
+++ b/vmbase/src/entry.rs
@@ -14,14 +14,51 @@
//! Rust entry point.
-use crate::{console, heap, power::shutdown};
+use crate::{
+ bionic, console, heap, logger,
+ power::{reboot, shutdown},
+ rand,
+};
+use core::mem::size_of;
+use hyp::{self, get_mmio_guard};
+
+fn try_console_init() -> Result<(), hyp::Error> {
+ console::init();
+
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.init()?;
+ mmio_guard.map(console::BASE_ADDRESS)?;
+ }
+
+ Ok(())
+}
/// This is the entry point to the Rust code, called from the binary entry point in `entry.S`.
#[no_mangle]
extern "C" fn rust_entry(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
// SAFETY: Only called once, from here, and inaccessible to client code.
unsafe { heap::init() };
- console::init();
+
+ if try_console_init().is_err() {
+ // Don't panic (or log) here to avoid accessing the console.
+ reboot()
+ }
+
+ logger::init().expect("Failed to initialize the logger");
+    // The level defaults to Off (as in the log crate); clients should call log::set_max_level().
+
+ const SIZE_OF_STACK_GUARD: usize = size_of::<u64>();
+ let mut stack_guard = [0u8; SIZE_OF_STACK_GUARD];
+ // We keep a null byte at the top of the stack guard to act as a string terminator.
+ let random_guard = &mut stack_guard[..(SIZE_OF_STACK_GUARD - 1)];
+
+ rand::init().expect("Failed to initialize a source of entropy");
+ rand::fill_with_entropy(random_guard).expect("Failed to get stack canary entropy");
+ bionic::__get_tls().stack_guard = u64::from_ne_bytes(stack_guard);
+
+    // Note: If rust_entry ever returned (which it shouldn't, being `-> !`), the compiler-injected
+    // stack guard comparison would detect a mismatch and call __stack_chk_fail.
+
// SAFETY: `main` is provided by the application using the `main!` macro, and we make sure it
// has the right type.
unsafe {
@@ -37,16 +74,21 @@
/// Marks the main function of the binary.
///
+/// Once main is entered, it can assume that:
+/// - The panic_handler has been configured and panic!() and friends are available;
+/// - The global_allocator has been configured and heap memory is available;
+/// - The logger has been configured and the log::{info, warn, error, ...} macros are available.
+///
/// Example:
///
/// ```rust
-/// use vmbase::{logger, main};
+/// use vmbase::main;
/// use log::{info, LevelFilter};
///
/// main!(my_main);
///
/// fn my_main() {
-/// logger::init(LevelFilter::Info).unwrap();
+/// log::set_max_level(LevelFilter::Info);
/// info!("Hello world");
/// }
/// ```
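The guard built in `rust_entry` above is seven random bytes plus a trailing zero byte, so C code that reads the guard as a string stops at the terminator. A small worked example of the byte layout on a little-endian target (the entropy bytes are placeholders):

    let mut bytes = [0u8; 8];
    bytes[..7].copy_from_slice(&[0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77]); // stand-in entropy
    let guard = u64::from_ne_bytes(bytes);
    assert_eq!(guard.to_ne_bytes()[7], 0); // the property check_stack_guard() verifies
    assert_eq!(guard, 0x0077_6655_4433_2211); // the terminator lands in the most significant byte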
diff --git a/vmbase/src/exceptions.rs b/vmbase/src/exceptions.rs
new file mode 100644
index 0000000..7833334
--- /dev/null
+++ b/vmbase/src/exceptions.rs
@@ -0,0 +1,139 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Helper functions and structs for exception handlers.
+
+use crate::{
+ console, eprintln,
+ memory::{page_4kb_of, MemoryTrackerError},
+ read_sysreg,
+};
+use aarch64_paging::paging::VirtualAddress;
+use core::fmt;
+
+const UART_PAGE: usize = page_4kb_of(console::BASE_ADDRESS);
+
+/// Represents an error that can occur while handling an exception.
+#[derive(Debug)]
+pub enum HandleExceptionError {
+ /// The page table is unavailable.
+ PageTableUnavailable,
+ /// The page table has not been initialized.
+ PageTableNotInitialized,
+ /// An internal error occurred in the memory tracker.
+ InternalError(MemoryTrackerError),
+ /// An unknown exception occurred.
+ UnknownException,
+}
+
+impl From<MemoryTrackerError> for HandleExceptionError {
+ fn from(other: MemoryTrackerError) -> Self {
+ Self::InternalError(other)
+ }
+}
+
+impl fmt::Display for HandleExceptionError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::PageTableUnavailable => write!(f, "Page table is not available."),
+ Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
+ Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
+ Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
+ }
+ }
+}
+
+/// Represents the possible types of exception syndrome register (ESR) values.
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum Esr {
+ /// Data abort due to translation fault.
+ DataAbortTranslationFault,
+ /// Data abort due to permission fault.
+ DataAbortPermissionFault,
+ /// Data abort due to a synchronous external abort.
+ DataAbortSyncExternalAbort,
+ /// An unknown ESR value.
+ Unknown(usize),
+}
+
+impl Esr {
+ const EXT_DABT_32BIT: usize = 0x96000010;
+ const TRANSL_FAULT_BASE_32BIT: usize = 0x96000004;
+ const TRANSL_FAULT_ISS_MASK_32BIT: usize = !0x143;
+ const PERM_FAULT_BASE_32BIT: usize = 0x9600004C;
+ const PERM_FAULT_ISS_MASK_32BIT: usize = !0x103;
+}
+
+impl From<usize> for Esr {
+ fn from(esr: usize) -> Self {
+ if esr == Self::EXT_DABT_32BIT {
+ Self::DataAbortSyncExternalAbort
+ } else if esr & Self::TRANSL_FAULT_ISS_MASK_32BIT == Self::TRANSL_FAULT_BASE_32BIT {
+ Self::DataAbortTranslationFault
+ } else if esr & Self::PERM_FAULT_ISS_MASK_32BIT == Self::PERM_FAULT_BASE_32BIT {
+ Self::DataAbortPermissionFault
+ } else {
+ Self::Unknown(esr)
+ }
+ }
+}
+
+impl fmt::Display for Esr {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::DataAbortSyncExternalAbort => write!(f, "Synchronous external abort"),
+ Self::DataAbortTranslationFault => write!(f, "Translation fault"),
+ Self::DataAbortPermissionFault => write!(f, "Permission fault"),
+ Self::Unknown(v) => write!(f, "Unknown exception esr={v:#08x}"),
+ }
+ }
+}
+/// A struct representing an Armv8 exception.
+pub struct ArmException {
+ /// The value of the exception syndrome register.
+ pub esr: Esr,
+ /// The faulting virtual address read from the fault address register.
+ pub far: VirtualAddress,
+}
+
+impl fmt::Display for ArmException {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ArmException: esr={}, far={}", self.esr, self.far)
+ }
+}
+
+impl ArmException {
+ /// Reads the values of the EL1 exception syndrome register (`esr_el1`)
+ /// and fault address register (`far_el1`) and returns a new instance of
+ /// `ArmException` with these values.
+ pub fn from_el1_regs() -> Self {
+ let esr: Esr = read_sysreg!("esr_el1").into();
+ let far = read_sysreg!("far_el1");
+ Self { esr, far: VirtualAddress(far) }
+ }
+
+    /// Prints the details of the given object and of the exception, unless the exception was
+    /// raised by an access to the UART.
+    pub fn print<T: fmt::Display>(&self, exception_name: &str, obj: T, elr: u64) {
+        // Don't print to the UART if we are handling an exception that it could have raised.
+ if !self.is_uart_exception() {
+ eprintln!("{exception_name}");
+ eprintln!("{obj}");
+ eprintln!("{}, elr={:#08x}", self, elr);
+ }
+ }
+
+ fn is_uart_exception(&self) -> bool {
+ self.esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(self.far.0) == UART_PAGE
+ }
+}
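The actual vector handler is outside this excerpt, but a hedged sketch of how these helpers compose with the new `memory` re-exports might look as follows (the `handle_exception` name is illustrative, not a function added by this change):

    use vmbase::exceptions::{ArmException, Esr, HandleExceptionError};
    use vmbase::memory::{handle_permission_fault, handle_translation_fault};

    fn handle_exception(_elr: u64) -> Result<(), HandleExceptionError> {
        let exception = ArmException::from_el1_regs();
        match exception.esr {
            Esr::DataAbortTranslationFault => handle_translation_fault(exception.far),
            Esr::DataAbortPermissionFault => handle_permission_fault(exception.far),
            _ => Err(HandleExceptionError::UnknownException),
        }
    }

On failure, such a handler would typically call `exception.print(...)` before aborting; `print` deliberately stays quiet when the fault came from the UART page itself.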
diff --git a/vmbase/src/hvc.rs b/vmbase/src/hvc.rs
index 9a5e716..ebd1625 100644
--- a/vmbase/src/hvc.rs
+++ b/vmbase/src/hvc.rs
@@ -22,20 +22,19 @@
};
const ARM_SMCCC_TRNG_VERSION: u32 = 0x8400_0050;
-#[allow(dead_code)]
const ARM_SMCCC_TRNG_FEATURES: u32 = 0x8400_0051;
#[allow(dead_code)]
const ARM_SMCCC_TRNG_GET_UUID: u32 = 0x8400_0052;
#[allow(dead_code)]
const ARM_SMCCC_TRNG_RND32: u32 = 0x8400_0053;
-const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
+pub const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
-/// Returns the (major, minor) version tuple, as defined by the SMCCC TRNG.
-pub fn trng_version() -> trng::Result<(u16, u16)> {
+/// Returns the implemented version of the SMCCC TRNG interface.
+pub fn trng_version() -> trng::Result<trng::Version> {
let args = [0u64; 17];
let version = positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_VERSION, args)[0])?;
- Ok(((version >> 16) as u16, version as u16))
+ (version as u32 as i32).try_into()
}
pub type TrngRng64Entropy = (u64, u64, u64);
@@ -49,3 +48,10 @@
Ok((regs[1], regs[2], regs[3]))
}
+
+pub fn trng_features(fid: u32) -> trng::Result<u64> {
+ let mut args = [0u64; 17];
+ args[0] = fid as u64;
+
+ positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_FEATURES, args)[0])
+}
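`trng_features` follows the usual SMCCC FEATURES pattern: the function ID being queried is passed in the first argument, and a negative return (e.g. NOT_SUPPORTED) surfaces as an error. A hedged sketch of crate-internal usage, matching how `rand::init()` below probes for TRNG_RND64:

    match hvc::trng_features(hvc::ARM_SMCCC_TRNG_RND64) {
        Ok(_caps) => { /* TRNG_RND64 is callable; any capability bits are in the value */ }
        Err(hvc::trng::Error::NotSupported) => { /* no 64-bit TRNG entry point on this platform */ }
        Err(_) => { /* some other TRNG error */ }
    }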
diff --git a/vmbase/src/hvc/trng.rs b/vmbase/src/hvc/trng.rs
index 6331d66..efb86f6 100644
--- a/vmbase/src/hvc/trng.rs
+++ b/vmbase/src/hvc/trng.rs
@@ -16,7 +16,7 @@
use core::result;
/// Standard SMCCC TRNG error values as described in DEN 0098 1.0 REL0.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub enum Error {
/// The call is not supported by the implementation.
NotSupported,
@@ -55,3 +55,40 @@
}
pub type Result<T> = result::Result<T, Error>;
+
+/// A version of the SMCCC TRNG interface.
+#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
+pub struct Version {
+ pub major: u16,
+ pub minor: u16,
+}
+
+impl fmt::Display for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}.{}", self.major, self.minor)
+ }
+}
+
+impl fmt::Debug for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl TryFrom<i32> for Version {
+ type Error = Error;
+
+ fn try_from(value: i32) -> core::result::Result<Self, Error> {
+ if value < 0 {
+ Err((value as i64).into())
+ } else {
+ Ok(Self { major: (value >> 16) as u16, minor: value as u16 })
+ }
+ }
+}
+
+impl From<Version> for u32 {
+ fn from(version: Version) -> Self {
+ (u32::from(version.major) << 16) | u32::from(version.minor)
+ }
+}
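A short worked example of the conversions above: TRNG_VERSION packs the major version in bits [31:16] and the minor version in bits [15:0], while negative values are SMCCC TRNG error codes.

    let version = Version::try_from(0x0001_0002).unwrap();
    assert_eq!((version.major, version.minor), (1, 2));
    assert_eq!(u32::from(version), 0x0001_0002);
    assert!(Version::try_from(-1).is_err()); // negative values convert to Error, not Version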
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index e490faa..ca8756d 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -21,14 +21,15 @@
extern crate alloc;
pub mod arch;
-mod bionic;
+pub mod bionic;
pub mod console;
mod entry;
+pub mod exceptions;
pub mod fdt;
pub mod heap;
mod hvc;
pub mod layout;
-mod linker;
+pub mod linker;
pub mod logger;
pub mod memory;
pub mod power;
diff --git a/vmbase/src/logger.rs b/vmbase/src/logger.rs
index 226d905..9130918 100644
--- a/vmbase/src/logger.rs
+++ b/vmbase/src/logger.rs
@@ -20,7 +20,7 @@
use crate::console::println;
use core::sync::atomic::{AtomicBool, Ordering};
-use log::{LevelFilter, Log, Metadata, Record, SetLoggerError};
+use log::{Log, Metadata, Record, SetLoggerError};
struct Logger {
is_enabled: AtomicBool,
@@ -70,9 +70,8 @@
}
/// Initialize vmbase logger with a given max logging level.
-pub fn init(max_level: LevelFilter) -> Result<(), SetLoggerError> {
+pub(crate) fn init() -> Result<(), SetLoggerError> {
log::set_logger(&LOGGER)?;
- log::set_max_level(max_level);
Ok(())
}
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index 5e78565..898aa10 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -22,7 +22,10 @@
pub use error::MemoryTrackerError;
pub use page_table::PageTable;
-pub use shared::{alloc_shared, dealloc_shared, MemoryRange, MemoryTracker, MEMORY};
+pub use shared::{
+ alloc_shared, dealloc_shared, handle_permission_fault, handle_translation_fault, MemoryRange,
+ MemoryTracker, MEMORY,
+};
pub use util::{
flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
PAGE_SIZE, SIZE_128KB, SIZE_2MB, SIZE_4KB, SIZE_4MB, SIZE_64KB,
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 1e29c79..173c0ec 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -19,6 +19,7 @@
use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
+use crate::exceptions::HandleExceptionError;
use crate::util::RangeExt as _;
use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
@@ -26,11 +27,12 @@
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
+use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
-use hyp::{get_hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use hyp::{get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
@@ -178,10 +180,17 @@
return Err(MemoryTrackerError::Full);
}
- self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
- error!("Error during MMIO device mapping: {e}");
- MemoryTrackerError::FailedToMap
- })?;
+ if get_mmio_guard().is_some() {
+ self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
+ error!("Error during lazy MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ } else {
+ self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
+ error!("Error during MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ }
if self.mmio_regions.try_push(range).is_some() {
return Err(MemoryTrackerError::Full);
@@ -218,10 +227,12 @@
///
/// Note that they are not unmapped from the page table.
pub fn mmio_unmap_all(&mut self) -> Result<()> {
- for range in &self.mmio_regions {
- self.page_table
- .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
- .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+ if get_mmio_guard().is_some() {
+ for range in &self.mmio_regions {
+ self.page_table
+ .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+ .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+ }
}
Ok(())
}
@@ -263,6 +274,19 @@
Ok(())
}
+ /// Initialize the shared heap to use heap memory directly.
+ ///
+    /// When running on "non-protected" hypervisors, which permit the host direct access to guest
+    /// memory, there is no need to perform any memory sharing or to allocate buffers from a
+    /// dedicated region, so this function instructs the shared pool to use the global allocator.
+ pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+        // As MemorySharer only issues MEM_SHARE calls if the hypervisor supports them, using
+        // init_dynamic_shared_pool() internally on a non-protected platform makes use of the heap
+        // without any actual "dynamic memory sharing" taking place. As such, the granule can be
+        // set to that of the global_allocator, i.e. a byte.
+ self.init_dynamic_shared_pool(size_of::<u8>())
+ }
+
/// Unshares any memory that may have been shared.
pub fn unshare_all_memory(&mut self) {
drop(SHARED_MEMORY.lock().take());
@@ -270,13 +294,14 @@
/// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
/// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
- pub fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
+ fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
let page_start = VirtualAddress(page_4kb_of(addr.0));
let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
+ let mmio_guard = get_mmio_guard().unwrap();
self.page_table
.modify_range(&page_range, &verify_lazy_mapped_block)
.map_err(|_| MemoryTrackerError::InvalidPte)?;
- get_hypervisor().mmio_guard_map(page_start.0)?;
+ mmio_guard.map(page_start.0)?;
// Maps a single device page, breaking up block mappings if necessary.
self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
}
@@ -301,7 +326,7 @@
/// Handles permission fault for read-only blocks by setting writable-dirty state.
/// In general, this should be called from the exception handler when hardware dirty
/// state management is disabled or unavailable.
- pub fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
+ fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
self.page_table
.modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
.map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
@@ -384,11 +409,11 @@
let base = shared.as_ptr() as usize;
let end = base.checked_add(layout.size()).unwrap();
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
+ if let Some(mem_sharer) = get_mem_sharer() {
trace!("Sharing memory region {:#x?}", base..end);
for vaddr in (base..end).step_by(self.granule) {
let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
}
}
@@ -400,12 +425,12 @@
impl Drop for MemorySharer {
fn drop(&mut self) {
while let Some((base, layout)) = self.frames.pop() {
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
+ if let Some(mem_sharer) = get_mem_sharer() {
let end = base.checked_add(layout.size()).unwrap();
trace!("Unsharing memory region {:#x?}", base..end);
for vaddr in (base..end).step_by(self.granule) {
let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
}
}
@@ -461,9 +486,25 @@
// Since mmio_guard_map takes IPAs, if pvmfw moves non-ID address mapping, page_base
// should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
// virt_to_phys here, and just pass page_base instead.
- get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+ get_mmio_guard().unwrap().unmap(page_base).map_err(|e| {
error!("Error MMIO guard unmapping: {e}");
})?;
}
Ok(())
}
+
+/// Handles a translation fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_mmio_fault(far)?)
+}
+
+/// Handles a permission fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_permission_fault(far)?)
+}
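A hedged sketch of how a client might pick a pool strategy with the new helper; the surrounding setup and the visibility of `init_dynamic_shared_pool()` (only referenced in the comment above) are assumptions, not part of this change:

    // Uses the module's `Result` alias (MemoryTrackerError).
    fn init_shared_pool(memory: &mut MemoryTracker, granule: usize) -> Result<()> {
        if get_mem_sharer().is_some() {
            // Protected: buffers must be explicitly shared with the host, granule by granule.
            memory.init_dynamic_shared_pool(granule)
        } else {
            // Non-protected: the host can already access this memory, so just use the heap.
            memory.init_heap_shared_pool()
        }
    }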
diff --git a/vmbase/src/rand.rs b/vmbase/src/rand.rs
index 26fb51a..6b8d7e0 100644
--- a/vmbase/src/rand.rs
+++ b/vmbase/src/rand.rs
@@ -14,16 +14,29 @@
//! Functions and drivers for obtaining true entropy.
-use crate::hvc;
+use crate::hvc::{self, TrngRng64Entropy};
use core::fmt;
use core::mem::size_of;
+use smccc::{self, Hvc};
/// Error type for rand operations.
pub enum Error {
+ /// No source of entropy found.
+ NoEntropySource,
+ /// Error during architectural SMCCC call.
+ Smccc(smccc::arch::Error),
/// Error during SMCCC TRNG call.
Trng(hvc::trng::Error),
+ /// Unsupported SMCCC version.
+ UnsupportedSmcccVersion(smccc::arch::Version),
/// Unsupported SMCCC TRNG version.
- UnsupportedVersion((u16, u16)),
+ UnsupportedTrngVersion(hvc::trng::Version),
+}
+
+impl From<smccc::arch::Error> for Error {
+ fn from(e: smccc::arch::Error) -> Self {
+ Self::Smccc(e)
+ }
}
impl From<hvc::trng::Error> for Error {
@@ -38,10 +51,11 @@
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
+ Self::NoEntropySource => write!(f, "No source of entropy available"),
+ Self::Smccc(e) => write!(f, "Architectural SMCCC error: {e}"),
Self::Trng(e) => write!(f, "SMCCC TRNG error: {e}"),
- Self::UnsupportedVersion((x, y)) => {
- write!(f, "Unsupported SMCCC TRNG version v{x}.{y}")
- }
+ Self::UnsupportedSmcccVersion(v) => write!(f, "Unsupported SMCCC version {v}"),
+ Self::UnsupportedTrngVersion(v) => write!(f, "Unsupported SMCCC TRNG version {v}"),
}
}
}
@@ -53,15 +67,35 @@
}
/// Configure the source of entropy.
-pub fn init() -> Result<()> {
- match hvc::trng_version()? {
- (1, _) => Ok(()),
- version => Err(Error::UnsupportedVersion(version)),
+pub(crate) fn init() -> Result<()> {
+ // SMCCC TRNG requires SMCCC v1.1.
+ match smccc::arch::version::<Hvc>()? {
+ smccc::arch::Version { major: 1, minor } if minor >= 1 => (),
+ version => return Err(Error::UnsupportedSmcccVersion(version)),
}
+
+ // TRNG_RND requires SMCCC TRNG v1.0.
+ match hvc::trng_version()? {
+ hvc::trng::Version { major: 1, minor: _ } => (),
+ version => return Err(Error::UnsupportedTrngVersion(version)),
+ }
+
+    // TRNG_RND64 doesn't define any special capabilities, so we ignore the successful result.
+ let _ = hvc::trng_features(hvc::ARM_SMCCC_TRNG_RND64).map_err(|e| {
+ if e == hvc::trng::Error::NotSupported {
+ // SMCCC TRNG is currently our only source of entropy.
+ Error::NoEntropySource
+ } else {
+ e.into()
+ }
+ })?;
+
+ Ok(())
}
-fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
- const MAX_BYTES_PER_CALL: usize = size_of::<hvc::TrngRng64Entropy>();
+/// Fills a slice of bytes with true entropy.
+pub fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
+ const MAX_BYTES_PER_CALL: usize = size_of::<TrngRng64Entropy>();
let (aligned, remainder) = s.split_at_mut(s.len() - s.len() % MAX_BYTES_PER_CALL);
@@ -89,13 +123,14 @@
Ok(())
}
-fn repeat_trng_rnd(n_bytes: usize) -> hvc::trng::Result<hvc::TrngRng64Entropy> {
+fn repeat_trng_rnd(n_bytes: usize) -> Result<TrngRng64Entropy> {
let bits = usize::try_from(u8::BITS).unwrap();
let n_bits = (n_bytes * bits).try_into().unwrap();
loop {
match hvc::trng_rnd64(n_bits) {
- Err(hvc::trng::Error::NoEntropy) => continue,
- res => return res,
+ Ok(entropy) => return Ok(entropy),
+ Err(hvc::trng::Error::NoEntropy) => (),
+ Err(e) => return Err(e.into()),
}
}
}
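`fill_with_entropy` works in TRNG_RND64-sized chunks: each call returns `TrngRng64Entropy = (u64, u64, u64)`, i.e. at most 24 bytes, and any remainder is covered by one final partial call. A small worked example of the split (the actual HVC calls are out of scope here):

    use core::mem::size_of;

    const MAX_BYTES_PER_CALL: usize = size_of::<(u64, u64, u64)>();
    assert_eq!(MAX_BYTES_PER_CALL, 24);

    // A 30-byte buffer is filled by one full 24-byte call plus one 6-byte partial call.
    let mut buf = [0u8; 30];
    let split = buf.len() - buf.len() % MAX_BYTES_PER_CALL;
    let (aligned, remainder) = buf.split_at_mut(split);
    assert_eq!((aligned.len(), remainder.len()), (24, 6));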