Merge "[vmbase] Move current exception handling helper functions to vmbase" into main
diff --git a/libs/hyp/Android.bp b/libs/hyp/Android.bp
index 1bb8722..8baf9dd 100644
--- a/libs/hyp/Android.bp
+++ b/libs/hyp/Android.bp
@@ -8,7 +8,6 @@
     srcs: ["src/lib.rs"],
     prefer_rlib: true,
     rustlibs: [
-        "libbitflags",
         "libonce_cell_nostd",
         "libsmccc",
         "libuuid_nostd",
diff --git a/libs/hyp/src/hypervisor/common.rs b/libs/hyp/src/hypervisor/common.rs
index ec7d168..7c030a1 100644
--- a/libs/hyp/src/hypervisor/common.rs
+++ b/libs/hyp/src/hypervisor/common.rs
@@ -16,47 +16,49 @@
 
 use crate::error::Result;
 use crate::util::SIZE_4KB;
-use bitflags::bitflags;
 
 /// Expected MMIO guard granule size, validated during MMIO guard initialization.
 pub const MMIO_GUARD_GRANULE_SIZE: usize = SIZE_4KB;
 
-bitflags! {
-    /// Capabilities that Hypervisor backends can declare support for.
-    pub struct HypervisorCap: u32 {
-        /// Capability for guest to share its memory with host at runtime.
-        const DYNAMIC_MEM_SHARE = 0b1;
+/// Trait for the hypervisor.
+pub trait Hypervisor {
+    /// Returns the hypervisor's MMIO_GUARD implementation, if any.
+    fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+        None
+    }
+
+    /// Returns the hypervisor's dynamic memory sharing implementation, if any.
+    fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+        None
     }
 }
 
-/// Trait for the hypervisor.
-pub trait Hypervisor {
+pub trait MmioGuardedHypervisor {
     /// Initializes the hypervisor by enrolling a MMIO guard and checking the memory granule size.
     /// By enrolling, all MMIO will be blocked unless allow-listed with `mmio_guard_map`.
     /// Protected VMs are auto-enrolled.
-    fn mmio_guard_init(&self) -> Result<()>;
+    fn init(&self) -> Result<()>;
 
     /// Maps a page containing the given memory address to the hypervisor MMIO guard.
     /// The page size corresponds to the MMIO guard granule size.
-    fn mmio_guard_map(&self, addr: usize) -> Result<()>;
+    fn map(&self, addr: usize) -> Result<()>;
 
     /// Unmaps a page containing the given memory address from the hypervisor MMIO guard.
     /// The page size corresponds to the MMIO guard granule size.
-    fn mmio_guard_unmap(&self, addr: usize) -> Result<()>;
+    fn unmap(&self, addr: usize) -> Result<()>;
+}
 
+pub trait MemSharingHypervisor {
     /// Shares a region of memory with host, granting it read, write and execute permissions.
     /// The size of the region is equal to the memory protection granule returned by
     /// [`hyp_meminfo`].
-    fn mem_share(&self, base_ipa: u64) -> Result<()>;
+    fn share(&self, base_ipa: u64) -> Result<()>;
 
     /// Revokes access permission from host to a memory region previously shared with
     /// [`mem_share`]. The size of the region is equal to the memory protection granule returned by
     /// [`hyp_meminfo`].
-    fn mem_unshare(&self, base_ipa: u64) -> Result<()>;
+    fn unshare(&self, base_ipa: u64) -> Result<()>;
 
     /// Returns the memory protection granule size in bytes.
-    fn memory_protection_granule(&self) -> Result<usize>;
-
-    /// Check if required capabilities are supported.
-    fn has_cap(&self, cap: HypervisorCap) -> bool;
+    fn granule(&self) -> Result<usize>;
 }
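
To make the trait split concrete, here is a minimal sketch of a hypothetical backend that only supports MMIO guard; `DummyHypervisor` and its trivial bodies are illustrative, not part of this change:

```rust
use super::common::{Hypervisor, MmioGuardedHypervisor};
use crate::error::Result;

struct DummyHypervisor;

impl Hypervisor for DummyHypervisor {
    // Advertise MMIO guard support by overriding the default None.
    fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
        Some(self)
    }
    // as_mem_sharer() keeps its default, so callers see no MEM_SHARE support.
}

impl MmioGuardedHypervisor for DummyHypervisor {
    fn init(&self) -> Result<()> {
        Ok(())
    }

    fn map(&self, _addr: usize) -> Result<()> {
        Ok(())
    }

    fn unmap(&self, _addr: usize) -> Result<()> {
        Ok(())
    }
}
```
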
diff --git a/libs/hyp/src/hypervisor/geniezone.rs b/libs/hyp/src/hypervisor/geniezone.rs
index 0741978..24eb89e 100644
--- a/libs/hyp/src/hypervisor/geniezone.rs
+++ b/libs/hyp/src/hypervisor/geniezone.rs
@@ -14,7 +14,9 @@
 
 //! Wrappers around calls to the GenieZone hypervisor.
 
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{
+    Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
 use crate::error::{Error, Result};
 use crate::util::page_address;
 use core::fmt::{self, Display, Formatter};
@@ -40,7 +42,6 @@
     // and share the same identification along with guest VMs.
     // The previous uuid was removed due to duplication elsewhere.
     pub const UUID: Uuid = uuid!("7e134ed0-3b82-488d-8cee-69c19211dbe7");
-    const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
 }
 
 /// Error from a GenieZone HVC call.
@@ -85,7 +86,17 @@
 }
 
 impl Hypervisor for GeniezoneHypervisor {
-    fn mmio_guard_init(&self) -> Result<()> {
+    fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+        Some(self)
+    }
+
+    fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+        Some(self)
+    }
+}
+
+impl MmioGuardedHypervisor for GeniezoneHypervisor {
+    fn init(&self) -> Result<()> {
         mmio_guard_enroll()?;
         let mmio_granule = mmio_guard_granule()?;
         if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
@@ -94,43 +105,41 @@
         Ok(())
     }
 
-    fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+    fn map(&self, addr: usize) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = page_address(addr);
 
         checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_MAP_FUNC_ID, args)
     }
 
-    fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+    fn unmap(&self, addr: usize) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = page_address(addr);
 
         checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_UNMAP_FUNC_ID, args)
     }
+}
 
-    fn mem_share(&self, base_ipa: u64) -> Result<()> {
+impl MemSharingHypervisor for GeniezoneHypervisor {
+    fn share(&self, base_ipa: u64) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = base_ipa;
 
         checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_SHARE, args)
     }
 
-    fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+    fn unshare(&self, base_ipa: u64) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = base_ipa;
 
         checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_UNSHARE, args)
     }
 
-    fn memory_protection_granule(&self) -> Result<usize> {
+    fn granule(&self) -> Result<usize> {
         let args = [0u64; 17];
         let granule = checked_hvc64(ARM_SMCCC_GZVM_FUNC_HYP_MEMINFO, args)?;
         Ok(granule.try_into().unwrap())
     }
-
-    fn has_cap(&self, cap: HypervisorCap) -> bool {
-        Self::CAPABILITIES.contains(cap)
-    }
 }
 
 fn mmio_guard_granule() -> Result<usize> {
diff --git a/libs/hyp/src/hypervisor/gunyah.rs b/libs/hyp/src/hypervisor/gunyah.rs
index 252430f..45c01bf 100644
--- a/libs/hyp/src/hypervisor/gunyah.rs
+++ b/libs/hyp/src/hypervisor/gunyah.rs
@@ -1,5 +1,4 @@
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
-use crate::error::Result;
+use super::common::Hypervisor;
 use uuid::{uuid, Uuid};
 
 pub(super) struct GunyahHypervisor;
@@ -8,32 +7,4 @@
     pub const UUID: Uuid = uuid!("c1d58fcd-a453-5fdb-9265-ce36673d5f14");
 }
 
-impl Hypervisor for GunyahHypervisor {
-    fn mmio_guard_init(&self) -> Result<()> {
-        Ok(())
-    }
-
-    fn mmio_guard_map(&self, _addr: usize) -> Result<()> {
-        Ok(())
-    }
-
-    fn mmio_guard_unmap(&self, _addr: usize) -> Result<()> {
-        Ok(())
-    }
-
-    fn mem_share(&self, _base_ipa: u64) -> Result<()> {
-        unimplemented!();
-    }
-
-    fn mem_unshare(&self, _base_ipa: u64) -> Result<()> {
-        unimplemented!();
-    }
-
-    fn memory_protection_granule(&self) -> Result<usize> {
-        Ok(MMIO_GUARD_GRANULE_SIZE)
-    }
-
-    fn has_cap(&self, _cap: HypervisorCap) -> bool {
-        false
-    }
-}
+impl Hypervisor for GunyahHypervisor {}
diff --git a/libs/hyp/src/hypervisor/kvm.rs b/libs/hyp/src/hypervisor/kvm.rs
index a89f9b8..a95b8de 100644
--- a/libs/hyp/src/hypervisor/kvm.rs
+++ b/libs/hyp/src/hypervisor/kvm.rs
@@ -14,7 +14,9 @@
 
 //! Wrappers around calls to the KVM hypervisor.
 
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{
+    Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE,
+};
 use crate::error::{Error, Result};
 use crate::util::page_address;
 use core::fmt::{self, Display, Formatter};
@@ -70,17 +72,30 @@
 const VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID: u32 = 0xc6000007;
 const VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID: u32 = 0xc6000008;
 
-pub(super) struct KvmHypervisor;
+pub(super) struct RegularKvmHypervisor;
 
-impl KvmHypervisor {
+impl RegularKvmHypervisor {
     // Based on ARM_SMCCC_VENDOR_HYP_UID_KVM_REG values listed in Linux kernel source:
     // https://github.com/torvalds/linux/blob/master/include/linux/arm-smccc.h
     pub(super) const UUID: Uuid = uuid!("28b46fb6-2ec5-11e9-a9ca-4b564d003a74");
-    const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
 }
 
-impl Hypervisor for KvmHypervisor {
-    fn mmio_guard_init(&self) -> Result<()> {
+impl Hypervisor for RegularKvmHypervisor {}
+
+pub(super) struct ProtectedKvmHypervisor;
+
+impl Hypervisor for ProtectedKvmHypervisor {
+    fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+        Some(self)
+    }
+
+    fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+        Some(self)
+    }
+}
+
+impl MmioGuardedHypervisor for ProtectedKvmHypervisor {
+    fn init(&self) -> Result<()> {
         mmio_guard_enroll()?;
         let mmio_granule = mmio_guard_granule()?;
         if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
@@ -89,7 +104,7 @@
         Ok(())
     }
 
-    fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+    fn map(&self, addr: usize) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = page_address(addr);
 
@@ -99,7 +114,7 @@
             .map_err(|e| Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID))
     }
 
-    fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+    fn unmap(&self, addr: usize) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = page_address(addr);
 
@@ -110,30 +125,28 @@
             Err(e) => Err(Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID)),
         }
     }
+}
 
-    fn mem_share(&self, base_ipa: u64) -> Result<()> {
+impl MemSharingHypervisor for ProtectedKvmHypervisor {
+    fn share(&self, base_ipa: u64) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = base_ipa;
 
         checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_SHARE, args)
     }
 
-    fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+    fn unshare(&self, base_ipa: u64) -> Result<()> {
         let mut args = [0u64; 17];
         args[0] = base_ipa;
 
         checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE, args)
     }
 
-    fn memory_protection_granule(&self) -> Result<usize> {
+    fn granule(&self) -> Result<usize> {
         let args = [0u64; 17];
         let granule = checked_hvc64(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO, args)?;
         Ok(granule.try_into().unwrap())
     }
-
-    fn has_cap(&self, cap: HypervisorCap) -> bool {
-        Self::CAPABILITIES.contains(cap)
-    }
 }
 
 fn mmio_guard_granule() -> Result<usize> {
diff --git a/libs/hyp/src/hypervisor/mod.rs b/libs/hyp/src/hypervisor/mod.rs
index 93d53fe..bc9e406 100644
--- a/libs/hyp/src/hypervisor/mod.rs
+++ b/libs/hyp/src/hypervisor/mod.rs
@@ -23,30 +23,31 @@
 
 use crate::error::{Error, Result};
 use alloc::boxed::Box;
-pub use common::Hypervisor;
-pub use common::HypervisorCap;
-pub use common::MMIO_GUARD_GRANULE_SIZE;
+use common::Hypervisor;
+pub use common::{MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE};
 pub use geniezone::GeniezoneError;
 use geniezone::GeniezoneHypervisor;
 use gunyah::GunyahHypervisor;
 pub use kvm::KvmError;
-use kvm::KvmHypervisor;
+use kvm::{ProtectedKvmHypervisor, RegularKvmHypervisor};
 use once_cell::race::OnceBox;
 use smccc::hvc64;
 use uuid::Uuid;
 
 enum HypervisorBackend {
-    Kvm,
+    RegularKvm,
     Gunyah,
     Geniezone,
+    ProtectedKvm,
 }
 
 impl HypervisorBackend {
     fn get_hypervisor(&self) -> &'static dyn Hypervisor {
         match self {
-            Self::Kvm => &KvmHypervisor,
+            Self::RegularKvm => &RegularKvmHypervisor,
             Self::Gunyah => &GunyahHypervisor,
             Self::Geniezone => &GeniezoneHypervisor,
+            Self::ProtectedKvm => &ProtectedKvmHypervisor,
         }
     }
 }
@@ -58,7 +59,16 @@
         match uuid {
             GeniezoneHypervisor::UUID => Ok(HypervisorBackend::Geniezone),
             GunyahHypervisor::UUID => Ok(HypervisorBackend::Gunyah),
-            KvmHypervisor::UUID => Ok(HypervisorBackend::Kvm),
+            RegularKvmHypervisor::UUID => {
+                // Protected KVM has the same UUID, so differentiate based on MEM_SHARE.
+                match ProtectedKvmHypervisor.as_mem_sharer().unwrap().granule() {
+                    Ok(_) => Ok(HypervisorBackend::ProtectedKvm),
+                    Err(Error::KvmError(KvmError::NotSupported, _)) => {
+                        Ok(HypervisorBackend::RegularKvm)
+                    }
+                    Err(e) => Err(e),
+                }
+            }
             u => Err(Error::UnsupportedHypervisorUuid(u)),
         }
     }
@@ -95,8 +105,18 @@
 }
 
 /// Gets the hypervisor singleton.
-pub fn get_hypervisor() -> &'static dyn Hypervisor {
+fn get_hypervisor() -> &'static dyn Hypervisor {
     static HYPERVISOR: OnceBox<HypervisorBackend> = OnceBox::new();
 
     HYPERVISOR.get_or_init(|| Box::new(detect_hypervisor())).get_hypervisor()
 }
+
+/// Gets the MMIO_GUARD hypervisor singleton, if any.
+pub fn get_mmio_guard() -> Option<&'static dyn MmioGuardedHypervisor> {
+    get_hypervisor().as_mmio_guard()
+}
+
+/// Gets the dynamic memory sharing hypervisor singleton, if any.
+pub fn get_mem_sharer() -> Option<&'static dyn MemSharingHypervisor> {
+    get_hypervisor().as_mem_sharer()
+}
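
With the singleton accessor now private, clients probe capabilities through the two new getters. A hedged sketch of the calling convention that replaces the old `has_cap()`/`mmio_guard_*` calls (`capability_probe` and the UART address are placeholders):

```rust
use hyp::{get_mem_sharer, get_mmio_guard};

fn capability_probe() -> Result<(), hyp::Error> {
    // Only platforms with an MMIO guard return Some here.
    if let Some(mmio_guard) = get_mmio_guard() {
        mmio_guard.init()?;
        mmio_guard.map(0x9000_0000)?; // placeholder UART base address
    }
    // Replaces has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) + memory_protection_granule().
    if let Some(mem_sharer) = get_mem_sharer() {
        let granule = mem_sharer.granule()?;
        assert!(granule.is_power_of_two());
    }
    Ok(())
}
```
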
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 32a59d1..486a181 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -21,6 +21,6 @@
 mod util;
 
 pub use error::{Error, Result};
-pub use hypervisor::{get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE};
+pub use hypervisor::{get_mem_sharer, get_mmio_guard, KvmError, MMIO_GUARD_GRANULE_SIZE};
 
 use hypervisor::GeniezoneError;
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 3d2fea8..2d1c418 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -23,7 +23,7 @@
 use core::num::NonZeroUsize;
 use core::ops::Range;
 use core::slice;
-use hyp::{get_hypervisor, HypervisorCap};
+use hyp::{get_mem_sharer, get_mmio_guard};
 use log::debug;
 use log::error;
 use log::info;
@@ -33,10 +33,9 @@
 use vmbase::{
     configure_heap, console,
     layout::{self, crosvm},
-    logger, main,
+    main,
     memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
     power::reboot,
-    rand,
 };
 use zeroize::Zeroize;
 
@@ -112,8 +111,8 @@
             RebootReason::InvalidFdt
         })?;
 
-        if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
-            let granule = get_hypervisor().memory_protection_granule().map_err(|e| {
+        if let Some(mem_sharer) = get_mem_sharer() {
+            let granule = mem_sharer.granule().map_err(|e| {
                 error!("Failed to get memory protection granule: {e}");
                 RebootReason::InternalError
             })?;
@@ -192,21 +191,7 @@
     // - only perform logging once the logger has been initialized
     // - only access non-pvmfw memory once (and while) it has been mapped
 
-    logger::init(LevelFilter::Info).map_err(|_| RebootReason::InternalError)?;
-
-    // Use debug!() to avoid printing to the UART if we failed to configure it as only local
-    // builds that have tweaked the logger::init() call will actually attempt to log the message.
-
-    get_hypervisor().mmio_guard_init().map_err(|e| {
-        debug!("{e}");
-        RebootReason::InternalError
-    })?;
-
-    get_hypervisor().mmio_guard_map(console::BASE_ADDRESS).map_err(|e| {
-        debug!("Failed to configure the UART: {e}");
-        RebootReason::InternalError
-    })?;
-
+    log::set_max_level(LevelFilter::Info);
     crypto::init();
 
     let page_table = memory::init_page_table().map_err(|e| {
@@ -235,11 +220,6 @@
 
     let slices = MemorySlices::new(fdt, payload, payload_size)?;
 
-    rand::init().map_err(|e| {
-        error!("Failed to initialize rand: {e}");
-        RebootReason::InternalError
-    })?;
-
     // This wrapper allows main() to be blissfully ignorant of platform details.
     let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
 
@@ -253,10 +233,12 @@
     })?;
     // Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
     MEMORY.lock().as_mut().unwrap().unshare_all_memory();
-    get_hypervisor().mmio_guard_unmap(console::BASE_ADDRESS).map_err(|e| {
-        error!("Failed to unshare the UART: {e}");
-        RebootReason::InternalError
-    })?;
+    if let Some(mmio_guard) = get_mmio_guard() {
+        mmio_guard.unmap(console::BASE_ADDRESS).map_err(|e| {
+            error!("Failed to unshare the UART: {e}");
+            RebootReason::InternalError
+        })?;
+    }
 
     // Drop MemoryTracker and deactivate page table.
     drop(MEMORY.lock().take());
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 5930ec9..27ab719 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -30,7 +30,7 @@
 
 /// Returns memory range reserved for the appended payload.
 pub fn appended_payload_range() -> Range<VirtualAddress> {
-    let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
+    let start = align_up(layout::binary_end().0, SIZE_4KB).unwrap();
     // pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
     let end = align_up(start, SIZE_2MB).unwrap();
     VirtualAddress(start)..VirtualAddress(end)
diff --git a/rialto/src/error.rs b/rialto/src/error.rs
index 84228c4..c326566 100644
--- a/rialto/src/error.rs
+++ b/rialto/src/error.rs
@@ -29,8 +29,6 @@
     Hypervisor(HypervisorError),
     /// Failed when attempting to map some range in the page table.
     PageTableMapping(MapError),
-    /// Failed to initialize the logger.
-    LoggerInit,
     /// Invalid FDT.
     InvalidFdt(FdtError),
     /// Invalid PCI.
@@ -48,7 +46,6 @@
             Self::PageTableMapping(e) => {
                 write!(f, "Failed when attempting to map some range in the page table: {e}.")
             }
-            Self::LoggerInit => write!(f, "Failed to initialize the logger."),
             Self::InvalidFdt(e) => write!(f, "Invalid FDT: {e}"),
             Self::InvalidPci(e) => write!(f, "Invalid PCI: {e}"),
             Self::MemoryOperationFailed(e) => write!(f, "Failed memory operation: {e}"),
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 5e693c8..3e0485d 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -24,10 +24,9 @@
 
 use crate::error::{Error, Result};
 use core::num::NonZeroUsize;
-use core::result;
 use core::slice;
 use fdtpci::PciInfo;
-use hyp::{get_hypervisor, HypervisorCap, KvmError};
+use hyp::{get_mem_sharer, get_mmio_guard};
 use libfdt::FdtError;
 use log::{debug, error, info};
 use vmbase::{
@@ -52,21 +51,6 @@
     Ok(page_table)
 }
 
-fn try_init_logger() -> Result<bool> {
-    let mmio_guard_supported = match get_hypervisor().mmio_guard_init() {
-        // pKVM blocks MMIO by default, we need to enable MMIO guard to support logging.
-        Ok(()) => {
-            get_hypervisor().mmio_guard_map(vmbase::console::BASE_ADDRESS)?;
-            true
-        }
-        // MMIO guard enroll is not supported in unprotected VM.
-        Err(hyp::Error::MmioGuardNotsupported) => false,
-        Err(e) => return Err(e.into()),
-    };
-    vmbase::logger::init(log::LevelFilter::Debug).map_err(|_| Error::LoggerInit)?;
-    Ok(mmio_guard_supported)
-}
-
 /// # Safety
 ///
 /// Behavior is undefined if any of the following conditions are violated:
@@ -98,14 +82,14 @@
         e
     })?;
 
-    if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
-        let granule = memory_protection_granule()?;
+    if let Some(mem_sharer) = get_mem_sharer() {
+        let granule = mem_sharer.granule()?;
         MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
             error!("Failed to initialize dynamically shared pool.");
             e
         })?;
-    } else {
-        let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().ok_or_else(|| {
+    } else if let Ok(swiotlb_info) = SwiotlbInfo::new_from_fdt(fdt) {
+        let range = swiotlb_info.fixed_range().ok_or_else(|| {
             error!("Pre-shared pool range not specified in swiotlb node");
             Error::from(FdtError::BadValue)
         })?;
@@ -113,6 +97,12 @@
             error!("Failed to initialize pre-shared pool.");
             e
         })?;
+    } else {
+        info!("No MEM_SHARE capability detected or swiotlb found: allocating buffers from heap.");
+        MEMORY.lock().as_mut().unwrap().init_heap_shared_pool().map_err(|e| {
+            error!("Failed to initialize heap-based pseudo-shared pool.");
+            e
+        })?;
     }
 
     let pci_info = PciInfo::from_fdt(fdt)?;
@@ -123,46 +113,34 @@
     Ok(())
 }
 
-fn memory_protection_granule() -> result::Result<usize, hyp::Error> {
-    match get_hypervisor().memory_protection_granule() {
-        Ok(granule) => Ok(granule),
-        // Take the default page size when KVM call is not supported in non-protected VMs.
-        Err(hyp::Error::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
-        Err(e) => Err(e),
-    }
-}
-
-fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
+fn try_unshare_all_memory() -> Result<()> {
     info!("Starting unsharing memory...");
 
     // No logging after unmapping UART.
-    if mmio_guard_supported {
-        get_hypervisor().mmio_guard_unmap(vmbase::console::BASE_ADDRESS)?;
+    if let Some(mmio_guard) = get_mmio_guard() {
+        mmio_guard.unmap(vmbase::console::BASE_ADDRESS)?;
     }
     // Unshares all memory and deactivates page table.
     drop(MEMORY.lock().take());
     Ok(())
 }
 
-fn unshare_all_memory(mmio_guard_supported: bool) {
-    if let Err(e) = try_unshare_all_memory(mmio_guard_supported) {
+fn unshare_all_memory() {
+    if let Err(e) = try_unshare_all_memory() {
         error!("Failed to unshare the memory: {e}");
     }
 }
 
 /// Entry point for Rialto.
 pub fn main(fdt_addr: u64, _a1: u64, _a2: u64, _a3: u64) {
-    let Ok(mmio_guard_supported) = try_init_logger() else {
-        // Don't log anything if the logger initialization fails.
-        reboot();
-    };
+    log::set_max_level(log::LevelFilter::Debug);
     // SAFETY: `fdt_addr` is supposed to be a valid pointer and points to
     // a valid `Fdt`.
     match unsafe { try_main(fdt_addr as usize) } {
-        Ok(()) => unshare_all_memory(mmio_guard_supported),
+        Ok(()) => unshare_all_memory(),
         Err(e) => {
             error!("Rialto failed with {e}");
-            unshare_all_memory(mmio_guard_supported);
+            unshare_all_memory();
             reboot()
         }
     }
diff --git a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
index 014f9f0..9cf28c7 100644
--- a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
@@ -115,7 +115,7 @@
                 mAndroidDevice.supportsMicrodroid(/* protectedVm= */ true));
         assumeFalse("Test requires setprop for using custom pvmfw and adb root", isUserBuild());
 
-        mAndroidDevice.enableAdbRoot();
+        assumeTrue("Skip if adb root fails", mAndroidDevice.enableAdbRoot());
 
         // tradefed copies the test artifacts under /tmp when running tests,
         // so we should *find* the artifacts with the file name.
diff --git a/vmbase/entry.S b/vmbase/entry.S
index 9f6993a..9177a4a 100644
--- a/vmbase/entry.S
+++ b/vmbase/entry.S
@@ -63,72 +63,6 @@
 .set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
 .set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
 
-/* SMC function IDs */
-.set .L_SMCCC_VERSION_ID, 0x80000000
-.set .L_SMCCC_TRNG_VERSION_ID, 0x84000050
-.set .L_SMCCC_TRNG_FEATURES_ID, 0x84000051
-.set .L_SMCCC_TRNG_RND64_ID, 0xc4000053
-
-/* SMC function versions */
-.set .L_SMCCC_VERSION_1_1, 0x0101
-.set .L_SMCCC_TRNG_VERSION_1_0, 0x0100
-
-/* Bionic-compatible stack protector */
-.section .data.stack_protector, "aw"
-__bionic_tls:
-	.zero	40
-.global __stack_chk_guard
-__stack_chk_guard:
-	.quad	0
-
-/**
- * This macro stores a random value into a register.
- * If a TRNG backend is not present or if an error occurs, the value remains unchanged.
- */
-.macro rnd_reg reg:req
-	mov x20, x0
-	mov x21, x1
-	mov x22, x2
-	mov x23, x3
-
-	/* Verify SMCCC version >=1.1 */
-	hvc_call .L_SMCCC_VERSION_ID
-	cmp w0, 0
-	b.lt 100f
-	cmp w0, .L_SMCCC_VERSION_1_1
-	b.lt 100f
-
-	/* Verify TRNG ABI version 1.x */
-	hvc_call .L_SMCCC_TRNG_VERSION_ID
-	cmp w0, 0
-	b.lt 100f
-	cmp w0, .L_SMCCC_TRNG_VERSION_1_0
-	b.lt 100f
-
-	/* Call TRNG_FEATURES, ensure TRNG_RND is implemented */
-	mov_i x1, .L_SMCCC_TRNG_RND64_ID
-	hvc_call .L_SMCCC_TRNG_FEATURES_ID
-	cmp w0, 0
-	b.lt 100f
-
-	/* Call TRNG_RND, request 64 bits of entropy */
-	mov x1, #64
-	hvc_call .L_SMCCC_TRNG_RND64_ID
-	cmp x0, 0
-	b.lt 100f
-
-	mov \reg, x3
-	b 101f
-
-100:
-	reset_or_hang
-101:
-	mov x0, x20
-	mov x1, x21
-	mov x2, x22
-	mov x3, x23
-.endm
-
 /**
  * This is a generic entry point for an image. It carries out the operations required to prepare the
  * loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
@@ -222,18 +156,17 @@
 	adr x30, vector_table_el1
 	msr vbar_el1, x30
 
-	/* Set up Bionic-compatible thread-local storage. */
+	/*
+	 * Set up Bionic-compatible thread-local storage.
+	 *
+	 * Note that TPIDR_EL0 can't be configured from rust_entry because the
+	 * compiler will dereference it during function entry to access
+	 * __stack_chk_guard and Rust doesn't support LLVM's
+	 * __attribute__((no_stack_protector)).
+	 */
 	adr_l x30, __bionic_tls
 	msr tpidr_el0, x30
 
-	/* Randomize stack protector. */
-	rnd_reg x29
-	adr_l x30, __stack_chk_guard
-	str x29, [x30]
-
-	/* Write a null byte to the top of the stack guard to act as a string terminator. */
-	strb wzr, [x30]
-
 	/* Call into Rust code. */
 	bl rust_entry
 
diff --git a/vmbase/example/src/layout.rs b/vmbase/example/src/layout.rs
index 4d14b1c..fc578bc 100644
--- a/vmbase/example/src/layout.rs
+++ b/vmbase/example/src/layout.rs
@@ -15,7 +15,6 @@
 //! Memory layout.
 
 use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
-use core::arch::asm;
 use core::ops::Range;
 use log::info;
 use vmbase::layout;
@@ -29,14 +28,6 @@
     layout::stack_range(40 * PAGE_SIZE)
 }
 
-fn data_load_address() -> VirtualAddress {
-    VirtualAddress(layout::data_load_address())
-}
-
-fn binary_end() -> VirtualAddress {
-    VirtualAddress(layout::binary_end())
-}
-
 pub fn print_addresses() {
     let dtb = layout::dtb_range();
     info!("dtb:        {}..{} ({} bytes)", dtb.start, dtb.end, dtb.end - dtb.start);
@@ -44,14 +35,14 @@
     info!("text:       {}..{} ({} bytes)", text.start, text.end, text.end - text.start);
     let rodata = layout::rodata_range();
     info!("rodata:     {}..{} ({} bytes)", rodata.start, rodata.end, rodata.end - rodata.start);
-    info!("binary end: {}", binary_end());
+    info!("binary end: {}", layout::binary_end());
     let data = layout::data_range();
     info!(
         "data:       {}..{} ({} bytes, loaded at {})",
         data.start,
         data.end,
         data.end - data.start,
-        data_load_address(),
+        layout::data_load_address(),
     );
     let bss = layout::bss_range();
     info!("bss:        {}..{} ({} bytes)", bss.start, bss.end, bss.end - bss.start);
@@ -63,13 +54,3 @@
         boot_stack.end - boot_stack.start
     );
 }
-
-/// Bionic-compatible thread-local storage entry, at the given offset from TPIDR_EL0.
-pub fn bionic_tls(off: usize) -> u64 {
-    let mut base: usize;
-    unsafe {
-        asm!("mrs {base}, tpidr_el0", base = out(reg) base);
-        let ptr = (base + off) as *const u64;
-        *ptr
-    }
-}
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index 4176626..8086885 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -23,7 +23,7 @@
 
 extern crate alloc;
 
-use crate::layout::{bionic_tls, boot_stack_range, print_addresses, DEVICE_REGION};
+use crate::layout::{boot_stack_range, print_addresses, DEVICE_REGION};
 use crate::pci::{check_pci, get_bar_region};
 use aarch64_paging::paging::MemoryRegion;
 use aarch64_paging::MapError;
@@ -32,9 +32,9 @@
 use libfdt::Fdt;
 use log::{debug, error, info, trace, warn, LevelFilter};
 use vmbase::{
-    configure_heap, cstr,
-    layout::{dtb_range, rodata_range, scratch_range, stack_chk_guard, text_range},
-    logger, main,
+    bionic, configure_heap, cstr,
+    layout::{dtb_range, rodata_range, scratch_range, text_range},
+    linker, logger, main,
     memory::{PageTable, SIZE_64KB},
 };
 
@@ -69,7 +69,7 @@
 
 /// Entry point for VM bootloader.
 pub fn main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) {
-    logger::init(LevelFilter::Debug).unwrap();
+    log::set_max_level(LevelFilter::Debug);
 
     info!("Hello world");
     info!("x0={:#018x}, x1={:#018x}, x2={:#018x}, x3={:#018x}", arg0, arg1, arg2, arg3);
@@ -105,10 +105,20 @@
 }
 
 fn check_stack_guard() {
-    const BIONIC_TLS_STACK_GRD_OFF: usize = 40;
-
     info!("Testing stack guard");
-    assert_eq!(bionic_tls(BIONIC_TLS_STACK_GRD_OFF), stack_chk_guard());
+    // SAFETY: No concurrency issue should occur when running these tests.
+    let stack_guard = unsafe { bionic::TLS.stack_guard };
+    assert_ne!(stack_guard, 0);
+    // Check that a NUL terminator byte is added for C functions consuming strings from the stack.
+    assert_eq!(stack_guard.to_ne_bytes().last(), Some(&0));
+    // Check that the TLS and guard are properly accessible from the dedicated register.
+    assert_eq!(stack_guard, bionic::__get_tls().stack_guard);
+    // Check that the LLVM __stack_chk_guard alias is also properly set up.
+    assert_eq!(
+        stack_guard,
+        // SAFETY: No concurrency issue should occur when running these tests.
+        unsafe { linker::__stack_chk_guard },
+    );
 }
 
 fn check_data() {
diff --git a/vmbase/sections.ld b/vmbase/sections.ld
index 5232d30..c7ef0ec 100644
--- a/vmbase/sections.ld
+++ b/vmbase/sections.ld
@@ -107,6 +107,9 @@
 		. = init_stack_pointer;
 	} >writable_data
 
+	/* Make our Bionic stack protector compatible with mainline LLVM */
+	__stack_chk_guard = __bionic_tls + 40;
+
 	/*
 	 * Remove unused sections from the image.
 	 */
diff --git a/vmbase/src/bionic.rs b/vmbase/src/bionic.rs
index 5af9ebc..2ce0e83 100644
--- a/vmbase/src/bionic.rs
+++ b/vmbase/src/bionic.rs
@@ -23,9 +23,36 @@
 
 use crate::console;
 use crate::eprintln;
+use crate::read_sysreg;
 
 const EOF: c_int = -1;
 
+/// Bionic thread-local storage.
+#[repr(C)]
+pub struct Tls {
+    /// Unused.
+    _unused: [u8; 40],
+    /// Used by the compiler as the stack canary value.
+    pub stack_guard: u64,
+}
+
+/// Bionic TLS.
+///
+/// Provides the TLS used by Bionic code. A single instance suffices as vmbase is single-threaded.
+///
+/// Note that the linker script re-exports __bionic_tls.stack_guard as __stack_chk_guard for
+/// compatibility with non-Bionic LLVM.
+#[link_section = ".data.stack_protector"]
+#[export_name = "__bionic_tls"]
+pub static mut TLS: Tls = Tls { _unused: [0; 40], stack_guard: 0 };
+
+/// Gets a reference to the TLS from the dedicated system register.
+pub fn __get_tls() -> &'static mut Tls {
+    let tpidr = read_sysreg!("tpidr_el0");
+    // SAFETY: The register is currently only written to once, from entry.S, with a valid value.
+    unsafe { &mut *(tpidr as *mut Tls) }
+}
+
 #[no_mangle]
 extern "C" fn __stack_chk_fail() -> ! {
     panic!("stack guard check failed");
diff --git a/vmbase/src/entry.rs b/vmbase/src/entry.rs
index 0a96d86..24b5035 100644
--- a/vmbase/src/entry.rs
+++ b/vmbase/src/entry.rs
@@ -14,14 +14,51 @@
 
 //! Rust entry point.
 
-use crate::{console, heap, power::shutdown};
+use crate::{
+    bionic, console, heap, logger,
+    power::{reboot, shutdown},
+    rand,
+};
+use core::mem::size_of;
+use hyp::{self, get_mmio_guard};
+
+fn try_console_init() -> Result<(), hyp::Error> {
+    console::init();
+
+    if let Some(mmio_guard) = get_mmio_guard() {
+        mmio_guard.init()?;
+        mmio_guard.map(console::BASE_ADDRESS)?;
+    }
+
+    Ok(())
+}
 
 /// This is the entry point to the Rust code, called from the binary entry point in `entry.S`.
 #[no_mangle]
 extern "C" fn rust_entry(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
     // SAFETY: Only called once, from here, and inaccessible to client code.
     unsafe { heap::init() };
-    console::init();
+
+    if try_console_init().is_err() {
+        // Don't panic (or log) here to avoid accessing the console.
+        reboot()
+    }
+
+    // The logger starts at LevelFilter::Off (the log crate default); clients are expected to
+    // raise it with log::set_max_level().
+    logger::init().expect("Failed to initialize the logger");
+
+    const SIZE_OF_STACK_GUARD: usize = size_of::<u64>();
+    let mut stack_guard = [0u8; SIZE_OF_STACK_GUARD];
+    // We keep a null byte at the top of the stack guard to act as a string terminator.
+    let random_guard = &mut stack_guard[..(SIZE_OF_STACK_GUARD - 1)];
+
+    rand::init().expect("Failed to initialize a source of entropy");
+    rand::fill_with_entropy(random_guard).expect("Failed to get stack canary entropy");
+    bionic::__get_tls().stack_guard = u64::from_ne_bytes(stack_guard);
+
+    // Note: If rust_entry ever returned (which its `-> !` return type forbids), the
+    // compiler-injected stack guard comparison would detect a mismatch and call __stack_chk_fail.
+
     // SAFETY: `main` is provided by the application using the `main!` macro, and we make sure it
     // has the right type.
     unsafe {
@@ -37,16 +74,21 @@
 
 /// Marks the main function of the binary.
 ///
+/// Once main is entered, it can assume that:
+/// - The panic_handler has been configured and panic!() and friends are available;
+/// - The global_allocator has been configured and heap memory is available;
+/// - The logger has been configured and the log::{info, warn, error, ...} macros are available.
+///
 /// Example:
 ///
 /// ```rust
-/// use vmbase::{logger, main};
+/// use vmbase::main;
 /// use log::{info, LevelFilter};
 ///
 /// main!(my_main);
 ///
 /// fn my_main() {
-///     logger::init(LevelFilter::Info).unwrap();
+///     log::set_max_level(LevelFilter::Info);
 ///     info!("Hello world");
 /// }
 /// ```
diff --git a/vmbase/src/hvc.rs b/vmbase/src/hvc.rs
index 9a5e716..ebd1625 100644
--- a/vmbase/src/hvc.rs
+++ b/vmbase/src/hvc.rs
@@ -22,20 +22,19 @@
 };
 
 const ARM_SMCCC_TRNG_VERSION: u32 = 0x8400_0050;
-#[allow(dead_code)]
 const ARM_SMCCC_TRNG_FEATURES: u32 = 0x8400_0051;
 #[allow(dead_code)]
 const ARM_SMCCC_TRNG_GET_UUID: u32 = 0x8400_0052;
 #[allow(dead_code)]
 const ARM_SMCCC_TRNG_RND32: u32 = 0x8400_0053;
-const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
+pub const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
 
 /// Returns the (major, minor) version tuple, as defined by the SMCCC TRNG.
-pub fn trng_version() -> trng::Result<(u16, u16)> {
+pub fn trng_version() -> trng::Result<trng::Version> {
     let args = [0u64; 17];
 
     let version = positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_VERSION, args)[0])?;
-    Ok(((version >> 16) as u16, version as u16))
+    (version as u32 as i32).try_into()
 }
 
 pub type TrngRng64Entropy = (u64, u64, u64);
@@ -49,3 +48,10 @@
 
     Ok((regs[1], regs[2], regs[3]))
 }
+
+pub fn trng_features(fid: u32) -> trng::Result<u64> {
+    let mut args = [0u64; 17];
+    args[0] = fid as u64;
+
+    positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_FEATURES, args)[0])
+}
diff --git a/vmbase/src/hvc/trng.rs b/vmbase/src/hvc/trng.rs
index 6331d66..efb86f6 100644
--- a/vmbase/src/hvc/trng.rs
+++ b/vmbase/src/hvc/trng.rs
@@ -16,7 +16,7 @@
 use core::result;
 
 /// Standard SMCCC TRNG error values as described in DEN 0098 1.0 REL0.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum Error {
     /// The call is not supported by the implementation.
     NotSupported,
@@ -55,3 +55,40 @@
 }
 
 pub type Result<T> = result::Result<T, Error>;
+
+/// A version of the SMCCC TRNG interface.
+#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
+pub struct Version {
+    pub major: u16,
+    pub minor: u16,
+}
+
+impl fmt::Display for Version {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}.{}", self.major, self.minor)
+    }
+}
+
+impl fmt::Debug for Version {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(self, f)
+    }
+}
+
+impl TryFrom<i32> for Version {
+    type Error = Error;
+
+    fn try_from(value: i32) -> core::result::Result<Self, Error> {
+        if value < 0 {
+            Err((value as i64).into())
+        } else {
+            Ok(Self { major: (value >> 16) as u16, minor: value as u16 })
+        }
+    }
+}
+
+impl From<Version> for u32 {
+    fn from(version: Version) -> Self {
+        (u32::from(version.major) << 16) | u32::from(version.minor)
+    }
+}
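
A short hedged sketch of the conversions this type adds, usable from within vmbase since `hvc` remains a private module (the values are illustrative):

```rust
use crate::hvc::trng::Version;

fn version_round_trip() {
    // 0x0001_0002 encodes major=1, minor=2 in the SMCCC TRNG version format.
    let version = Version::try_from(0x0001_0002_i32).unwrap();
    assert_eq!((version.major, version.minor), (1, 2));
    assert_eq!(u32::from(version), 0x0001_0002);

    // Negative return values are mapped back onto the TRNG error space.
    assert!(Version::try_from(-1_i32).is_err());
}
```
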
diff --git a/vmbase/src/layout/mod.rs b/vmbase/src/layout/mod.rs
index ffa29e7..f7e8170 100644
--- a/vmbase/src/layout/mod.rs
+++ b/vmbase/src/layout/mod.rs
@@ -31,7 +31,8 @@
     ($symbol:ident) => {{
         // SAFETY: We're just getting the address of an extern static symbol provided by the linker,
         // not dereferencing it.
-        unsafe { addr_of!($crate::linker::$symbol) as usize }
+        let addr = unsafe { addr_of!($crate::linker::$symbol) as usize };
+        VirtualAddress(addr)
     }};
 }
 
@@ -42,7 +43,7 @@
         let start = linker_addr!($begin);
         let end = linker_addr!($end);
 
-        VirtualAddress(start)..VirtualAddress(end)
+        start..end
     }};
 }
 
@@ -74,10 +75,10 @@
 /// Writable data region for the stack.
 pub fn stack_range(stack_size: usize) -> Range<VirtualAddress> {
     let end = linker_addr!(init_stack_pointer);
-    let start = end.checked_sub(stack_size).unwrap();
+    let start = VirtualAddress(end.0.checked_sub(stack_size).unwrap());
     assert!(start >= linker_addr!(stack_limit));
 
-    VirtualAddress(start)..VirtualAddress(end)
+    start..end
 }
 
 /// All writable sections, excluding the stack.
@@ -93,12 +94,12 @@
 }
 
 /// Read-write data (original).
-pub fn data_load_address() -> usize {
+pub fn data_load_address() -> VirtualAddress {
     linker_addr!(data_lma)
 }
 
 /// End of the binary image.
-pub fn binary_end() -> usize {
+pub fn binary_end() -> VirtualAddress {
     linker_addr!(bin_end)
 }
 
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index f637a40..ca8756d 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -21,7 +21,7 @@
 extern crate alloc;
 
 pub mod arch;
-mod bionic;
+pub mod bionic;
 pub mod console;
 mod entry;
 pub mod exceptions;
@@ -29,7 +29,7 @@
 pub mod heap;
 mod hvc;
 pub mod layout;
-mod linker;
+pub mod linker;
 pub mod logger;
 pub mod memory;
 pub mod power;
diff --git a/vmbase/src/logger.rs b/vmbase/src/logger.rs
index 226d905..9130918 100644
--- a/vmbase/src/logger.rs
+++ b/vmbase/src/logger.rs
@@ -20,7 +20,7 @@
 
 use crate::console::println;
 use core::sync::atomic::{AtomicBool, Ordering};
-use log::{LevelFilter, Log, Metadata, Record, SetLoggerError};
+use log::{Log, Metadata, Record, SetLoggerError};
 
 struct Logger {
     is_enabled: AtomicBool,
@@ -70,9 +70,8 @@
 }
 
 /// Initialize vmbase logger with a given max logging level.
-pub fn init(max_level: LevelFilter) -> Result<(), SetLoggerError> {
+pub(crate) fn init() -> Result<(), SetLoggerError> {
     log::set_logger(&LOGGER)?;
-    log::set_max_level(max_level);
     Ok(())
 }
 
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index ebb55df..173c0ec 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -27,11 +27,12 @@
 use alloc::vec::Vec;
 use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
 use core::alloc::Layout;
+use core::mem::size_of;
 use core::num::NonZeroUsize;
 use core::ops::Range;
 use core::ptr::NonNull;
 use core::result;
-use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
+use hyp::{get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
 use log::{debug, error, trace};
 use once_cell::race::OnceBox;
 use spin::mutex::SpinMutex;
@@ -179,10 +180,17 @@
             return Err(MemoryTrackerError::Full);
         }
 
-        self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
-            error!("Error during MMIO device mapping: {e}");
-            MemoryTrackerError::FailedToMap
-        })?;
+        if get_mmio_guard().is_some() {
+            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
+                error!("Error during lazy MMIO device mapping: {e}");
+                MemoryTrackerError::FailedToMap
+            })?;
+        } else {
+            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
+                error!("Error during MMIO device mapping: {e}");
+                MemoryTrackerError::FailedToMap
+            })?;
+        }
 
         if self.mmio_regions.try_push(range).is_some() {
             return Err(MemoryTrackerError::Full);
@@ -219,10 +227,12 @@
     ///
     /// Note that they are not unmapped from the page table.
     pub fn mmio_unmap_all(&mut self) -> Result<()> {
-        for range in &self.mmio_regions {
-            self.page_table
-                .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
-                .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+        if get_mmio_guard().is_some() {
+            for range in &self.mmio_regions {
+                self.page_table
+                    .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+                    .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+            }
         }
         Ok(())
     }
@@ -264,6 +274,19 @@
         Ok(())
     }
 
+    /// Initialize the shared heap to use heap memory directly.
+    ///
+    /// When running on "non-protected" hypervisors which permit host direct accesses to guest
+    /// memory, there is no need to perform any memory sharing and/or allocate buffers from a
+    /// dedicated region so this function instructs the shared pool to use the global allocator.
+    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
+        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
+        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
+        // be set to the one of the global_allocator i.e. a byte.
+        self.init_dynamic_shared_pool(size_of::<u8>())
+    }
+
     /// Unshares any memory that may have been shared.
     pub fn unshare_all_memory(&mut self) {
         drop(SHARED_MEMORY.lock().take());
@@ -274,10 +297,11 @@
     fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
         let page_start = VirtualAddress(page_4kb_of(addr.0));
         let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
+        let mmio_guard = get_mmio_guard().unwrap();
         self.page_table
             .modify_range(&page_range, &verify_lazy_mapped_block)
             .map_err(|_| MemoryTrackerError::InvalidPte)?;
-        get_hypervisor().mmio_guard_map(page_start.0)?;
+        mmio_guard.map(page_start.0)?;
         // Maps a single device page, breaking up block mappings if necessary.
         self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
     }
@@ -362,7 +386,7 @@
 /// Unshares all pages when dropped.
 struct MemorySharer {
     granule: usize,
-    shared_regions: Vec<(usize, Layout)>,
+    frames: Vec<(usize, Layout)>,
 }
 
 impl MemorySharer {
@@ -370,7 +394,7 @@
     /// `granule` must be a power of 2.
     fn new(granule: usize, capacity: usize) -> Self {
         assert!(granule.is_power_of_two());
-        Self { granule, shared_regions: Vec::with_capacity(capacity) }
+        Self { granule, frames: Vec::with_capacity(capacity) }
     }
 
     /// Gets from the global allocator a granule-aligned region that suits `hint` and share it.
@@ -384,25 +408,30 @@
 
         let base = shared.as_ptr() as usize;
         let end = base.checked_add(layout.size()).unwrap();
-        trace!("Sharing memory region {:#x?}", base..end);
-        for vaddr in (base..end).step_by(self.granule) {
-            let vaddr = NonNull::new(vaddr as *mut _).unwrap();
-            get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
-        }
-        self.shared_regions.push((base, layout));
 
+        if let Some(mem_sharer) = get_mem_sharer() {
+            trace!("Sharing memory region {:#x?}", base..end);
+            for vaddr in (base..end).step_by(self.granule) {
+                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+            }
+        }
+
+        self.frames.push((base, layout));
         pool.add_frame(base, end);
     }
 }
 
 impl Drop for MemorySharer {
     fn drop(&mut self) {
-        while let Some((base, layout)) = self.shared_regions.pop() {
-            let end = base.checked_add(layout.size()).unwrap();
-            trace!("Unsharing memory region {:#x?}", base..end);
-            for vaddr in (base..end).step_by(self.granule) {
-                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
-                get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+        while let Some((base, layout)) = self.frames.pop() {
+            if let Some(mem_sharer) = get_mem_sharer() {
+                let end = base.checked_add(layout.size()).unwrap();
+                trace!("Unsharing memory region {:#x?}", base..end);
+                for vaddr in (base..end).step_by(self.granule) {
+                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+                }
             }
 
             // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
@@ -457,7 +486,7 @@
         // Since mmio_guard_map takes IPAs, if pvmfw moves to non-ID address mapping, page_base
         // should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
         // virt_to_phys here, and just pass page_base instead.
-        get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+        get_mmio_guard().unwrap().unmap(page_base).map_err(|e| {
             error!("Error MMIO guard unmapping: {e}");
         })?;
     }
diff --git a/vmbase/src/rand.rs b/vmbase/src/rand.rs
index 26fb51a..6b8d7e0 100644
--- a/vmbase/src/rand.rs
+++ b/vmbase/src/rand.rs
@@ -14,16 +14,29 @@
 
 //! Functions and drivers for obtaining true entropy.
 
-use crate::hvc;
+use crate::hvc::{self, TrngRng64Entropy};
 use core::fmt;
 use core::mem::size_of;
+use smccc::{self, Hvc};
 
 /// Error type for rand operations.
 pub enum Error {
+    /// No source of entropy found.
+    NoEntropySource,
+    /// Error during architectural SMCCC call.
+    Smccc(smccc::arch::Error),
     /// Error during SMCCC TRNG call.
     Trng(hvc::trng::Error),
+    /// Unsupported SMCCC version.
+    UnsupportedSmcccVersion(smccc::arch::Version),
     /// Unsupported SMCCC TRNG version.
-    UnsupportedVersion((u16, u16)),
+    UnsupportedTrngVersion(hvc::trng::Version),
+}
+
+impl From<smccc::arch::Error> for Error {
+    fn from(e: smccc::arch::Error) -> Self {
+        Self::Smccc(e)
+    }
 }
 
 impl From<hvc::trng::Error> for Error {
@@ -38,10 +51,11 @@
 impl fmt::Display for Error {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
+            Self::NoEntropySource => write!(f, "No source of entropy available"),
+            Self::Smccc(e) => write!(f, "Architectural SMCCC error: {e}"),
             Self::Trng(e) => write!(f, "SMCCC TRNG error: {e}"),
-            Self::UnsupportedVersion((x, y)) => {
-                write!(f, "Unsupported SMCCC TRNG version v{x}.{y}")
-            }
+            Self::UnsupportedSmcccVersion(v) => write!(f, "Unsupported SMCCC version {v}"),
+            Self::UnsupportedTrngVersion(v) => write!(f, "Unsupported SMCCC TRNG version {v}"),
         }
     }
 }
@@ -53,15 +67,35 @@
 }
 
 /// Configure the source of entropy.
-pub fn init() -> Result<()> {
-    match hvc::trng_version()? {
-        (1, _) => Ok(()),
-        version => Err(Error::UnsupportedVersion(version)),
+pub(crate) fn init() -> Result<()> {
+    // SMCCC TRNG requires SMCCC v1.1.
+    match smccc::arch::version::<Hvc>()? {
+        smccc::arch::Version { major: 1, minor } if minor >= 1 => (),
+        version => return Err(Error::UnsupportedSmcccVersion(version)),
     }
+
+    // TRNG_RND requires SMCCC TRNG v1.0.
+    match hvc::trng_version()? {
+        hvc::trng::Version { major: 1, minor: _ } => (),
+        version => return Err(Error::UnsupportedTrngVersion(version)),
+    }
+
+    // TRNG_RND64 doesn't define any special capabilities so ignore the successful result.
+    let _ = hvc::trng_features(hvc::ARM_SMCCC_TRNG_RND64).map_err(|e| {
+        if e == hvc::trng::Error::NotSupported {
+            // SMCCC TRNG is currently our only source of entropy.
+            Error::NoEntropySource
+        } else {
+            e.into()
+        }
+    })?;
+
+    Ok(())
 }
 
-fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
-    const MAX_BYTES_PER_CALL: usize = size_of::<hvc::TrngRng64Entropy>();
+/// Fills a slice of bytes with true entropy.
+pub fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
+    const MAX_BYTES_PER_CALL: usize = size_of::<TrngRng64Entropy>();
 
     let (aligned, remainder) = s.split_at_mut(s.len() - s.len() % MAX_BYTES_PER_CALL);
 
@@ -89,13 +123,14 @@
     Ok(())
 }
 
-fn repeat_trng_rnd(n_bytes: usize) -> hvc::trng::Result<hvc::TrngRng64Entropy> {
+fn repeat_trng_rnd(n_bytes: usize) -> Result<TrngRng64Entropy> {
     let bits = usize::try_from(u8::BITS).unwrap();
     let n_bits = (n_bytes * bits).try_into().unwrap();
     loop {
         match hvc::trng_rnd64(n_bits) {
-            Err(hvc::trng::Error::NoEntropy) => continue,
-            res => return res,
+            Ok(entropy) => return Ok(entropy),
+            Err(hvc::trng::Error::NoEntropy) => (),
+            Err(e) => return Err(e.into()),
         }
     }
 }
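
Since `fill_with_entropy` is now public and `rand::init()` runs from `rust_entry` before `main`, client code can draw entropy directly; a hedged sketch (`random_nonce` is a hypothetical helper):

```rust
use vmbase::rand;

// Hypothetical helper: fill a fixed-size nonce with TRNG-backed entropy.
fn random_nonce() -> Result<[u8; 16], rand::Error> {
    let mut nonce = [0u8; 16];
    rand::fill_with_entropy(&mut nonce)?;
    Ok(nonce)
}
```
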