Merge "rialto: Get memory regions from vmbase::layout"
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index 7ea1189..0ae2203 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -17,6 +17,8 @@
         "libaarch64_paging",
         "libbssl_ffi_nostd",
         "libbuddy_system_allocator",
+        "libciborium_nostd",
+        "libciborium_io_nostd",
         "libdiced_open_dice_nostd",
         "libfdtpci",
         "libhyp",
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index a773f1a..1c22861 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -37,6 +37,7 @@
 mod virtio;
 
 use alloc::boxed::Box;
+use alloc::string::ToString;
 
 use crate::dice::PartialInputs;
 use crate::entry::RebootReason;
@@ -46,6 +47,7 @@
 use crate::instance::get_or_generate_instance_salt;
 use crate::memory::MemoryTracker;
 use crate::virtio::pci;
+use ciborium::{de::from_reader, value::Value};
 use diced_open_dice::bcc_handover_main_flow;
 use diced_open_dice::bcc_handover_parse;
 use diced_open_dice::DiceArtifacts;
@@ -58,6 +60,19 @@
 
 const NEXT_BCC_SIZE: usize = GUEST_PAGE_SIZE;
 
+type CiboriumError = ciborium::de::Error<ciborium_io::EndOfFile>;
+
+/// Decodes the provided CBOR-encoded bytes and returns the decoded
+/// `ciborium::Value`, or an error if the bytes are not a single, complete CBOR value.
+fn value_from_bytes(mut bytes: &[u8]) -> Result<Value, CiboriumError> {
+    let value = from_reader(&mut bytes)?;
+    // Ciborium tries to read one Value, but doesn't care if there is trailing data. We do.
+    if !bytes.is_empty() {
+        return Err(CiboriumError::Semantic(Some(0), "unexpected trailing data".to_string()));
+    }
+    Ok(value)
+}
+
 fn main(
     fdt: &mut Fdt,
     signed_kernel: &[u8],
@@ -81,6 +96,18 @@
     })?;
     trace!("BCC: {bcc_handover:x?}");
 
+    // Minimal BCC verification: check that the BCC exists and is valid CBOR.
+    // TODO(alanstokes): Do something more useful.
+    if let Some(bytes) = bcc_handover.bcc() {
+        let _ = value_from_bytes(bytes).map_err(|e| {
+            error!("Invalid BCC: {e:?}");
+            RebootReason::InvalidBcc
+        })?;
+    } else {
+        error!("Missing BCC");
+        return Err(RebootReason::InvalidBcc);
+    }
+
     // Set up PCI bus for VirtIO devices.
     let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
     debug!("PCI: {:#x?}", pci_info);
diff --git a/pvmfw/src/virtio/hal.rs b/pvmfw/src/virtio/hal.rs
index 5f70b33..7598a55 100644
--- a/pvmfw/src/virtio/hal.rs
+++ b/pvmfw/src/virtio/hal.rs
@@ -9,7 +9,21 @@
 
 pub struct HalImpl;
 
-impl Hal for HalImpl {
+/// Implements the `Hal` trait for `HalImpl`.
+///
+/// # Safety
+///
+/// Callers of this implementation must follow the safety requirements documented for the unsafe
+/// methods.
+unsafe impl Hal for HalImpl {
+    /// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
+    ///
+    /// # Implementation Safety
+    ///
+    /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
+    /// reference in the program until it is deallocated by `dma_dealloc`. It does so by allocating
+    /// a unique block of memory using `alloc_shared` and returning a non-null pointer to it that
+    /// is aligned to `PAGE_SIZE`.
     fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
         debug!("dma_alloc: pages={}", pages);
         let size = pages * PAGE_SIZE;
@@ -19,7 +33,7 @@
         (paddr, vaddr)
     }
 
-    fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
+    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
         debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
         let size = pages * PAGE_SIZE;
         // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
@@ -30,7 +44,13 @@
         0
     }
 
-    fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
+    /// Converts a physical address used for MMIO to a virtual address which the driver can access.
+    ///
+    /// # Implementation Safety
+    ///
+    /// `mmio_phys_to_virt` satisfies the trait's safety requirement by checking that the requested
+    /// region lies entirely within the PCI MMIO range read from the device tree.
+    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
         let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
         // Check that the region is within the PCI MMIO range that we read from the device tree. If
         // not, the host is probably trying to do something malicious.
@@ -48,7 +68,7 @@
         phys_to_virt(paddr)
     }
 
-    fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
+    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
         let size = buffer.len();
 
         // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
@@ -63,7 +83,7 @@
         virt_to_phys(copy)
     }
 
-    fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
+    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
         let vaddr = phys_to_virt(paddr);
         let size = buffer.len();
         if direction == BufferDirection::DeviceToDriver {
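
For context, the change from `impl Hal` to `unsafe impl Hal` above follows the usual Rust pattern for traits whose implementations must uphold invariants the compiler cannot check, while the individual `unsafe fn` methods push obligations onto the caller. A self-contained sketch of that pattern, with an illustrative trait and types that are not the virtio-drivers API:

    use std::alloc::{alloc, dealloc, Layout};

    /// Unsafe to implement: implementors promise the documented invariants hold.
    unsafe trait PageAllocator {
        /// Returns a pointer to `pages` pages that nothing else aliases.
        fn alloc_pages(pages: usize) -> *mut u8;

        /// # Safety
        /// `ptr` must come from `alloc_pages` on this implementation with the
        /// same `pages`, and must not be used again afterwards.
        unsafe fn dealloc_pages(ptr: *mut u8, pages: usize);
    }

    struct HeapPages;

    // The `unsafe` keyword here is the implementor's promise that the trait's
    // documented invariants are upheld by this implementation.
    unsafe impl PageAllocator for HeapPages {
        fn alloc_pages(pages: usize) -> *mut u8 {
            assert!(pages > 0);
            let layout = Layout::from_size_align(pages * 4096, 4096).unwrap();
            // SAFETY: the layout has a non-zero size.
            unsafe { alloc(layout) }
        }

        unsafe fn dealloc_pages(ptr: *mut u8, pages: usize) {
            let layout = Layout::from_size_align(pages * 4096, 4096).unwrap();
            // SAFETY: the caller guarantees `ptr` came from `alloc_pages`
            // with the same `pages`, so the layout matches.
            unsafe { dealloc(ptr, layout) }
        }
    }

The `# Safety` and `# Implementation Safety` sections added in this change document exactly those two sides of the contract for pvmfw's HAL.
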
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index 117cbc8..41a3ff4 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -98,7 +98,7 @@
 
 struct HalImpl;
 
-impl Hal for HalImpl {
+unsafe impl Hal for HalImpl {
     fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
         debug!("dma_alloc: pages={}", pages);
         let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
@@ -110,7 +110,7 @@
         (paddr, vaddr)
     }
 
-    fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
+    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
         debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
         let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
         // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
@@ -121,17 +121,17 @@
         0
     }
 
-    fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
+    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
         NonNull::new(paddr as _).unwrap()
     }
 
-    fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
+    unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
         let vaddr = buffer.cast();
         // Nothing to do, as the host already has access to all memory.
         virt_to_phys(vaddr)
     }
 
-    fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
+    unsafe fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
         // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
         // anywhere else.
     }
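
The example HAL can make `share` and `unshare` no-ops because, as the comments above note, the host already has access to all of the example VM's memory and nothing is copied; `mmio_phys_to_virt` likewise just reinterprets the physical address, relying on an identity mapping. A minimal sketch of the identity conversions this depends on, using illustrative stand-ins for the real vmbase helpers:

    use core::ptr::NonNull;

    type PhysAddr = usize;

    // With an identity mapping, virtual and physical addresses coincide.
    fn virt_to_phys(vaddr: NonNull<u8>) -> PhysAddr {
        vaddr.as_ptr() as usize
    }

    fn phys_to_virt(paddr: PhysAddr) -> NonNull<u8> {
        // `paddr` must be non-zero for NonNull; RAM and MMIO addresses are.
        NonNull::new(paddr as *mut u8).unwrap()
    }
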