Try reading block device. Share queues and buffers with host.

For now the host will be able to both read and write all parts of the
virtqueues, but the buffers themselves will be copied so the host can't
write over buffers it shouldn't.

Bug: 237250092
Bug: 261439403
Test: Ran pVM firmware manually with a block device
Change-Id: I38d965e92342e86e39b5cc8b9cf32ad3bc90417b
diff --git a/pvmfw/src/heap.rs b/pvmfw/src/heap.rs
index eab3bc4..2dc60f4 100644
--- a/pvmfw/src/heap.rs
+++ b/pvmfw/src/heap.rs
@@ -27,7 +27,7 @@
 #[global_allocator]
 static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
 
-static mut HEAP: [u8; 65536] = [0; 65536];
+static mut HEAP: [u8; 131072] = [0; 131072];
 
 pub unsafe fn init() {
     HEAP_ALLOCATOR.lock().init(HEAP.as_mut_ptr() as usize, HEAP.len());
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index 9b14644..ada9a63 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -19,6 +19,8 @@
 #![feature(default_alloc_error_handler)]
 #![feature(ptr_const_cast)] // Stabilized in 1.65.0
 
+extern crate alloc;
+
 mod avb;
 mod config;
 mod entry;
@@ -30,14 +32,14 @@
 mod memory;
 mod mmio_guard;
 mod mmu;
-mod pci;
 mod smccc;
+mod virtio;
 
 use crate::{
     avb::PUBLIC_KEY,
     entry::RebootReason,
     memory::MemoryTracker,
-    pci::{find_virtio_devices, map_mmio},
+    virtio::pci::{find_virtio_devices, map_mmio},
 };
 use dice::bcc;
 use fdtpci::{PciError, PciInfo};
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 5e4874f..604aa80 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -14,16 +14,23 @@
 
 //! Low-level allocation and tracking of main memory.
 
-use crate::helpers::{self, align_down, page_4kb_of, SIZE_4KB};
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use crate::helpers::{self, align_down, align_up, page_4kb_of, SIZE_4KB};
 use crate::hvc::{hyp_meminfo, mem_share, mem_unshare};
 use crate::mmio_guard;
 use crate::mmu;
 use crate::smccc;
+use alloc::alloc::alloc_zeroed;
+use alloc::alloc::dealloc;
+use alloc::alloc::handle_alloc_error;
+use core::alloc::Layout;
 use core::cmp::max;
 use core::cmp::min;
 use core::fmt;
 use core::num::NonZeroUsize;
 use core::ops::Range;
+use core::ptr::NonNull;
 use core::result;
 use log::error;
 use tinyvec::ArrayVec;
@@ -272,9 +279,7 @@
 /// Gives the KVM host read, write and execute permissions on the given memory range. If the range
 /// is not aligned with the memory protection granule then it will be extended on either end to
 /// align.
-#[allow(unused)]
-pub fn share_range(range: &MemoryRange) -> smccc::Result<()> {
-    let granule = hyp_meminfo()? as usize;
+fn share_range(range: &MemoryRange, granule: usize) -> smccc::Result<()> {
     for base in (align_down(range.start, granule)
         .expect("Memory protection granule was not a power of two")..range.end)
         .step_by(granule)
@@ -287,9 +292,7 @@
 /// Removes permission from the KVM host to access the given memory range which was previously
 /// shared. If the range is not aligned with the memory protection granule then it will be extended
 /// on either end to align.
-#[allow(unused)]
-pub fn unshare_range(range: &MemoryRange) -> smccc::Result<()> {
-    let granule = hyp_meminfo()? as usize;
+fn unshare_range(range: &MemoryRange, granule: usize) -> smccc::Result<()> {
     for base in (align_down(range.start, granule)
         .expect("Memory protection granule was not a power of two")..range.end)
         .step_by(granule)
@@ -299,7 +302,78 @@
     Ok(())
 }
 
+/// Allocates a memory range of at least the given size from the global allocator, and shares it
+/// with the host. Returns a pointer to the buffer.
+///
+/// It will be aligned to the memory sharing granule size supported by the hypervisor.
+pub fn alloc_shared(size: usize) -> smccc::Result<NonNull<u8>> {
+    let layout = shared_buffer_layout(size)?;
+    let granule = layout.align();
+
+    // Safe because `shared_buffer_layout` panics if the size is 0, so the layout must have a
+    // non-zero size.
+    let buffer = unsafe { alloc_zeroed(layout) };
+
+    // TODO: Use let-else once we have Rust 1.65 in AOSP.
+    let buffer = if let Some(buffer) = NonNull::new(buffer) {
+        buffer
+    } else {
+        handle_alloc_error(layout);
+    };
+
+    let vaddr = buffer.as_ptr() as usize;
+    let paddr = virt_to_phys(vaddr);
+    // If share_range fails then we will leak the allocation, but that seems better than having it
+    // be reused while maybe still partially shared with the host.
+    share_range(&(paddr..paddr + layout.size()), granule)?;
+
+    Ok(buffer)
+}
+
+/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
+///
+/// The size passed in must be the size passed to the original `alloc_shared` call.
+///
+/// # Safety
+///
+/// The memory must have been allocated by `alloc_shared` with the same size, and not yet
+/// deallocated.
+pub unsafe fn dealloc_shared(vaddr: usize, size: usize) -> smccc::Result<()> {
+    let layout = shared_buffer_layout(size)?;
+    let granule = layout.align();
+
+    let paddr = virt_to_phys(vaddr);
+    unshare_range(&(paddr..paddr + layout.size()), granule)?;
+    // Safe because the memory was allocated by `alloc_shared` above using the same allocator, and
+    // the layout is the same as was used then.
+    unsafe { dealloc(vaddr as *mut u8, layout) };
+
+    Ok(())
+}
+
+/// Returns the layout to use for allocating a buffer of at least the given size shared with the
+/// host.
+///
+/// It will be aligned to the memory sharing granule size supported by the hypervisor.
+///
+/// Panics if `size` is 0.
+fn shared_buffer_layout(size: usize) -> smccc::Result<Layout> {
+    assert_ne!(size, 0);
+    let granule = hyp_meminfo()? as usize;
+    let allocated_size =
+        align_up(size, granule).expect("Memory protection granule was not a power of two");
+    Ok(Layout::from_size_align(allocated_size, granule).unwrap())
+}
+
 /// Returns an iterator which yields the base address of each 4 KiB page within the given range.
 fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
     (page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
 }
+
+/// Returns the intermediate physical address corresponding to the given virtual address.
+///
+/// As we use identity mapping for everything, this is just the identity function, but calling it
+/// makes it explicit where we are converting from a virtual to a physical address.
+pub fn virt_to_phys(vaddr: usize) -> usize {
+    vaddr
+}
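To make the contract between these helpers concrete, here is a minimal usage sketch
(illustration only, not part of this change): every `alloc_shared` must eventually be paired
with a `dealloc_shared` of the same size, after which the memory must no longer be touched.

    // Allocate a zeroed, granule-aligned buffer of at least 16 bytes, shared with the host.
    let buffer = alloc_shared(16).expect("Failed to allocate shared buffer");
    // ... the host can now read and write the buffer ...
    // Safe because the pointer came from `alloc_shared` with the same size and has not
    // been deallocated yet.
    unsafe {
        dealloc_shared(buffer.as_ptr() as usize, 16).expect("Failed to unshare buffer");
    }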
diff --git a/pvmfw/src/virtio.rs b/pvmfw/src/virtio.rs
new file mode 100644
index 0000000..df916bc
--- /dev/null
+++ b/pvmfw/src/virtio.rs
@@ -0,0 +1,18 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Modules for working with VirtIO devices.
+
+mod hal;
+pub mod pci;
diff --git a/pvmfw/src/virtio/hal.rs b/pvmfw/src/virtio/hal.rs
new file mode 100644
index 0000000..c1c8ae6
--- /dev/null
+++ b/pvmfw/src/virtio/hal.rs
@@ -0,0 +1,89 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::memory::{alloc_shared, dealloc_shared, virt_to_phys};
+use core::ptr::{copy_nonoverlapping, NonNull};
+use log::debug;
+use virtio_drivers::{BufferDirection, Hal, PhysAddr, VirtAddr, PAGE_SIZE};
+
+pub struct HalImpl;
+
+impl Hal for HalImpl {
+    fn dma_alloc(pages: usize) -> PhysAddr {
+        debug!("dma_alloc: pages={}", pages);
+        let size = pages * PAGE_SIZE;
+        let vaddr = alloc_shared(size)
+            .expect("Failed to allocate and share VirtIO DMA range with host")
+            .as_ptr() as VirtAddr;
+        virt_to_phys(vaddr)
+    }
+
+    fn dma_dealloc(paddr: PhysAddr, pages: usize) -> i32 {
+        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
+        let vaddr = Self::phys_to_virt(paddr);
+        let size = pages * PAGE_SIZE;
+        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
+        // the layout is the same as was used then.
+        unsafe {
+            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
+        }
+        0
+    }
+
+    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
+        paddr
+    }
+
+    fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
+        let size = buffer.len();
+
+        // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
+        // For now, allocate a range of pages, copy the buffer if necessary, and share that range.
+        let copy =
+            alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
+        if direction == BufferDirection::DriverToDevice {
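+            // Safe because `buffer` is valid for reads of `size` bytes, `copy` was just
+            // allocated with at least `size` bytes, and the two regions cannot overlap.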
+            unsafe {
+                copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
+            }
+        }
+        virt_to_phys(copy.as_ptr() as VirtAddr)
+    }
+
+    fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
+        let vaddr = Self::phys_to_virt(paddr);
+        let size = buffer.len();
+        if direction == BufferDirection::DeviceToDriver {
+            debug!(
+                "Copying VirtIO buffer back from {:#x} to {:#x}.",
+                paddr,
+                buffer.as_ptr() as *mut u8 as usize
+            );
+            unsafe {
+                copy_nonoverlapping(vaddr as *const u8, buffer.as_ptr() as *mut u8, size);
+            }
+        }
+
+        // Unshare and deallocate the shared copy of the buffer.
+        debug!("Unsharing VirtIO buffer {:#x}", paddr);
+        // Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
+        // the same as was used then.
+        unsafe {
+            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
+        }
+    }
+}
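Taken together, `share` and `unshare` implement a bounce buffer: the device only ever sees a
shared copy, never the driver's original memory. A minimal sketch of one driver-to-device
round trip, calling the trait methods directly (in practice virtio_drivers invokes them
internally):

    use core::ptr::NonNull;
    use virtio_drivers::{BufferDirection, Hal};

    let request = [0u8; 512];
    let buffer = NonNull::from(&request[..]);
    // Allocates a shared copy, fills it from `request`, and returns the copy's physical address.
    let paddr = HalImpl::share(buffer, BufferDirection::DriverToDevice);
    // ... the device DMAs the request contents from `paddr` ...
    // Frees the shared copy; nothing is copied back in this direction.
    HalImpl::unshare(paddr, buffer, BufferDirection::DriverToDevice);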
diff --git a/pvmfw/src/pci.rs b/pvmfw/src/virtio/pci.rs
similarity index 66%
rename from pvmfw/src/pci.rs
rename to pvmfw/src/virtio/pci.rs
index 2b81772..f9d36c6 100644
--- a/pvmfw/src/pci.rs
+++ b/pvmfw/src/virtio/pci.rs
@@ -14,10 +14,17 @@
 
 //! Functions to scan the PCI bus for VirtIO devices.
 
+use super::hal::HalImpl;
 use crate::{entry::RebootReason, memory::MemoryTracker};
 use fdtpci::{PciError, PciInfo};
-use log::{debug, error};
-use virtio_drivers::transport::pci::{bus::PciRoot, virtio_device_type};
+use log::{debug, error, info};
+use virtio_drivers::{
+    device::blk::VirtIOBlk,
+    transport::{
+        pci::{bus::PciRoot, virtio_device_type, PciTransport},
+        DeviceType, Transport,
+    },
+};
 
 /// Maps the CAM and BAR range in the page table and MMIO guard.
 pub fn map_mmio(pci_info: &PciInfo, memory: &mut MemoryTracker) -> Result<(), RebootReason> {
@@ -46,6 +53,20 @@
         );
         if let Some(virtio_type) = virtio_device_type(&info) {
             debug!("  VirtIO {:?}", virtio_type);
+            let mut transport = PciTransport::new::<HalImpl>(pci_root, device_function).unwrap();
+            info!(
+                "Detected virtio PCI device with device type {:?}, features {:#018x}",
+                transport.device_type(),
+                transport.read_device_features(),
+            );
+            if virtio_type == DeviceType::Block {
+                let mut blk =
+                    VirtIOBlk::<HalImpl, _>::new(transport).expect("Failed to create blk driver");
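+            // `capacity()` is in 512-byte sectors, so convert to KiB for logging.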
+                info!("Found {} KiB block device.", blk.capacity() * 512 / 1024);
+                let mut data = [0; 512];
+                blk.read_block(0, &mut data).expect("Failed to read block device");
+            }
         }
     }