[virtio] Move virtio module from pvmfw to vmbase

Move the module so that it can be reused by both pvmfw and rialo.

No code change in this CL.
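
For illustration, a client is expected to plug HalImpl into the
virtio_drivers generics roughly as follows. This is a sketch only: the
vmbase::virtio::hal import path, pci_root and device_function are
assumptions, the exact API depends on the virtio_drivers version, and
PCI_INFO must have been initialised (and the PCI MMIO range mapped)
beforehand, as mmio_phys_to_virt requires:

  use virtio_drivers::device::blk::VirtIOBlk;
  use virtio_drivers::transport::pci::PciTransport;
  use vmbase::virtio::hal::HalImpl; // hypothetical re-export path

  // Probe the device; HalImpl::mmio_phys_to_virt validates and
  // translates its PCI BAR addresses.
  let transport =
      PciTransport::new::<HalImpl>(&mut pci_root, device_function)?;
  // The driver allocates its virtqueues through HalImpl::dma_alloc
  // and bounces I/O buffers through share()/unshare().
  let blk = VirtIOBlk::<HalImpl, PciTransport>::new(transport)?;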

Bug: 284462758
Test: m pvmfw_img
Change-Id: I225b495a2f8c1a49cb63ea69d60f01949bab3f05
diff --git a/vmbase/src/virtio/hal.rs b/vmbase/src/virtio/hal.rs
new file mode 100644
index 0000000..ac5b967
--- /dev/null
+++ b/vmbase/src/virtio/hal.rs
@@ -0,0 +1,130 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! HAL for the virtio_drivers crate.
+
+use super::pci::PCI_INFO;
+use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
+use crate::util::RangeExt as _;
+use core::alloc::Layout;
+use core::mem::size_of;
+use core::ptr::{copy_nonoverlapping, NonNull};
+use log::trace;
+use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
+
+/// The alignment to use for the temporary buffers allocated by `HalImpl::share`. There doesn't seem
+/// to be any particular alignment required by VirtIO for these, so 16 bytes should be enough to
+/// allow appropriate alignment for whatever fields are accessed. `alloc_shared` will increase the
+/// alignment to the memory sharing granule size anyway.
+const SHARED_BUFFER_ALIGNMENT: usize = size_of::<u128>();
+
+/// HAL implementation for the virtio_drivers crate.
+pub struct HalImpl;
+
+/// # Safety
+///
+/// See the 'Implementation Safety' comments on methods below for how they fulfill the safety
+/// requirements of the unsafe `Hal` trait.
+unsafe impl Hal for HalImpl {
+    /// # Implementation Safety
+    ///
+    /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
+    /// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
+    /// block of memory using `alloc_shared`, which is guaranteed to allocate valid and unique
+    /// memory. We request an alignment of at least `PAGE_SIZE` from `alloc_shared`.
+    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
+        let vaddr = alloc_shared(dma_layout(pages))
+            .expect("Failed to allocate and share VirtIO DMA range with host");
+        // TODO(ptosi): Move this zeroing to virtio_drivers, if it silently wants a zeroed region.
+        // SAFETY - vaddr points to a region allocated for the caller, so it is safe to access.
+        unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, dma_layout(pages).size()) };
+        let paddr = virt_to_phys(vaddr);
+        (paddr, vaddr)
+    }
+
+    unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
+        // SAFETY - Memory was allocated by `dma_alloc` using `alloc_shared` with the same layout.
+        unsafe { dealloc_shared(vaddr, dma_layout(pages)) }
+            .expect("Failed to unshare VirtIO DMA range with host");
+        0
+    }
+
+    /// # Implementation Safety
+    ///
+    /// The returned pointer must be valid because `paddr` describes a valid MMIO region, we
+    /// check that it is within the PCI MMIO range, and we previously mapped the entire PCI MMIO
+    /// range. It can't alias any other allocations because we previously validated in
+    /// `map_mmio_range` that the PCI MMIO range didn't overlap with any other memory ranges.
+    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
+        let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
+        let bar_range = {
+            let start = pci_info.bar_range.start.try_into().unwrap();
+            let end = pci_info.bar_range.end.try_into().unwrap();
+
+            start..end
+        };
+        let mmio_range = paddr..paddr.checked_add(size).expect("PCI MMIO region end overflowed");
+
+        // Check that the region is within the PCI MMIO range that we read from the device tree. If
+        // not, the host is probably trying to do something malicious.
+        assert!(
+            mmio_range.is_within(&bar_range),
+            "PCI MMIO region was outside of expected BAR range.",
+        );
+
+        phys_to_virt(paddr)
+    }
+
+    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
+        let size = buffer.len();
+
+        let bounce = alloc_shared(bb_layout(size))
+            .expect("Failed to allocate and share VirtIO bounce buffer with host");
+        let paddr = virt_to_phys(bounce);
+        if direction == BufferDirection::DriverToDevice {
+            let src = buffer.cast::<u8>().as_ptr().cast_const();
+            trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) initialized from {src:?}");
+            // SAFETY - Both regions are valid, properly aligned, and don't overlap.
+            unsafe { copy_nonoverlapping(src, bounce.as_ptr(), size) };
+        }
+
+        paddr
+    }
+
+    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
+        let bounce = phys_to_virt(paddr);
+        let size = buffer.len();
+        if direction == BufferDirection::DeviceToDriver {
+            let dest = buffer.cast::<u8>().as_ptr();
+            trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) copied back to {dest:?}");
+            // SAFETY - Both regions are valid, properly aligned, and don't overlap.
+            unsafe { copy_nonoverlapping(bounce.as_ptr(), dest, size) };
+        }
+
+        // SAFETY - Memory was allocated by `share` using `alloc_shared` with the same layout.
+        unsafe { dealloc_shared(bounce, bb_layout(size)) }
+            .expect("Failed to unshare and deallocate VirtIO bounce buffer");
+    }
+}
+
+/// Returns the layout of the DMA region of `pages` pages used by `dma_alloc` and `dma_dealloc`.
+fn dma_layout(pages: usize) -> Layout {
+    let size = pages.checked_mul(PAGE_SIZE).unwrap();
+    Layout::from_size_align(size, PAGE_SIZE).unwrap()
+}
+
+/// Returns the layout of the bounce buffers allocated by `share` and freed by `unshare`.
+fn bb_layout(size: usize) -> Layout {
+    Layout::from_size_align(size, SHARED_BUFFER_ALIGNMENT).unwrap()
+}