use super::pci::PCI_INFO;
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use core::{
ops::Range,
ptr::{copy_nonoverlapping, NonNull},
};
use log::debug;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
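
/// HAL implementation for the `virtio-drivers` crate, sharing DMA buffers with the host via
/// `alloc_shared` and `dealloc_shared`.
///
/// A sketch of how a device driver might be instantiated with this HAL; the `transport` value
/// here is assumed to have been built from the PCI transport setup elsewhere:
///
/// ```ignore
/// let mut blk = VirtIOBlk::<HalImpl, _>::new(transport)?;
/// ```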
pub struct HalImpl;
/// Implements the `Hal` trait for `HalImpl`.
///
/// # Safety
///
/// Callers of this implementation must follow the safety requirements documented for the unsafe
/// methods.
unsafe impl Hal for HalImpl {
/// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
///
/// # Implementation Safety
///
/// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
/// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
/// block of memory using `alloc_shared` and returning a non-null pointer to it that is
/// aligned to `PAGE_SIZE`.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let size = pages * PAGE_SIZE;
let vaddr =
alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
let paddr = virt_to_phys(vaddr);
(paddr, vaddr)
}
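
/// Deallocates the given contiguous range of DMA pages and stops sharing them with the host.
///
/// # Safety
///
/// The memory must have been allocated by `dma_alloc` on this same HAL implementation, with the
/// same number of pages, and not yet deallocated.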
unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let size = pages * PAGE_SIZE;
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
// the layout is the same as was used then.
unsafe {
dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
}
0
}
/// Converts a physical address used for MMIO to a virtual address which the driver can access.
///
/// # Implementation Safety
///
/// `mmio_phys_to_virt` satisfies the trait's safety requirement by checking that the region to
/// be mapped lies entirely within the PCI MMIO range read from the device tree, so only genuine
/// device MMIO is ever mapped into the driver's address space.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
// Check that the region is within the PCI MMIO range that we read from the device tree. If
// not, the host is probably trying to do something malicious.
if !contains_range(
&pci_info.bar_range,
&(paddr.try_into().expect("PCI MMIO region start was outside of 32-bit address space")
..paddr
.checked_add(size)
.expect("PCI MMIO region end overflowed")
.try_into()
.expect("PCI MMIO region end was outside of 32-bit address space")),
) {
panic!("PCI MMIO region was outside of expected BAR range.");
}
phys_to_virt(paddr)
}
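
/// Shares the given buffer with the host by copying it into a newly allocated bounce buffer in
/// shared memory, and returns the physical address of the copy.
///
/// The contents are only copied in for `DriverToDevice` transfers; for `DeviceToDriver`
/// transfers the device writes the data, so the copy back happens in `unshare` instead.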
unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
let size = buffer.len();
// TODO: Copy to a pre-shared region rather than allocating and sharing each time.
// Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
let copy =
alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
if direction == BufferDirection::DriverToDevice {
unsafe {
copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
}
}
virt_to_phys(copy)
}
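
/// For `DeviceToDriver` transfers, copies the contents of the shared copy back into the
/// original buffer; in all cases, unshares and deallocates the shared copy.
///
/// # Safety
///
/// `paddr` must be the address previously returned by `share` for the same `buffer`, and must
/// not already have been unshared.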
unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
let vaddr = phys_to_virt(paddr);
let size = buffer.len();
if direction == BufferDirection::DeviceToDriver {
debug!(
"Copying VirtIO buffer back from {:#x} to {:#x}.",
paddr,
buffer.as_ptr() as *mut u8 as usize
);
unsafe {
copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
}
}
// Unshare and deallocate the shared copy of the buffer.
debug!("Unsharing VirtIO buffer {:#x}", paddr);
// Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
// the same as was used then.
unsafe {
dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
}
}
}
/// Returns true if `inner` is entirely contained within `outer`.
fn contains_range(outer: &Range<u32>, inner: &Range<u32>) -> bool {
inner.start >= outer.start && inner.end <= outer.end
}
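
// Minimal sanity checks for `contains_range`; a sketch which assumes this module can also be
// built with the standard test harness on the host.
#[cfg(test)]
mod tests {
    use super::contains_range;

    #[test]
    fn contains_range_checks_both_bounds() {
        let outer = 0x1000u32..0x2000;
        // A range is contained if it starts and ends within the outer range, inclusive of the
        // outer bounds themselves.
        assert!(contains_range(&outer, &(0x1000..0x2000)));
        assert!(contains_range(&outer, &(0x1800..0x1900)));
        // Ranges extending past either bound are rejected.
        assert!(!contains_range(&outer, &(0x0fff..0x1800)));
        assert!(!contains_range(&outer, &(0x1800..0x2001)));
    }
}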