blob: 5f70b337864154f407179a0b24fa5be0f67c0274 [file] [log] [blame]
Andrew Walbranb398fc82023-01-24 14:45:46 +00001use super::pci::PCI_INFO;
Andrew Walbran272bd7a2023-01-24 14:02:36 +00002use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
Andrew Walbranb398fc82023-01-24 14:45:46 +00003use core::{
4 ops::Range,
5 ptr::{copy_nonoverlapping, NonNull},
6};
Andrew Walbran848decf2022-12-15 14:39:38 +00007use log::debug;
Andrew Walbran272bd7a2023-01-24 14:02:36 +00008use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
Andrew Walbran848decf2022-12-15 14:39:38 +00009
/// Zero-sized type providing this crate's implementation of the virtio-drivers `Hal`
/// trait (see the `impl Hal for HalImpl` block below).
pub struct HalImpl;
11
12impl Hal for HalImpl {
Andrew Walbran272bd7a2023-01-24 14:02:36 +000013 fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
Andrew Walbran848decf2022-12-15 14:39:38 +000014 debug!("dma_alloc: pages={}", pages);
15 let size = pages * PAGE_SIZE;
Andrew Walbran272bd7a2023-01-24 14:02:36 +000016 let vaddr =
17 alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
18 let paddr = virt_to_phys(vaddr);
19 (paddr, vaddr)
Andrew Walbran848decf2022-12-15 14:39:38 +000020 }
21
Andrew Walbran272bd7a2023-01-24 14:02:36 +000022 fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
Andrew Walbran848decf2022-12-15 14:39:38 +000023 debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
Andrew Walbran848decf2022-12-15 14:39:38 +000024 let size = pages * PAGE_SIZE;
25 // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
26 // the layout is the same as was used then.
27 unsafe {
28 dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
29 }
30 0
31 }
32
Andrew Walbranb398fc82023-01-24 14:45:46 +000033 fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
34 let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
35 // Check that the region is within the PCI MMIO range that we read from the device tree. If
36 // not, the host is probably trying to do something malicious.
37 if !contains_range(
38 &pci_info.bar_range,
39 &(paddr.try_into().expect("PCI MMIO region start was outside of 32-bit address space")
40 ..paddr
41 .checked_add(size)
42 .expect("PCI MMIO region end overflowed")
43 .try_into()
44 .expect("PCI MMIO region end was outside of 32-bit address space")),
45 ) {
46 panic!("PCI MMIO region was outside of expected BAR range.");
47 }
Andrew Walbran272bd7a2023-01-24 14:02:36 +000048 phys_to_virt(paddr)
Andrew Walbran848decf2022-12-15 14:39:38 +000049 }
50
51 fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
52 let size = buffer.len();
53
54 // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
55 // Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
56 let copy =
57 alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
58 if direction == BufferDirection::DriverToDevice {
59 unsafe {
60 copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
61 }
62 }
Andrew Walbran272bd7a2023-01-24 14:02:36 +000063 virt_to_phys(copy)
Andrew Walbran848decf2022-12-15 14:39:38 +000064 }
65
66 fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
Andrew Walbran272bd7a2023-01-24 14:02:36 +000067 let vaddr = phys_to_virt(paddr);
Andrew Walbran848decf2022-12-15 14:39:38 +000068 let size = buffer.len();
69 if direction == BufferDirection::DeviceToDriver {
70 debug!(
71 "Copying VirtIO buffer back from {:#x} to {:#x}.",
72 paddr,
73 buffer.as_ptr() as *mut u8 as usize
74 );
75 unsafe {
Andrew Walbran272bd7a2023-01-24 14:02:36 +000076 copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
Andrew Walbran848decf2022-12-15 14:39:38 +000077 }
78 }
79
80 // Unshare and deallocate the shared copy of the buffer.
81 debug!("Unsharing VirtIO buffer {:#x}", paddr);
82 // Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
83 // the same as was used then.
84 unsafe {
85 dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
86 }
87 }
88}
Andrew Walbranb398fc82023-01-24 14:45:46 +000089
/// Returns true if the `inner` range lies entirely within `outer`.
fn contains_range(outer: &Range<u32>, inner: &Range<u32>) -> bool {
    outer.start <= inner.start && outer.end >= inner.end
}