use super::pci::PCI_INFO;
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use core::{
    ops::Range,
    ptr::{copy_nonoverlapping, NonNull},
};
use log::debug;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

pub struct HalImpl;

/// Implements the `Hal` trait for `HalImpl`.
///
/// # Safety
///
/// Callers of this implementation must follow the safety requirements documented for the unsafe
/// methods.
unsafe impl Hal for HalImpl {
    /// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
    ///
    /// # Implementation Safety
    ///
    /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
    /// reference in the program until it is deallocated by `dma_dealloc`, by allocating a unique
    /// block of memory using `alloc_shared` and returning a non-null pointer to it that is
    /// aligned to `PAGE_SIZE`.
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        debug!("dma_alloc: pages={}", pages);
        let size = pages * PAGE_SIZE;
        let vaddr =
            alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
        let paddr = virt_to_phys(vaddr);
        (paddr, vaddr)
    }

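    /// Deallocates the DMA memory previously allocated by `dma_alloc`, unsharing it with the
    /// host again. Returns 0 on success.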
    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
        let size = pages * PAGE_SIZE;
        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
        // the layout is the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
        }
        0
    }

    /// Converts a physical address used for MMIO to a virtual address which the driver can access.
    ///
    /// # Implementation Safety
    ///
    /// `mmio_phys_to_virt` satisfies the trait's safety requirement by checking that the mapped
    /// memory region is within the PCI MMIO range read from the device tree.
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
        let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
        // Check that the region is within the PCI MMIO range that we read from the device tree. If
        // not, the host is probably trying to do something malicious.
        if !contains_range(
            &pci_info.bar_range,
            &(paddr.try_into().expect("PCI MMIO region start was outside of 32-bit address space")
                ..paddr
                    .checked_add(size)
                    .expect("PCI MMIO region end overflowed")
                    .try_into()
                    .expect("PCI MMIO region end was outside of 32-bit address space")),
        ) {
            panic!("PCI MMIO region was outside of expected BAR range.");
        }
        phys_to_virt(paddr)
    }

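    /// Makes a shared copy of the given buffer for the device to access, copying the driver's
    /// data into it when the transfer direction requires it, and returns the physical address
    /// of the copy.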
    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
        let size = buffer.len();

        // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
        // Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
        let copy =
            alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
        if direction == BufferDirection::DriverToDevice {
            // Safe because the caller guarantees that `buffer` is valid for reads of `size`
            // bytes, and `copy` was just allocated with the same size, so the regions cannot
            // overlap.
            unsafe {
                copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
            }
        }
        virt_to_phys(copy)
    }

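    /// Copies the shared copy of the buffer back to the original buffer if the device may have
    /// written to it, then unshares and deallocates the shared copy.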
    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
        let vaddr = phys_to_virt(paddr);
        let size = buffer.len();
        if direction == BufferDirection::DeviceToDriver {
            debug!(
                "Copying VirtIO buffer back from {:#x} to {:#x}.",
                paddr,
                buffer.as_ptr() as *mut u8 as usize
            );
            // Safe because the shared copy at `vaddr` is valid for reads of `size` bytes, and
            // the caller guarantees that `buffer` is valid for writes of `size` bytes.
            unsafe {
                copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
            }
        }

        // Unshare and deallocate the shared copy of the buffer.
        debug!("Unsharing VirtIO buffer {:#x}", paddr);
        // Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
        // the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
        }
    }
}

/// Returns true if `inner` is entirely contained within `outer`.
fn contains_range(outer: &Range<u32>, inner: &Range<u32>) -> bool {
    inner.start >= outer.start && inner.end <= outer.end
}
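
// A minimal usage sketch (hypothetical: the exact `virtio_drivers` module paths and constructor
// signatures vary between versions of the crate). `HalImpl` is supplied as the `Hal` type
// parameter when constructing a transport or device driver, after which all DMA allocation,
// buffer sharing, and MMIO translation for that device goes through the methods above:
//
//     let transport = PciTransport::new::<HalImpl>(&mut pci_root, device_function)?;
//     let mut blk = VirtIOBlk::<HalImpl, PciTransport>::new(transport)?;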