use super::pci::PCI_INFO;
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use core::{
    ops::Range,
    ptr::{copy_nonoverlapping, NonNull},
};
use log::debug;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

pub struct HalImpl;

/// Implements the `Hal` trait for `HalImpl`.
///
/// # Safety
///
/// Callers of this implementation must follow the safety requirements documented for the unsafe
/// methods.
unsafe impl Hal for HalImpl {
    /// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
    ///
    /// # Implementation Safety
    ///
    /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
    /// reference in the program until it is deallocated by `dma_dealloc`, by allocating a unique
    /// block of memory using `alloc_shared` and returning a non-null pointer to it that is
    /// aligned to `PAGE_SIZE`.
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        debug!("dma_alloc: pages={}", pages);
        let size = pages * PAGE_SIZE;
        let vaddr =
            alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
        let paddr = virt_to_phys(vaddr);
        (paddr, vaddr)
    }

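    /// Deallocates the DMA memory previously allocated by `dma_alloc`, unsharing it with the host
    /// again. Returns 0 to indicate success.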
    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
        let size = pages * PAGE_SIZE;
        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
        // the layout is the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
        }
        0
    }

    /// Converts a physical address used for MMIO to a virtual address which the driver can access.
    ///
    /// # Implementation Safety
    ///
    /// `mmio_phys_to_virt` satisfies the requirement by checking that the mapped memory region
    /// is within the PCI MMIO range.
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
        let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
        // Check that the region is within the PCI MMIO range that we read from the device tree.
        // If not, the host is probably trying to do something malicious.
        if !contains_range(
            &pci_info.bar_range,
            &(paddr.try_into().expect("PCI MMIO region start was outside of 32-bit address space")
                ..paddr
                    .checked_add(size)
                    .expect("PCI MMIO region end overflowed")
                    .try_into()
                    .expect("PCI MMIO region end was outside of 32-bit address space")),
        ) {
            panic!("PCI MMIO region was outside of expected BAR range.");
        }
        phys_to_virt(paddr)
    }

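    /// Shares the given buffer with the host by allocating a shared bounce buffer, copying the
    /// data into it if the device will read from it, and returning the physical address of the
    /// copy.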
    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
        let size = buffer.len();

        // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
        // Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
        let copy =
            alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
        if direction == BufferDirection::DriverToDevice {
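            // Safe because the caller guarantees that `buffer` is valid for reads of `size`
            // bytes, and `copy` points to a freshly allocated region of at least `size` bytes
            // which cannot overlap with it.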
            unsafe {
                copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
            }
        }
        virt_to_phys(copy)
    }

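    /// Unshares a buffer previously shared by `share`: copies the data back from the shared copy
    /// if the device wrote to it, then unshares and deallocates the copy.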
    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
        let vaddr = phys_to_virt(paddr);
        let size = buffer.len();
        if direction == BufferDirection::DeviceToDriver {
            debug!(
                "Copying VirtIO buffer back from {:#x} to {:#x}.",
                paddr,
                buffer.as_ptr() as *mut u8 as usize
            );
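            // Safe because the shared region at `vaddr` was allocated by `share` with at least
            // `size` bytes, and the caller guarantees that `buffer` is valid for writes of `size`
            // bytes and doesn't overlap with it.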
            unsafe {
                copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
            }
        }

        // Unshare and deallocate the shared copy of the buffer.
        debug!("Unsharing VirtIO buffer {:#x}", paddr);
        // Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
        // the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
        }
    }
}

/// Returns true if `inner` is entirely contained within `outer`.
fn contains_range(outer: &Range<u32>, inner: &Range<u32>) -> bool {
    inner.start >= outer.start && inner.end <= outer.end
}
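
// A minimal sanity check for `contains_range`, as a sketch: it assumes the crate can be built
// with a host test harness, and the ranges used here are arbitrary illustrative values.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn contains_range_checks_both_bounds() {
        // A range contains itself and any subrange.
        assert!(contains_range(&(0x100..0x200), &(0x100..0x200)));
        assert!(contains_range(&(0x100..0x200), &(0x140..0x180)));
        // Ranges extending past either end are rejected.
        assert!(!contains_range(&(0x100..0x200), &(0x80..0x180)));
        assert!(!contains_range(&(0x100..0x200), &(0x180..0x280)));
    }
}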