use super::pci::PCI_INFO;
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use core::{
    ops::Range,
    ptr::{copy_nonoverlapping, NonNull},
};
use log::debug;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

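/// [`Hal`] implementation for the `virtio_drivers` crate: DMA and bounce buffers are backed by
/// memory explicitly shared with the host, and MMIO addresses are validated against the PCI BAR
/// range before being mapped.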
pub struct HalImpl;

impl Hal for HalImpl {
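    // Note: in a protected VM the host cannot see guest memory by default, so every DMA region
    // handed to a VirtIO device must first be shared with the host via `alloc_shared`, and
    // unshared again via `dealloc_shared` when it is returned.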
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        debug!("dma_alloc: pages={}", pages);
        let size = pages * PAGE_SIZE;
        let vaddr =
            alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
        let paddr = virt_to_phys(vaddr);
        (paddr, vaddr)
    }

    fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
        let size = pages * PAGE_SIZE;
        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
        // the layout is the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
        }
        0
    }

    fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
        let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
        // Check that the region is within the PCI MMIO range that we read from the device tree. If
        // not, the host is probably trying to do something malicious.
        if !contains_range(
            &pci_info.bar_range,
            &(paddr.try_into().expect("PCI MMIO region start was outside of 32-bit address space")
                ..paddr
                    .checked_add(size)
                    .expect("PCI MMIO region end overflowed")
                    .try_into()
                    .expect("PCI MMIO region end was outside of 32-bit address space")),
        ) {
            panic!("PCI MMIO region was outside of expected BAR range.");
        }
        phys_to_virt(paddr)
    }

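    // `share` and `unshare` implement a bounce buffer: since memory is shared with the host at
    // page granularity, sharing the caller's buffer in place could expose unrelated data on the
    // same pages, so the data is instead copied through a freshly allocated shared region for the
    // duration of the transfer.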
    fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
        let size = buffer.len();

        // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
        // Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
        let copy =
            alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
        if direction == BufferDirection::DriverToDevice {
            unsafe {
                copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
            }
        }
        virt_to_phys(copy)
    }

    fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
        let vaddr = phys_to_virt(paddr);
        let size = buffer.len();
        if direction == BufferDirection::DeviceToDriver {
            debug!(
                "Copying VirtIO buffer back from {:#x} to {:#x}.",
                paddr,
                buffer.as_ptr() as *mut u8 as usize
            );
            unsafe {
                copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
            }
        }

        // Unshare and deallocate the shared copy of the buffer.
        debug!("Unsharing VirtIO buffer {:#x}", paddr);
        // Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
        // the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
        }
    }
}
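
// A minimal usage sketch (assuming the `virtio_drivers` block device and PCI transport; the
// `transport` value and error handling here are hypothetical):
//
//     let mut blk = VirtIOBlk::<HalImpl, PciTransport>::new(transport)
//         .expect("Failed to initialise VirtIO block device");
//
// The HAL is only ever referenced as a type parameter; all of its methods are called by the
// `virtio_drivers` crate itself.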

/// Returns true if `inner` is entirely contained within `outer`.
fn contains_range(outer: &Range<u32>, inner: &Range<u32>) -> bool {
    inner.start >= outer.start && inner.end <= outer.end
}