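//! HAL implementation for the virtio-drivers crate: DMA memory for virtqueues is
//! allocated from a region shared with the host, and driver buffers are bounced
//! through freshly allocated shared copies.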
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use core::ptr::{copy_nonoverlapping, NonNull};
use log::debug;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

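/// A [`Hal`] implementation which shares all VirtIO DMA memory with the host.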
pub struct HalImpl;

impl Hal for HalImpl {
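    // Allocates a contiguous range of pages for DMA, shares it with the host, and
    // returns both the physical and virtual addresses of the range.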
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        debug!("dma_alloc: pages={}", pages);
        let size = pages * PAGE_SIZE;
        let vaddr =
            alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
        let paddr = virt_to_phys(vaddr);
        (paddr, vaddr)
    }

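    // Unshares and frees a range previously returned by `dma_alloc`.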
    fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
        let size = pages * PAGE_SIZE;
        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
        // the layout is the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
        }
        0
    }

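    // Translates the physical address of a device's MMIO region into a pointer
    // that the driver can dereference.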
    fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
        phys_to_virt(paddr)
    }

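    // Makes `buffer` visible to the device by allocating a shared bounce buffer
    // and, for driver-to-device transfers, copying the contents into it.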
    fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
        let size = buffer.len();

        // TODO: Copy to a pre-shared region rather than allocating and sharing each time.
        // Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
        let copy =
            alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
        if direction == BufferDirection::DriverToDevice {
            unsafe {
                copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
            }
        }
        virt_to_phys(copy)
    }

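    // Releases the bounce buffer created by `share`, first copying the device's
    // writes back into the original buffer for device-to-driver transfers.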
    fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
        let vaddr = phys_to_virt(paddr);
        let size = buffer.len();
        if direction == BufferDirection::DeviceToDriver {
            debug!(
                "Copying VirtIO buffer back from {:#x} to {:#x}.",
                paddr,
                buffer.as_ptr() as *mut u8 as usize
            );
            unsafe {
                copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
            }
        }

        // Unshare and deallocate the shared copy of the buffer.
        debug!("Unsharing VirtIO buffer {:#x}", paddr);
        // Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
        // the same as was used then.
        unsafe {
            dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
        }
    }
}
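
// A minimal sketch of how this HAL plugs into virtio-drivers. This is an
// assumption-laden illustration, not part of this file's API: exact module paths
// and constructor signatures depend on the virtio-drivers version in use, and
// `DEVICE_BASE` is a hypothetical MMIO base address for a virtio-mmio device.
//
//     use core::ptr::NonNull;
//     use virtio_drivers::{
//         device::blk::VirtIOBlk,
//         transport::mmio::{MmioTransport, VirtIOHeader},
//     };
//
//     let header = NonNull::new(DEVICE_BASE as *mut VirtIOHeader).unwrap();
//     // Safe only if DEVICE_BASE points to a valid, mapped virtio-mmio region.
//     let transport = unsafe { MmioTransport::new(header) }.unwrap();
//     let mut blk = VirtIOBlk::<HalImpl, MmioTransport>::new(transport).unwrap();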