blob: 82ea7cc8cd0ebc3e9acfa761908b11d7ae713c8d [file] [log] [blame]
Andrew Walbran8217d062022-11-22 16:56:18 +00001// Copyright 2022, The Android Open Source Project
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! Functions to scan the PCI bus for VirtIO device and allocate BARs.
16
17use aarch64_paging::paging::MemoryRegion;
18use alloc::alloc::{alloc, dealloc, Layout};
Andrew Walbranb713baa2022-12-07 14:34:49 +000019use core::{ffi::CStr, mem::size_of};
Andrew Walbran0d8b54d2022-12-08 16:32:33 +000020use libfdt::{AddressRange, Fdt, FdtNode, Reg};
Andrew Walbran8217d062022-11-22 16:56:18 +000021use log::{debug, info};
22use virtio_drivers::{
23 pci::{
24 bus::{BarInfo, Cam, Command, DeviceFunction, MemoryBarType, PciRoot},
25 virtio_device_type, PciTransport,
26 },
27 DeviceType, Hal, PhysAddr, Transport, VirtAddr, VirtIOBlk, PAGE_SIZE,
28};
29
/// The standard sector size of a VirtIO block device, in bytes.
const SECTOR_SIZE_BYTES: usize = 512;

/// The size in sectors of the test block device we expect.
/// (`check_virtio_device` asserts that the device reports exactly this capacity.)
const EXPECTED_SECTOR_COUNT: usize = 4;
Andrew Walbran8217d062022-11-22 16:56:18 +000035
36/// Finds an FDT node with compatible=pci-host-cam-generic.
37pub fn pci_node(fdt: &Fdt) -> FdtNode {
38 fdt.compatible_nodes(CStr::from_bytes_with_nul(b"pci-host-cam-generic\0").unwrap())
39 .unwrap()
40 .next()
41 .unwrap()
42}
43
/// Scans bus 0 of the PCI root described by `reg`, checking each VirtIO device found.
///
/// For every VirtIO device on the bus this allocates its BARs from `allocator`, creates a
/// PCI transport for it, and runs `check_virtio_device` on it. Panics unless exactly one
/// device was actually checked.
pub fn check_pci(reg: Reg<u64>, allocator: &mut PciMemory32Allocator) {
    // SAFETY note (review): assumes `reg` describes the PCI CAM MMIO region from the
    // device tree, already mapped and not aliased elsewhere — TODO confirm against caller.
    let mut pci_root = unsafe { PciRoot::new(reg.addr as *mut u8, Cam::MmioCam) };
    let mut checked_virtio_device_count = 0;
    for (device_function, info) in pci_root.enumerate_bus(0) {
        let (status, command) = pci_root.get_status_command(device_function);
        info!("Found {} at {}, status {:?} command {:?}", info, device_function, status, command);
        if let Some(virtio_type) = virtio_device_type(&info) {
            info!(" VirtIO {:?}", virtio_type);
            // BARs must be assigned before creating the transport, which accesses them.
            allocate_bars(&mut pci_root, device_function, allocator);
            let mut transport =
                PciTransport::new::<HalImpl>(&mut pci_root, device_function).unwrap();
            info!(
                "Detected virtio PCI device with device type {:?}, features {:#018x}",
                transport.device_type(),
                transport.read_device_features(),
            );
            // Only devices we know how to check count towards the expected total.
            if check_virtio_device(transport, virtio_type) {
                checked_virtio_device_count += 1;
            }
        }
    }

    // The test environment is expected to expose exactly one checkable VirtIO device.
    assert_eq!(checked_virtio_device_count, 1);
}
68
Andrew Walbranb713baa2022-12-07 14:34:49 +000069/// Checks the given VirtIO device, if we know how to.
70///
71/// Returns true if the device was checked, or false if it was ignored.
72fn check_virtio_device(transport: impl Transport, device_type: DeviceType) -> bool {
Andrew Walbran8217d062022-11-22 16:56:18 +000073 if device_type == DeviceType::Block {
Andrew Walbranb713baa2022-12-07 14:34:49 +000074 let mut blk = VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
75 info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
76 assert_eq!(blk.capacity(), EXPECTED_SECTOR_COUNT as u64);
77 let mut data = [0; SECTOR_SIZE_BYTES * EXPECTED_SECTOR_COUNT];
78 for i in 0..EXPECTED_SECTOR_COUNT {
79 blk.read_block(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
80 .expect("Failed to read block device.");
81 }
82 for (i, chunk) in data.chunks(size_of::<u32>()).enumerate() {
83 assert_eq!(chunk, &(i as u32).to_le_bytes());
84 }
85 info!("Read expected data from block device.");
86 true
87 } else {
88 false
Andrew Walbran8217d062022-11-22 16:56:18 +000089 }
90}
91
92#[derive(Copy, Clone, Debug, Eq, PartialEq)]
93struct PciMemoryFlags(u32);
94
95impl PciMemoryFlags {
96 pub fn prefetchable(self) -> bool {
97 self.0 & 0x80000000 != 0
98 }
99
100 pub fn range_type(self) -> PciRangeType {
101 PciRangeType::from((self.0 & 0x3000000) >> 24)
102 }
103}
104
/// Allocates 32-bit memory addresses for PCI BARs.
pub struct PciMemory32Allocator {
    // Next free address; advances as regions are handed out by `allocate_memory_32`.
    start: u32,
    // Exclusive upper bound of the allocatable window.
    end: u32,
}
110
111impl PciMemory32Allocator {
112 /// Creates a new allocator based on the ranges property of the given PCI node.
113 pub fn for_pci_ranges(pci_node: &FdtNode) -> Self {
114 let mut memory_32_address = 0;
115 let mut memory_32_size = 0;
Andrew Walbran0d8b54d2022-12-08 16:32:33 +0000116 for AddressRange { addr: (flags, bus_address), parent_addr: cpu_physical, size } in pci_node
117 .ranges::<(u32, u64), u64, u64>()
Andrew Walbran8217d062022-11-22 16:56:18 +0000118 .expect("Error getting ranges property from PCI node")
119 .expect("PCI node missing ranges property.")
120 {
Andrew Walbran0d8b54d2022-12-08 16:32:33 +0000121 let flags = PciMemoryFlags(flags);
Andrew Walbran8217d062022-11-22 16:56:18 +0000122 let prefetchable = flags.prefetchable();
123 let range_type = flags.range_type();
Andrew Walbran8217d062022-11-22 16:56:18 +0000124 info!(
125 "range: {:?} {}prefetchable bus address: {:#018x} host physical address: {:#018x} size: {:#018x}",
126 range_type,
127 if prefetchable { "" } else { "non-" },
128 bus_address,
129 cpu_physical,
130 size,
131 );
132 if !prefetchable
133 && ((range_type == PciRangeType::Memory32 && size > memory_32_size.into())
134 || (range_type == PciRangeType::Memory64
135 && size > memory_32_size.into()
136 && bus_address + size < u32::MAX.into()))
137 {
138 // Use the 64-bit range for 32-bit memory, if it is low enough.
139 assert_eq!(bus_address, cpu_physical);
140 memory_32_address = u32::try_from(cpu_physical).unwrap();
141 memory_32_size = u32::try_from(size).unwrap();
142 }
143 }
144 if memory_32_size == 0 {
145 panic!("No PCI memory regions found.");
146 }
147
148 Self { start: memory_32_address, end: memory_32_address + memory_32_size }
149 }
150
151 /// Gets a memory region covering the address space from which this allocator will allocate.
152 pub fn get_region(&self) -> MemoryRegion {
153 MemoryRegion::new(self.start as usize, self.end as usize)
154 }
155
156 /// Allocates a 32-bit memory address region for a PCI BAR of the given power-of-2 size.
157 ///
158 /// It will have alignment matching the size. The size must be a power of 2.
159 pub fn allocate_memory_32(&mut self, size: u32) -> Option<u32> {
160 assert!(size.is_power_of_two());
161 let allocated_address = align_up(self.start, size);
162 if allocated_address + size <= self.end {
163 self.start = allocated_address + size;
164 Some(allocated_address)
165 } else {
166 None
167 }
168 }
169}
170
/// The address-space type of a PCI `ranges` entry, decoded from the 2-bit space code in
/// the high flags cell (see `PciMemoryFlags::range_type`).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum PciRangeType {
    // Space code 0: configuration space.
    ConfigurationSpace,
    // Space code 1: I/O space.
    IoSpace,
    // Space code 2: 32-bit memory space.
    Memory32,
    // Space code 3: 64-bit memory space.
    Memory64,
}
178
impl From<u32> for PciRangeType {
    // Converts a PCI `ranges` space code (0-3) into the corresponding range type.
    fn from(value: u32) -> Self {
        match value {
            0 => Self::ConfigurationSpace,
            1 => Self::IoSpace,
            2 => Self::Memory32,
            3 => Self::Memory64,
            // Unreachable when the caller masks to 2 bits (as `range_type` does); any
            // other value indicates a caller bug, so panic.
            _ => panic!("Tried to convert invalid range type {}", value),
        }
    }
}
190
191/// Allocates appropriately-sized memory regions and assigns them to the device's BARs.
192fn allocate_bars(
193 root: &mut PciRoot,
194 device_function: DeviceFunction,
195 allocator: &mut PciMemory32Allocator,
196) {
197 let mut bar_index = 0;
198 while bar_index < 6 {
199 let info = root.bar_info(device_function, bar_index).unwrap();
200 debug!("BAR {}: {}", bar_index, info);
201 // Ignore I/O bars, as they aren't required for the VirtIO driver.
202 if let BarInfo::Memory { address_type, size, .. } = info {
203 match address_type {
204 _ if size == 0 => {}
205 MemoryBarType::Width32 => {
206 let address = allocator.allocate_memory_32(size).unwrap();
207 debug!("Allocated address {:#010x}", address);
208 root.set_bar_32(device_function, bar_index, address);
209 }
210 MemoryBarType::Width64 => {
211 let address = allocator.allocate_memory_32(size).unwrap();
212 debug!("Allocated address {:#010x}", address);
213 root.set_bar_64(device_function, bar_index, address.into());
214 }
215 _ => panic!("Memory BAR address type {:?} not supported.", address_type),
216 }
217 }
218
219 bar_index += 1;
220 if info.takes_two_entries() {
221 bar_index += 1;
222 }
223 }
224
225 // Enable the device to use its BARs.
226 root.set_command(
227 device_function,
228 Command::IO_SPACE | Command::MEMORY_SPACE | Command::BUS_MASTER,
229 );
230 let (status, command) = root.get_status_command(device_function);
231 debug!("Allocated BARs and enabled device, status {:?} command {:?}", status, command);
232}
233
/// Rounds `value` up to the next multiple of `alignment`.
///
/// `alignment` must be a power of two (asserted). Returns `value` unchanged when it is
/// already aligned; `align_up(0, a)` is 0. An aligned result that does not fit in `u32`
/// overflows (panicking in debug builds).
const fn align_up(value: u32, alignment: u32) -> u32 {
    // The mask trick below is only valid for power-of-two alignments; catch misuse early.
    assert!(alignment.is_power_of_two());
    let mask = alignment - 1;
    match value & mask {
        // Already aligned. This also handles value == 0, which the classic
        // `((value - 1) | mask) + 1` formulation underflows on in debug builds.
        0 => value,
        remainder => value + (alignment - remainder),
    }
}
237
/// `virtio_drivers` HAL backed by the global heap allocator, with an identity
/// virtual-to-physical mapping (see `phys_to_virt`/`virt_to_phys` below).
struct HalImpl;

impl Hal for HalImpl {
    // Allocates `pages` whole pages of page-aligned memory for DMA and returns its
    // physical address (equal to the virtual address under the identity mapping).
    fn dma_alloc(pages: usize) -> PhysAddr {
        debug!("dma_alloc: pages={}", pages);
        let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
        // Safe because the layout has a non-zero size.
        // NOTE(review): assumes pages > 0 (a zero-sized `alloc` would be UB) and does not
        // check for a null return from `alloc` — TODO confirm callers guarantee both.
        let vaddr = unsafe { alloc(layout) } as VirtAddr;
        Self::virt_to_phys(vaddr)
    }

    // Frees memory previously returned by `dma_alloc`. Always returns 0 (success).
    fn dma_dealloc(paddr: PhysAddr, pages: usize) -> i32 {
        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
        let vaddr = Self::phys_to_virt(paddr);
        let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
        // the layout is the same as was used then.
        unsafe {
            dealloc(vaddr as *mut u8, layout);
        }
        0
    }

    // Identity mapping: physical and virtual addresses are numerically equal.
    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        paddr
    }

    // Identity mapping: physical and virtual addresses are numerically equal.
    fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
        vaddr
    }
}
268}