blob: 10a67b964996fdb52991f315d7c88b7abd6e6735 [file] [log] [blame]
Andrew Walbran8217d062022-11-22 16:56:18 +00001// Copyright 2022, The Android Open Source Project
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! Functions to scan the PCI bus for VirtIO device and allocate BARs.
16
17use aarch64_paging::paging::MemoryRegion;
18use alloc::alloc::{alloc, dealloc, Layout};
Andrew Walbranb713baa2022-12-07 14:34:49 +000019use core::{ffi::CStr, mem::size_of};
Andrew Walbran8217d062022-11-22 16:56:18 +000020use libfdt::{Fdt, FdtNode, Reg};
21use log::{debug, info};
22use virtio_drivers::{
23 pci::{
24 bus::{BarInfo, Cam, Command, DeviceFunction, MemoryBarType, PciRoot},
25 virtio_device_type, PciTransport,
26 },
27 DeviceType, Hal, PhysAddr, Transport, VirtAddr, VirtIOBlk, PAGE_SIZE,
28};
29
30/// The standard sector size of a VirtIO block device, in bytes.
Andrew Walbranb713baa2022-12-07 14:34:49 +000031const SECTOR_SIZE_BYTES: usize = 512;
32
33/// The size in sectors of the test block device we expect.
34const EXPECTED_SECTOR_COUNT: usize = 4;
Andrew Walbran8217d062022-11-22 16:56:18 +000035
36/// Finds an FDT node with compatible=pci-host-cam-generic.
37pub fn pci_node(fdt: &Fdt) -> FdtNode {
38 fdt.compatible_nodes(CStr::from_bytes_with_nul(b"pci-host-cam-generic\0").unwrap())
39 .unwrap()
40 .next()
41 .unwrap()
42}
43
/// Scans bus 0 of the PCI root at the given MMIO region, allocates BARs for every VirtIO
/// device found, and checks each device we know how to check.
///
/// Panics unless exactly one VirtIO device was successfully checked.
///
/// NOTE(review): this is a safe fn wrapping an unsafe MMIO mapping; the caller must ensure
/// `reg` really describes the PCI CAM region and nothing else aliases it — TODO confirm
/// callers uphold this.
pub fn check_pci(reg: Reg<u64>, allocator: &mut PciMemory32Allocator) {
    // SAFETY: `reg.addr` is taken from the FDT's PCI node, so is assumed to point to a
    // valid MMIO configuration access mechanism (CAM) region — confirm at call sites.
    let mut pci_root = unsafe { PciRoot::new(reg.addr as *mut u8, Cam::MmioCam) };
    let mut checked_virtio_device_count = 0;
    for (device_function, info) in pci_root.enumerate_bus(0) {
        let (status, command) = pci_root.get_status_command(device_function);
        info!("Found {} at {}, status {:?} command {:?}", info, device_function, status, command);
        if let Some(virtio_type) = virtio_device_type(&info) {
            info!(" VirtIO {:?}", virtio_type);
            // BARs must be assigned before creating the transport, which uses them.
            allocate_bars(&mut pci_root, device_function, allocator);
            let mut transport =
                PciTransport::new::<HalImpl>(&mut pci_root, device_function).unwrap();
            info!(
                "Detected virtio PCI device with device type {:?}, features {:#018x}",
                transport.device_type(),
                transport.read_device_features(),
            );
            // Only devices of a type we understand count towards the total.
            if check_virtio_device(transport, virtio_type) {
                checked_virtio_device_count += 1;
            }
        }
    }

    // The test environment is expected to expose exactly one checkable VirtIO device.
    assert_eq!(checked_virtio_device_count, 1);
}
68
Andrew Walbranb713baa2022-12-07 14:34:49 +000069/// Checks the given VirtIO device, if we know how to.
70///
71/// Returns true if the device was checked, or false if it was ignored.
72fn check_virtio_device(transport: impl Transport, device_type: DeviceType) -> bool {
Andrew Walbran8217d062022-11-22 16:56:18 +000073 if device_type == DeviceType::Block {
Andrew Walbranb713baa2022-12-07 14:34:49 +000074 let mut blk = VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
75 info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
76 assert_eq!(blk.capacity(), EXPECTED_SECTOR_COUNT as u64);
77 let mut data = [0; SECTOR_SIZE_BYTES * EXPECTED_SECTOR_COUNT];
78 for i in 0..EXPECTED_SECTOR_COUNT {
79 blk.read_block(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
80 .expect("Failed to read block device.");
81 }
82 for (i, chunk) in data.chunks(size_of::<u32>()).enumerate() {
83 assert_eq!(chunk, &(i as u32).to_le_bytes());
84 }
85 info!("Read expected data from block device.");
86 true
87 } else {
88 false
Andrew Walbran8217d062022-11-22 16:56:18 +000089 }
90}
91
92#[derive(Copy, Clone, Debug, Eq, PartialEq)]
93struct PciMemoryFlags(u32);
94
95impl PciMemoryFlags {
96 pub fn prefetchable(self) -> bool {
97 self.0 & 0x80000000 != 0
98 }
99
100 pub fn range_type(self) -> PciRangeType {
101 PciRangeType::from((self.0 & 0x3000000) >> 24)
102 }
103}
104
/// Allocates 32-bit memory addresses for PCI BARs.
pub struct PciMemory32Allocator {
    // Next address available for allocation; advances as regions are handed out.
    start: u32,
    // Exclusive upper bound of the allocatable address range.
    end: u32,
}
110
impl PciMemory32Allocator {
    /// Creates a new allocator based on the ranges property of the given PCI node.
    ///
    /// Keeps the largest non-prefetchable memory range that is usable for 32-bit
    /// allocations: either a 32-bit memory range, or a 64-bit one lying entirely below
    /// 4 GiB. Panics if the node has no `ranges` property or no suitable range is found.
    pub fn for_pci_ranges(pci_node: &FdtNode) -> Self {
        let mut memory_32_address = 0;
        let mut memory_32_size = 0;
        // The PCI (child) address is parsed as u128: its bits above 64 hold the flags
        // cell, extracted below, and the low 64 bits hold the bus address.
        for range in pci_node
            .ranges::<u128, u64, u64>()
            .expect("Error getting ranges property from PCI node")
            .expect("PCI node missing ranges property.")
        {
            let flags = PciMemoryFlags((range.addr >> 64) as u32);
            let prefetchable = flags.prefetchable();
            let range_type = flags.range_type();
            let bus_address = range.addr as u64;
            let cpu_physical = range.parent_addr;
            let size = range.size;
            info!(
                "range: {:?} {}prefetchable bus address: {:#018x} host physical address: {:#018x} size: {:#018x}",
                range_type,
                if prefetchable { "" } else { "non-" },
                bus_address,
                cpu_physical,
                size,
            );
            // Prefer the biggest qualifying range seen so far. A Memory64 range only
            // qualifies if it fits entirely below 4 GiB, so its addresses are valid as
            // 32-bit BAR values.
            if !prefetchable
                && ((range_type == PciRangeType::Memory32 && size > memory_32_size.into())
                    || (range_type == PciRangeType::Memory64
                        && size > memory_32_size.into()
                        && bus_address + size < u32::MAX.into()))
            {
                // The allocator assumes an identity bus-to-CPU mapping for the range.
                assert_eq!(bus_address, cpu_physical);
                memory_32_address = u32::try_from(cpu_physical).unwrap();
                memory_32_size = u32::try_from(size).unwrap();
            }
        }
        if memory_32_size == 0 {
            panic!("No PCI memory regions found.");
        }

        Self { start: memory_32_address, end: memory_32_address + memory_32_size }
    }

    /// Gets a memory region covering the address space from which this allocator will allocate.
    pub fn get_region(&self) -> MemoryRegion {
        MemoryRegion::new(self.start as usize, self.end as usize)
    }

    /// Allocates a 32-bit memory address region for a PCI BAR of the given power-of-2 size.
    ///
    /// It will have alignment matching the size. The size must be a power of 2.
    /// Returns `None` when the remaining space cannot fit an aligned region of that size.
    pub fn allocate_memory_32(&mut self, size: u32) -> Option<u32> {
        assert!(size.is_power_of_two());
        // Align the next free address up so the allocation is naturally aligned.
        let allocated_address = align_up(self.start, size);
        if allocated_address + size <= self.end {
            self.start = allocated_address + size;
            Some(allocated_address)
        } else {
            None
        }
    }
}
173
/// The address-space type of a PCI range, as encoded in bits 24-25 of the flags cell.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum PciRangeType {
    /// PCI configuration space.
    ConfigurationSpace,
    /// I/O port space.
    IoSpace,
    /// 32-bit memory space.
    Memory32,
    /// 64-bit memory space.
    Memory64,
}

impl From<u32> for PciRangeType {
    /// Converts the 2-bit range-type code to the corresponding variant.
    ///
    /// Panics on any value outside 0..=3.
    fn from(code: u32) -> Self {
        match code {
            0 => PciRangeType::ConfigurationSpace,
            1 => PciRangeType::IoSpace,
            2 => PciRangeType::Memory32,
            3 => PciRangeType::Memory64,
            _ => panic!("Tried to convert invalid range type {}", code),
        }
    }
}
193
194/// Allocates appropriately-sized memory regions and assigns them to the device's BARs.
195fn allocate_bars(
196 root: &mut PciRoot,
197 device_function: DeviceFunction,
198 allocator: &mut PciMemory32Allocator,
199) {
200 let mut bar_index = 0;
201 while bar_index < 6 {
202 let info = root.bar_info(device_function, bar_index).unwrap();
203 debug!("BAR {}: {}", bar_index, info);
204 // Ignore I/O bars, as they aren't required for the VirtIO driver.
205 if let BarInfo::Memory { address_type, size, .. } = info {
206 match address_type {
207 _ if size == 0 => {}
208 MemoryBarType::Width32 => {
209 let address = allocator.allocate_memory_32(size).unwrap();
210 debug!("Allocated address {:#010x}", address);
211 root.set_bar_32(device_function, bar_index, address);
212 }
213 MemoryBarType::Width64 => {
214 let address = allocator.allocate_memory_32(size).unwrap();
215 debug!("Allocated address {:#010x}", address);
216 root.set_bar_64(device_function, bar_index, address.into());
217 }
218 _ => panic!("Memory BAR address type {:?} not supported.", address_type),
219 }
220 }
221
222 bar_index += 1;
223 if info.takes_two_entries() {
224 bar_index += 1;
225 }
226 }
227
228 // Enable the device to use its BARs.
229 root.set_command(
230 device_function,
231 Command::IO_SPACE | Command::MEMORY_SPACE | Command::BUS_MASTER,
232 );
233 let (status, command) = root.get_status_command(device_function);
234 debug!("Allocated BARs and enabled device, status {:?} command {:?}", status, command);
235}
236
/// Rounds `value` up to the next multiple of `alignment`, which must be a power of 2.
///
/// Fix: the previous formulation `((value - 1) | (alignment - 1)) + 1` underflowed
/// (panicking in debug builds) when `value == 0`; wrapping arithmetic makes that case
/// well-defined and returns 0, which is already aligned. (A `value` so large that the
/// aligned result would exceed `u32::MAX` still wraps, as before in release builds.)
const fn align_up(value: u32, alignment: u32) -> u32 {
    (value.wrapping_sub(1) | (alignment - 1)).wrapping_add(1)
}
240
241struct HalImpl;
242
243impl Hal for HalImpl {
244 fn dma_alloc(pages: usize) -> PhysAddr {
245 debug!("dma_alloc: pages={}", pages);
246 let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
247 // Safe because the layout has a non-zero size.
248 let vaddr = unsafe { alloc(layout) } as VirtAddr;
249 Self::virt_to_phys(vaddr)
250 }
251
252 fn dma_dealloc(paddr: PhysAddr, pages: usize) -> i32 {
253 debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
254 let vaddr = Self::phys_to_virt(paddr);
255 let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
256 // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
257 // the layout is the same as was used then.
258 unsafe {
259 dealloc(vaddr as *mut u8, layout);
260 }
261 0
262 }
263
    // Identity mapping: physical and virtual addresses are the same in this environment.
    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        paddr
    }
267
    // Identity mapping: physical and virtual addresses are the same in this environment.
    fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
        vaddr
    }
271}