// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Validate device assignment written in crosvm DT with VM DTBO, and apply it
//! to platform DT.
//! Declared in a separate lib to allow unit tests, which require libstd.

#[cfg(test)]
extern crate alloc;

use alloc::collections::{BTreeMap, BTreeSet};
use alloc::ffi::CString;
use alloc::fmt;
use alloc::vec;
use alloc::vec::Vec;
use core::ffi::CStr;
use core::iter::Iterator;
use core::mem;
use core::ops::Range;
// TODO(ptosi): Remove the need for this workaround.
#[cfg(not(test))]
use hyp::DeviceAssigningHypervisor;
use libfdt::{Fdt, FdtError, FdtNode, FdtNodeMut, Phandle, Reg};
use log::error;
use zerocopy::byteorder::big_endian::U32;
use zerocopy::FromBytes as _;

Jaewan Kimc6e023b2023-10-12 15:11:05 +090039// TODO(b/308694211): Use cstr! from vmbase instead.
40macro_rules! cstr {
41 ($str:literal) => {{
Pierre-Clément Tosid701a0b2023-11-07 15:38:59 +000042 const S: &str = concat!($str, "\0");
43 const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
44 Ok(v) => v,
45 Err(_) => panic!("string contains interior NUL"),
46 };
47 C
Jaewan Kimc6e023b2023-10-12 15:11:05 +090048 }};
49}
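
// Illustrative usage: `cstr!("target-path")` evaluates to a `&'static CStr` holding
// "target-path\0"; a literal with an interior NUL fails to compile via the panic in
// the const block above.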
50
Jaewan Kimc6e023b2023-10-12 15:11:05 +090051// TODO(b/277993056): Keep constants derived from platform.dts in one place.
52const CELLS_PER_INTERRUPT: usize = 3; // from /intc node in platform.dts
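// Illustrative layout (assumption: /intc is a GIC-style controller with
// #interrupt-cells = <3>): each interrupt is then described by three cells,
// e.g. <type irq-number flags>, which is what parse_interrupts() checks against.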
53
54/// Errors in device assignment.
55#[derive(Clone, Copy, Debug, Eq, PartialEq)]
56pub enum DeviceAssignmentError {
Jaewan Kim52477ae2023-11-21 21:20:52 +090057 /// Invalid VM DTBO
Jaewan Kimc6e023b2023-10-12 15:11:05 +090058 InvalidDtbo,
59 /// Invalid __symbols__
60 InvalidSymbols,
Jaewan Kim19b984f2023-12-04 15:16:50 +090061 /// Malformed <reg>. Can't parse.
62 MalformedReg,
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +000063 /// Invalid physical <reg> of assigned device.
64 InvalidPhysReg(u64, u64),
65 /// Invalid virtual <reg> of assigned device.
66 InvalidReg(u64, u64),
Jaewan Kimc6e023b2023-10-12 15:11:05 +090067 /// Invalid <interrupts>
68 InvalidInterrupts,
Jaewan Kim19b984f2023-12-04 15:16:50 +090069 /// Malformed <iommus>
70 MalformedIommus,
Jaewan Kim51ccfed2023-11-08 13:51:58 +090071 /// Invalid <iommus>
72 InvalidIommus,
Jaewan Kim19b984f2023-12-04 15:16:50 +090073 /// Invalid phys IOMMU node
74 InvalidPhysIommu,
Jaewan Kima9200492023-11-21 20:45:31 +090075 /// Invalid pvIOMMU node
76 InvalidPvIommu,
    /// Too many pvIOMMU nodes
78 TooManyPvIommu,
Jaewan Kim19b984f2023-12-04 15:16:50 +090079 /// Duplicated phys IOMMU IDs exist
80 DuplicatedIommuIds,
Jaewan Kim51ccfed2023-11-08 13:51:58 +090081 /// Duplicated pvIOMMU IDs exist
82 DuplicatedPvIommuIds,
Jaewan Kimf8abbb52023-12-12 22:11:39 +090083 /// Unsupported path format. Only supports full path.
84 UnsupportedPathFormat,
Jaewan Kimc6e023b2023-10-12 15:11:05 +090085 /// Unsupported overlay target syntax. Only supports <target-path> with full path.
86 UnsupportedOverlayTarget,
    /// Unsupported PhysIommu
88 UnsupportedPhysIommu,
89 /// Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.
90 UnsupportedPvIommusDuplication,
91 /// Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.
92 UnsupportedIommusDuplication,
Jaewan Kim51ccfed2023-11-08 13:51:58 +090093 /// Internal error
94 Internal,
Jaewan Kimc6e023b2023-10-12 15:11:05 +090095 /// Unexpected error from libfdt
96 UnexpectedFdtError(FdtError),
97}
98
99impl From<FdtError> for DeviceAssignmentError {
100 fn from(e: FdtError) -> Self {
101 DeviceAssignmentError::UnexpectedFdtError(e)
102 }
103}
104
105impl fmt::Display for DeviceAssignmentError {
106 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
107 match self {
108 Self::InvalidDtbo => write!(f, "Invalid DTBO"),
109 Self::InvalidSymbols => write!(
110 f,
111 "Invalid property in /__symbols__. Must point to valid assignable device node."
112 ),
Jaewan Kim19b984f2023-12-04 15:16:50 +0900113 Self::MalformedReg => write!(f, "Malformed <reg>. Can't parse"),
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +0000114 Self::InvalidReg(addr, size) => {
115 write!(f, "Invalid guest MMIO region (addr: {addr:#x}, size: {size:#x})")
116 }
117 Self::InvalidPhysReg(addr, size) => {
118 write!(f, "Invalid physical MMIO region (addr: {addr:#x}, size: {size:#x})")
119 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900120 Self::InvalidInterrupts => write!(f, "Invalid <interrupts>"),
Jaewan Kim19b984f2023-12-04 15:16:50 +0900121 Self::MalformedIommus => write!(f, "Malformed <iommus>. Can't parse."),
122 Self::InvalidIommus => {
123 write!(f, "Invalid <iommus>. Failed to validate with hypervisor")
124 }
125 Self::InvalidPhysIommu => write!(f, "Invalid phys IOMMU node"),
Jaewan Kima9200492023-11-21 20:45:31 +0900126 Self::InvalidPvIommu => write!(f, "Invalid pvIOMMU node"),
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900127 Self::TooManyPvIommu => write!(
128 f,
129 "Too many pvIOMMU node. Insufficient pre-populated pvIOMMUs in platform DT"
130 ),
Jaewan Kim19b984f2023-12-04 15:16:50 +0900131 Self::DuplicatedIommuIds => {
                write!(f, "Duplicated IOMMU IDs exist. IDs must be unique among IOMMU nodes")
133 }
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900134 Self::DuplicatedPvIommuIds => {
                write!(f, "Duplicated pvIOMMU IDs exist. IDs must be unique among pvIOMMU nodes")
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900136 }
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900137 Self::UnsupportedPathFormat => {
                write!(f, "Unsupported path format. Only supports full path")
139 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900140 Self::UnsupportedOverlayTarget => {
141 write!(f, "Unsupported overlay target. Only supports 'target-path = \"/\"'")
142 }
Jaewan Kim19b984f2023-12-04 15:16:50 +0900143 Self::UnsupportedPhysIommu => {
144 write!(f, "Unsupported Phys IOMMU. Currently only supports #iommu-cells = <1>")
145 }
146 Self::UnsupportedPvIommusDuplication => {
147 write!(f, "Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.")
148 }
149 Self::UnsupportedIommusDuplication => {
150 write!(f, "Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.")
151 }
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900152 Self::Internal => write!(f, "Internal error"),
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900153 Self::UnexpectedFdtError(e) => write!(f, "Unexpected Error from libfdt: {e}"),
154 }
155 }
156}
157
158pub type Result<T> = core::result::Result<T, DeviceAssignmentError>;
159
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900160#[derive(Clone, Default, Ord, PartialOrd, Eq, PartialEq)]
161pub struct DtPathTokens<'a> {
162 tokens: Vec<&'a [u8]>,
163}
164
165impl<'a> fmt::Debug for DtPathTokens<'a> {
166 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
167 let mut list = f.debug_list();
168 for token in &self.tokens {
169 let mut bytes = token.to_vec();
170 bytes.push(b'\0');
171 match CString::from_vec_with_nul(bytes) {
172 Ok(string) => list.entry(&string),
173 Err(_) => list.entry(token),
174 };
175 }
176 list.finish()
177 }
178}
179
180impl<'a> DtPathTokens<'a> {
181 fn new(path: &'a CStr) -> Result<Self> {
182 if path.to_bytes().first() != Some(&b'/') {
183 return Err(DeviceAssignmentError::UnsupportedPathFormat);
184 }
185 let tokens: Vec<_> = path
186 .to_bytes()
187 .split(|char| *char == b'/')
188 .filter(|&component| !component.is_empty())
189 .collect();
190 Ok(Self { tokens })
191 }
192
193 fn to_overlay_target_path(&self) -> Result<Self> {
194 if !self.is_overlayable_node() {
195 return Err(DeviceAssignmentError::InvalidDtbo);
196 }
197 Ok(Self { tokens: self.tokens.as_slice()[2..].to_vec() })
198 }
199
200 fn to_cstring(&self) -> CString {
201 if self.tokens.is_empty() {
202 return CString::new(*b"/\0").unwrap();
203 }
204
205 let size = self.tokens.iter().fold(0, |sum, token| sum + token.len() + 1);
206 let mut path = Vec::with_capacity(size + 1);
207 for token in &self.tokens {
208 path.push(b'/');
209 path.extend_from_slice(token);
210 }
211 path.push(b'\0');
212
213 CString::from_vec_with_nul(path).unwrap()
214 }
215
216 fn is_overlayable_node(&self) -> bool {
217 self.tokens.get(1) == Some(&&b"__overlay__"[..])
218 }
219}
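
// Illustrative sketch (not part of the original test suite): how a DTBO symbol path is
// tokenized and mapped to its overlay target path. The path below mirrors the example in
// the VM DTBO comment further down and is used here purely for illustration.
#[cfg(test)]
mod dt_path_tokens_example {
    use super::*;

    #[test]
    fn overlay_target_path_of_symbol() {
        // "/fragment@rng/__overlay__/rng" splits into ["fragment@rng", "__overlay__", "rng"];
        // dropping the fragment and __overlay__ components yields the overlaid path "/rng".
        let path = DtPathTokens::new(cstr!("/fragment@rng/__overlay__/rng")).unwrap();
        assert!(path.is_overlayable_node());
        let target = path.to_overlay_target_path().unwrap();
        assert_eq!(target.to_cstring(), CString::new("/rng").unwrap());
    }
}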
220
Jaewan Kim8f6f4662023-12-12 17:38:47 +0900221#[derive(Debug, Eq, PartialEq)]
222enum DeviceTreeChildrenMask {
223 Partial(Vec<DeviceTreeMask>),
224 All,
225}
226
227#[derive(Eq, PartialEq)]
228struct DeviceTreeMask {
229 name_bytes: Vec<u8>,
230 children: DeviceTreeChildrenMask,
231}
232
233impl fmt::Debug for DeviceTreeMask {
234 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
235 let name_bytes = [self.name_bytes.as_slice(), b"\0"].concat();
236
237 f.debug_struct("DeviceTreeMask")
238 .field("name", &CStr::from_bytes_with_nul(&name_bytes).unwrap())
239 .field("children", &self.children)
240 .finish()
241 }
242}
243
244impl DeviceTreeMask {
245 fn new() -> Self {
246 Self { name_bytes: b"/".to_vec(), children: DeviceTreeChildrenMask::Partial(Vec::new()) }
247 }
248
249 fn mask_internal(&mut self, path: &DtPathTokens, leaf_mask: DeviceTreeChildrenMask) -> bool {
250 let mut iter = self;
251 let mut newly_masked = false;
252 'next_token: for path_token in &path.tokens {
253 let DeviceTreeChildrenMask::Partial(ref mut children) = &mut iter.children else {
254 return false;
255 };
256
            // Note: Can't use an iterator for 'get or insert' here; the current borrow checker
            // rejects it (Polonius would accept it).
258 #[allow(clippy::needless_range_loop)]
259 for i in 0..children.len() {
260 if children[i].name_bytes.as_slice() == *path_token {
261 iter = &mut children[i];
262 newly_masked = false;
263 continue 'next_token;
264 }
265 }
266 let child = Self {
267 name_bytes: path_token.to_vec(),
268 children: DeviceTreeChildrenMask::Partial(Vec::new()),
269 };
270 children.push(child);
271 newly_masked = true;
272 iter = children.last_mut().unwrap()
273 }
274 iter.children = leaf_mask;
275 newly_masked
276 }
277
278 fn mask(&mut self, path: &DtPathTokens) -> bool {
279 self.mask_internal(path, DeviceTreeChildrenMask::Partial(Vec::new()))
280 }
281
282 fn mask_all(&mut self, path: &DtPathTokens) {
283 self.mask_internal(path, DeviceTreeChildrenMask::All);
284 }
285}
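
// Illustrative example (hypothetical paths): calling mask() with
// "/fragment@rng/__overlay__/rng" records the chain fragment@rng -> __overlay__ -> rng,
// with an empty Partial children list at the leaf; filter_with_mask() later keeps that
// chain and deletes any sibling node that was never masked, while mask_all() marks an
// entire subtree (e.g. /__symbols__) as kept.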
286
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900287/// Represents VM DTBO
288#[repr(transparent)]
289pub struct VmDtbo(Fdt);
290
291impl VmDtbo {
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900292 /// Wraps a mutable slice containing a VM DTBO.
293 ///
294 /// Fails if the VM DTBO does not pass validation.
295 pub fn from_mut_slice(dtbo: &mut [u8]) -> Result<&mut Self> {
296 // This validates DTBO
297 let fdt = Fdt::from_mut_slice(dtbo)?;
298 // SAFETY: VmDtbo is a transparent wrapper around Fdt, so representation is the same.
299 Ok(unsafe { mem::transmute::<&mut Fdt, &mut Self>(fdt) })
300 }
301
302 // Locates device node path as if the given dtbo node path is assigned and VM DTBO is overlaid.
303 // For given dtbo node path, this concatenates <target-path> of the enclosing fragment and
304 // relative path from __overlay__ node.
305 //
306 // Here's an example with sample VM DTBO:
307 // / {
308 // fragment@rng {
309 // target-path = "/"; // Always 'target-path = "/"'. Disallows <target> or other path.
310 // __overlay__ {
311 // rng { ... }; // Actual device node is here. If overlaid, path would be "/rng"
312 // };
313 // };
Jaewan Kim80ef9fa2024-02-25 16:08:14 +0000314 // __symbols__ { // Contains list of assignable devices
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900315 // rng = "/fragment@rng/__overlay__/rng";
316 // };
317 // };
318 //
319 // Then locate_overlay_target_path(cstr!("/fragment@rng/__overlay__/rng")) is Ok("/rng")
320 //
    // Contrary to fdt_overlay_target_offset(), this API enforces the overlay target property
    // 'target-path = "/"', so the overlay doesn't modify and/or append the platform DT's
    // existing nodes and/or properties. The enforcement is for compatibility reasons.
Jaewan Kim19b984f2023-12-04 15:16:50 +0900324 fn locate_overlay_target_path(
325 &self,
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900326 dtbo_node_path: &DtPathTokens,
Jaewan Kim19b984f2023-12-04 15:16:50 +0900327 dtbo_node: &FdtNode,
328 ) -> Result<CString> {
Jaewan Kim19b984f2023-12-04 15:16:50 +0900329 let fragment_node = dtbo_node.supernode_at_depth(1)?;
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900330 let target_path = fragment_node
Pierre-Clément Tosid701a0b2023-11-07 15:38:59 +0000331 .getprop_str(cstr!("target-path"))?
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900332 .ok_or(DeviceAssignmentError::InvalidDtbo)?;
333 if target_path != cstr!("/") {
334 return Err(DeviceAssignmentError::UnsupportedOverlayTarget);
335 }
336
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900337 let overlaid_path = dtbo_node_path.to_overlay_target_path()?;
338 Ok(overlaid_path.to_cstring())
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900339 }
Jaewan Kim19b984f2023-12-04 15:16:50 +0900340
341 fn parse_physical_iommus(physical_node: &FdtNode) -> Result<BTreeMap<Phandle, PhysIommu>> {
342 let mut phys_iommus = BTreeMap::new();
343 for (node, _) in physical_node.descendants() {
344 let Some(phandle) = node.get_phandle()? else {
345 continue; // Skips unreachable IOMMU node
346 };
347 let Some(iommu) = PhysIommu::parse(&node)? else {
348 continue; // Skip if not a PhysIommu.
349 };
350 if phys_iommus.insert(phandle, iommu).is_some() {
351 return Err(FdtError::BadPhandle.into());
352 }
353 }
354 Self::validate_physical_iommus(&phys_iommus)?;
355 Ok(phys_iommus)
356 }
357
358 fn validate_physical_iommus(phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<()> {
359 let unique_iommus: BTreeSet<_> = phys_iommus.values().cloned().collect();
360 if phys_iommus.len() != unique_iommus.len() {
361 return Err(DeviceAssignmentError::DuplicatedIommuIds);
362 }
363 Ok(())
364 }
365
366 fn validate_physical_devices(
367 physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
368 ) -> Result<()> {
        // Only need to validate <iommus> because <reg> will be validated together with the
        // virtual <reg>; see AssignedDeviceInfo::validate_reg().
371 let mut all_iommus = BTreeSet::new();
372 for physical_device in physical_devices.values() {
373 for iommu in &physical_device.iommus {
374 if !all_iommus.insert(iommu) {
375 error!("Unsupported phys IOMMU duplication found, <iommus> = {iommu:?}");
376 return Err(DeviceAssignmentError::UnsupportedIommusDuplication);
377 }
378 }
379 }
380 Ok(())
381 }
382
383 fn parse_physical_devices_with_iommus(
384 physical_node: &FdtNode,
385 phys_iommus: &BTreeMap<Phandle, PhysIommu>,
386 ) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
387 let mut physical_devices = BTreeMap::new();
388 for (node, _) in physical_node.descendants() {
389 let Some(info) = PhysicalDeviceInfo::parse(&node, phys_iommus)? else {
390 continue;
391 };
392 if physical_devices.insert(info.target, info).is_some() {
393 return Err(DeviceAssignmentError::InvalidDtbo);
394 }
395 }
396 Self::validate_physical_devices(&physical_devices)?;
397 Ok(physical_devices)
398 }
399
400 /// Parses Physical devices in VM DTBO
401 fn parse_physical_devices(&self) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
402 let Some(physical_node) = self.as_ref().node(cstr!("/host"))? else {
403 return Ok(BTreeMap::new());
404 };
405
406 let phys_iommus = Self::parse_physical_iommus(&physical_node)?;
407 Self::parse_physical_devices_with_iommus(&physical_node, &phys_iommus)
408 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900409
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900410 fn node(&self, path: &DtPathTokens) -> Result<Option<FdtNode>> {
411 let mut node = self.as_ref().root();
412 for token in &path.tokens {
413 let Some(subnode) = node.subnode_with_name_bytes(token)? else {
414 return Ok(None);
415 };
416 node = subnode;
417 }
418 Ok(Some(node))
419 }
Jaewan Kim8f6f4662023-12-12 17:38:47 +0900420
421 fn collect_overlayable_nodes_with_phandle(&self) -> Result<BTreeMap<Phandle, DtPathTokens>> {
422 let mut paths = BTreeMap::new();
423 let mut path: DtPathTokens = Default::default();
424 let root = self.as_ref().root();
425 for (node, depth) in root.descendants() {
426 path.tokens.truncate(depth - 1);
427 path.tokens.push(node.name()?.to_bytes());
428 if !path.is_overlayable_node() {
429 continue;
430 }
431 if let Some(phandle) = node.get_phandle()? {
432 paths.insert(phandle, path.clone());
433 }
434 }
435 Ok(paths)
436 }
437
438 fn collect_phandle_references_from_overlayable_nodes(
439 &self,
440 ) -> Result<BTreeMap<DtPathTokens, Vec<Phandle>>> {
441 const CELL_SIZE: usize = core::mem::size_of::<u32>();
442
443 let vm_dtbo = self.as_ref();
444
445 let mut phandle_map = BTreeMap::new();
446 let Some(local_fixups) = vm_dtbo.node(cstr!("/__local_fixups__"))? else {
447 return Ok(phandle_map);
448 };
449
450 let mut path: DtPathTokens = Default::default();
451 for (fixup_node, depth) in local_fixups.descendants() {
452 let node_name = fixup_node.name()?;
453 path.tokens.truncate(depth - 1);
454 path.tokens.push(node_name.to_bytes());
455 if path.tokens.len() != depth {
456 return Err(DeviceAssignmentError::Internal);
457 }
458 if !path.is_overlayable_node() {
459 continue;
460 }
461 let target_node = self.node(&path)?.ok_or(DeviceAssignmentError::InvalidDtbo)?;
462
463 let mut phandles = vec![];
464 for fixup_prop in fixup_node.properties()? {
465 let target_prop = target_node
466 .getprop(fixup_prop.name()?)
467 .or(Err(DeviceAssignmentError::InvalidDtbo))?
468 .ok_or(DeviceAssignmentError::InvalidDtbo)?;
469 let fixup_prop_values = fixup_prop.value()?;
470 if fixup_prop_values.is_empty() || fixup_prop_values.len() % CELL_SIZE != 0 {
471 return Err(DeviceAssignmentError::InvalidDtbo);
472 }
473
474 for fixup_prop_cell in fixup_prop_values.chunks(CELL_SIZE) {
475 let phandle_offset: usize = u32::from_be_bytes(
476 fixup_prop_cell.try_into().or(Err(DeviceAssignmentError::InvalidDtbo))?,
477 )
478 .try_into()
479 .or(Err(DeviceAssignmentError::InvalidDtbo))?;
480 if phandle_offset % CELL_SIZE != 0 {
481 return Err(DeviceAssignmentError::InvalidDtbo);
482 }
483 let phandle_value = target_prop
484 .get(phandle_offset..phandle_offset + CELL_SIZE)
485 .ok_or(DeviceAssignmentError::InvalidDtbo)?;
486 let phandle: Phandle = U32::ref_from(phandle_value)
487 .unwrap()
488 .get()
489 .try_into()
490 .or(Err(DeviceAssignmentError::InvalidDtbo))?;
491
492 phandles.push(phandle);
493 }
494 }
495 if !phandles.is_empty() {
496 phandle_map.insert(path.clone(), phandles);
497 }
498 }
499
500 Ok(phandle_map)
501 }
502
503 fn build_mask(&self, assigned_devices: Vec<DtPathTokens>) -> Result<DeviceTreeMask> {
504 if assigned_devices.is_empty() {
505 return Err(DeviceAssignmentError::Internal);
506 }
507
508 let dependencies = self.collect_phandle_references_from_overlayable_nodes()?;
509 let paths = self.collect_overlayable_nodes_with_phandle()?;
510
511 let mut mask = DeviceTreeMask::new();
512 let mut stack = assigned_devices;
513 while let Some(path) = stack.pop() {
514 if !mask.mask(&path) {
515 continue;
516 }
517 let Some(dst_phandles) = dependencies.get(&path) else {
518 continue;
519 };
520 for dst_phandle in dst_phandles {
521 let dst_path = paths.get(dst_phandle).ok_or(DeviceAssignmentError::Internal)?;
522 stack.push(dst_path.clone());
523 }
524 }
525
526 Ok(mask)
527 }
Jaewan Kimc39974e2023-12-02 01:13:30 +0900528}
529
Jaewan Kimc730ebf2024-02-22 10:34:55 +0900530fn filter_dangling_symbols(fdt: &mut Fdt) -> Result<()> {
531 if let Some(symbols) = fdt.symbols()? {
532 let mut removed = vec![];
533 for prop in symbols.properties()? {
534 let path = CStr::from_bytes_with_nul(prop.value()?)
535 .map_err(|_| DeviceAssignmentError::Internal)?;
536 if fdt.node(path)?.is_none() {
537 let name = prop.name()?;
538 removed.push(CString::from(name));
539 }
540 }
541
542 let mut symbols = fdt.symbols_mut()?.unwrap();
543 for name in removed {
544 symbols.nop_property(&name)?;
545 }
546 }
547 Ok(())
548}
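
// Illustrative example: if filtering removed "/fragment@0/__overlay__/light" (a node that
// exists in the test VM DTBO), its "light" entry under /__symbols__ would now point at a
// missing node, so filter_dangling_symbols() nops that property out to keep the symbol
// table consistent.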
549
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900550impl AsRef<Fdt> for VmDtbo {
551 fn as_ref(&self) -> &Fdt {
552 &self.0
553 }
554}
555
556impl AsMut<Fdt> for VmDtbo {
557 fn as_mut(&mut self) -> &mut Fdt {
558 &mut self.0
559 }
560}
561
Jaewan Kim8f6f4662023-12-12 17:38:47 +0900562// Filter any node that isn't masked by DeviceTreeMask.
563fn filter_with_mask(anchor: FdtNodeMut, mask: &DeviceTreeMask) -> Result<()> {
564 let mut stack = vec![mask];
565 let mut iter = anchor.next_node(0)?;
566 while let Some((node, depth)) = iter {
567 stack.truncate(depth);
568 let parent_mask = stack.last().unwrap();
569 let DeviceTreeChildrenMask::Partial(parent_mask_children) = &parent_mask.children else {
570 // Shouldn't happen. We only step-in if parent has DeviceTreeChildrenMask::Partial.
571 return Err(DeviceAssignmentError::Internal);
572 };
573
574 let name = node.as_node().name()?.to_bytes();
575 let mask = parent_mask_children.iter().find(|child_mask| child_mask.name_bytes == name);
576 if let Some(masked) = mask {
577 if let DeviceTreeChildrenMask::Partial(_) = &masked.children {
578 // This node is partially masked. Stepping-in.
579 stack.push(masked);
580 iter = node.next_node(depth)?;
581 } else {
582 // This node is fully masked. Stepping-out.
583 iter = node.next_node_skip_subnodes(depth)?;
584 }
585 } else {
586 // This node isn't masked.
587 iter = node.delete_and_next_node(depth)?;
588 }
589 }
590
591 Ok(())
592}
593
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900594#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
595struct PvIommu {
596 // ID from pvIOMMU node
597 id: u32,
598}
599
600impl PvIommu {
601 fn parse(node: &FdtNode) -> Result<Self> {
Jaewan Kima9200492023-11-21 20:45:31 +0900602 let iommu_cells = node
603 .getprop_u32(cstr!("#iommu-cells"))?
604 .ok_or(DeviceAssignmentError::InvalidPvIommu)?;
        // Ensures #iommu-cells = <1>, meaning that each `<iommus>` entry contains a pair of
        // (pvIOMMU ID, vSID).
607 if iommu_cells != 1 {
608 return Err(DeviceAssignmentError::InvalidPvIommu);
609 }
610 let id = node.getprop_u32(cstr!("id"))?.ok_or(DeviceAssignmentError::InvalidPvIommu)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900611 Ok(Self { id })
612 }
613}
614
Jaewan Kima9200492023-11-21 20:45:31 +0900615#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
616struct Vsid(u32);
617
Jaewan Kim19b984f2023-12-04 15:16:50 +0900618#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
619struct Sid(u64);
620
621impl From<u32> for Sid {
622 fn from(sid: u32) -> Self {
623 Self(sid.into())
624 }
625}
626
627#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
Jaewan Kim52477ae2023-11-21 21:20:52 +0900628struct DeviceReg {
629 addr: u64,
630 size: u64,
631}
632
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +0000633impl DeviceReg {
634 pub fn overlaps(&self, range: &Range<u64>) -> bool {
635 self.addr < range.end && range.start < self.addr.checked_add(self.size).unwrap()
636 }
637}
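
// Illustrative check (made-up values): DeviceReg { addr: 0x7fff_0000, size: 0x2_0000 }
// overlaps 0x7fc0_0000..0x8000_0000 since 0x7fff_0000 < 0x8000_0000 and
// 0x7fc0_0000 < 0x8001_0000; validate_reg() below uses this to reject assigned regions
// that intersect PVMFW_RANGE.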
638
Jaewan Kim52477ae2023-11-21 21:20:52 +0900639impl TryFrom<Reg<u64>> for DeviceReg {
640 type Error = DeviceAssignmentError;
641
642 fn try_from(reg: Reg<u64>) -> Result<Self> {
Jaewan Kim19b984f2023-12-04 15:16:50 +0900643 Ok(Self { addr: reg.addr, size: reg.size.ok_or(DeviceAssignmentError::MalformedReg)? })
Jaewan Kim52477ae2023-11-21 21:20:52 +0900644 }
645}
646
647fn parse_node_reg(node: &FdtNode) -> Result<Vec<DeviceReg>> {
648 node.reg()?
Jaewan Kim19b984f2023-12-04 15:16:50 +0900649 .ok_or(DeviceAssignmentError::MalformedReg)?
Jaewan Kim52477ae2023-11-21 21:20:52 +0900650 .map(DeviceReg::try_from)
651 .collect::<Result<Vec<_>>>()
652}
653
654fn to_be_bytes(reg: &[DeviceReg]) -> Vec<u8> {
655 let mut reg_cells = vec![];
656 for x in reg {
657 reg_cells.extend_from_slice(&x.addr.to_be_bytes());
658 reg_cells.extend_from_slice(&x.size.to_be_bytes());
659 }
660 reg_cells
661}
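
// Illustrative encoding (made-up values): DeviceReg { addr: 0x9000, size: 0x1000 }
// serializes to 16 bytes: the 8-byte big-endian address followed by the 8-byte
// big-endian size, i.e. two 32-bit cells each for address and size.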
662
Jaewan Kim19b984f2023-12-04 15:16:50 +0900663#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
664struct PhysIommu {
665 token: u64,
666}
667
668impl PhysIommu {
669 fn parse(node: &FdtNode) -> Result<Option<Self>> {
670 let Some(token) = node.getprop_u64(cstr!("android,pvmfw,token"))? else {
671 return Ok(None);
672 };
673 let Some(iommu_cells) = node.getprop_u32(cstr!("#iommu-cells"))? else {
674 return Err(DeviceAssignmentError::InvalidPhysIommu);
675 };
        // Currently only supports #iommu-cells = <1>.
        // In that case each `<iommus>` entry contains a pair of (pIOMMU phandle, SID token).
678 if iommu_cells != 1 {
679 return Err(DeviceAssignmentError::UnsupportedPhysIommu);
680 }
681 Ok(Some(Self { token }))
682 }
683}
684
685#[derive(Debug)]
686struct PhysicalDeviceInfo {
687 target: Phandle,
688 reg: Vec<DeviceReg>,
689 iommus: Vec<(PhysIommu, Sid)>,
690}
691
692impl PhysicalDeviceInfo {
693 fn parse_iommus(
694 node: &FdtNode,
695 phys_iommus: &BTreeMap<Phandle, PhysIommu>,
696 ) -> Result<Vec<(PhysIommu, Sid)>> {
697 let mut iommus = vec![];
698 let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
699 return Ok(iommus);
700 };
701 while let Some(cell) = cells.next() {
702 // Parse pIOMMU ID
703 let phandle =
704 Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
705 let iommu = phys_iommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;
706
707 // Parse Sid
708 let Some(cell) = cells.next() else {
709 return Err(DeviceAssignmentError::MalformedIommus);
710 };
711
712 iommus.push((*iommu, Sid::from(cell)));
713 }
714 Ok(iommus)
715 }
716
717 fn parse(node: &FdtNode, phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<Option<Self>> {
718 let Some(phandle) = node.getprop_u32(cstr!("android,pvmfw,target"))? else {
719 return Ok(None);
720 };
721 let target = Phandle::try_from(phandle)?;
722 let reg = parse_node_reg(node)?;
723 let iommus = Self::parse_iommus(node, phys_iommus)?;
724 Ok(Some(Self { target, reg, iommus }))
725 }
726}
727
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900728/// Assigned device information parsed from crosvm DT.
/// Keeps everything as owned data because the underlying FDT will be reused for the platform DT.
730#[derive(Debug, Eq, PartialEq)]
731struct AssignedDeviceInfo {
732 // Node path of assigned device (e.g. "/rng")
733 node_path: CString,
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900734 // <reg> property from the crosvm DT
Jaewan Kim52477ae2023-11-21 21:20:52 +0900735 reg: Vec<DeviceReg>,
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900736 // <interrupts> property from the crosvm DT
737 interrupts: Vec<u8>,
Jaewan Kima9200492023-11-21 20:45:31 +0900738 // Parsed <iommus> property from the crosvm DT. Tuple of PvIommu and vSID.
739 iommus: Vec<(PvIommu, Vsid)>,
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900740}
741
742impl AssignedDeviceInfo {
Jaewan Kim19b984f2023-12-04 15:16:50 +0900743 fn validate_reg(
744 device_reg: &[DeviceReg],
745 physical_device_reg: &[DeviceReg],
Jaewan Kim52477ae2023-11-21 21:20:52 +0900746 hypervisor: &dyn DeviceAssigningHypervisor,
Jaewan Kim19b984f2023-12-04 15:16:50 +0900747 ) -> Result<()> {
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +0000748 let mut virt_regs = device_reg.iter();
749 let mut phys_regs = physical_device_reg.iter();
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +0000750 // TODO(b/308694211): Move this constant to vmbase::layout once vmbase is std-compatible.
751 const PVMFW_RANGE: Range<u64> = 0x7fc0_0000..0x8000_0000;
        // Virtual <reg> and physical <reg> entries must match 1:1, in order.
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +0000753 for (reg, phys_reg) in virt_regs.by_ref().zip(phys_regs.by_ref()) {
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +0000754 if reg.overlaps(&PVMFW_RANGE) {
755 return Err(DeviceAssignmentError::InvalidReg(reg.addr, reg.size));
756 }
757 // If this call returns successfully, hyp has mapped the MMIO region at `reg`.
Jaewan Kim19b984f2023-12-04 15:16:50 +0900758 let addr = hypervisor.get_phys_mmio_token(reg.addr, reg.size).map_err(|e| {
Pierre-Clément Tosi08d6e3f2024-03-13 18:22:16 +0000759 error!("Hypervisor error while requesting MMIO token: {e}");
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +0000760 DeviceAssignmentError::InvalidReg(reg.addr, reg.size)
Jaewan Kim52477ae2023-11-21 21:20:52 +0900761 })?;
            // Only check the address; the hypervisor guarantees the size matches on success.
Jaewan Kim19b984f2023-12-04 15:16:50 +0900763 if phys_reg.addr != addr {
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +0000764 error!("Assigned device {reg:x?} has unexpected physical address");
765 return Err(DeviceAssignmentError::InvalidPhysReg(addr, reg.size));
Jaewan Kim19b984f2023-12-04 15:16:50 +0900766 }
Jaewan Kim52477ae2023-11-21 21:20:52 +0900767 }
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +0000768
769 if let Some(DeviceReg { addr, size }) = virt_regs.next() {
770 return Err(DeviceAssignmentError::InvalidReg(*addr, *size));
771 }
772
773 if let Some(DeviceReg { addr, size }) = phys_regs.next() {
774 return Err(DeviceAssignmentError::InvalidPhysReg(*addr, *size));
775 }
776
Jaewan Kim19b984f2023-12-04 15:16:50 +0900777 Ok(())
Jaewan Kim52477ae2023-11-21 21:20:52 +0900778 }
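
    // Illustrative flow for validate_reg() above (values taken from the unit tests): with a
    // virtual <reg> of (0x9, 0xFF) and the hypervisor returning MMIO token 0x12F00000 for
    // that range, validation succeeds only if the physical device's matching <reg> entry
    // also starts at 0x12F00000; any unmatched leftover entry on either side is reported as
    // InvalidReg or InvalidPhysReg.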
779
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900780 fn parse_interrupts(node: &FdtNode) -> Result<Vec<u8>> {
        // Validation: check that the number of <interrupts> cells is a multiple of
        // #interrupt-cells. We can't know in advance how many interrupts there will be.
783 let interrupts_cells = node
Pierre-Clément Tosid701a0b2023-11-07 15:38:59 +0000784 .getprop_cells(cstr!("interrupts"))?
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900785 .ok_or(DeviceAssignmentError::InvalidInterrupts)?
786 .count();
787 if interrupts_cells % CELLS_PER_INTERRUPT != 0 {
788 return Err(DeviceAssignmentError::InvalidInterrupts);
789 }
790
791 // Once validated, keep the raw bytes so patch can be done with setprop()
Pierre-Clément Tosid701a0b2023-11-07 15:38:59 +0000792 Ok(node.getprop(cstr!("interrupts")).unwrap().unwrap().into())
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900793 }
794
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900795 // TODO(b/277993056): Also validate /__local_fixups__ to ensure that <iommus> has phandle.
Jaewan Kima9200492023-11-21 20:45:31 +0900796 fn parse_iommus(
797 node: &FdtNode,
798 pviommus: &BTreeMap<Phandle, PvIommu>,
799 ) -> Result<Vec<(PvIommu, Vsid)>> {
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900800 let mut iommus = vec![];
Jaewan Kima9200492023-11-21 20:45:31 +0900801 let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900802 return Ok(iommus);
803 };
Jaewan Kima9200492023-11-21 20:45:31 +0900804 while let Some(cell) = cells.next() {
805 // Parse pvIOMMU ID
Jaewan Kim19b984f2023-12-04 15:16:50 +0900806 let phandle =
807 Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
808 let pviommu = pviommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;
Jaewan Kima9200492023-11-21 20:45:31 +0900809
810 // Parse vSID
811 let Some(cell) = cells.next() else {
Jaewan Kim19b984f2023-12-04 15:16:50 +0900812 return Err(DeviceAssignmentError::MalformedIommus);
Jaewan Kima9200492023-11-21 20:45:31 +0900813 };
814 let vsid = Vsid(cell);
815
816 iommus.push((*pviommu, vsid));
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900817 }
818 Ok(iommus)
819 }
820
Jaewan Kim19b984f2023-12-04 15:16:50 +0900821 fn validate_iommus(
822 iommus: &[(PvIommu, Vsid)],
823 physical_device_iommu: &[(PhysIommu, Sid)],
824 hypervisor: &dyn DeviceAssigningHypervisor,
825 ) -> Result<()> {
826 if iommus.len() != physical_device_iommu.len() {
827 return Err(DeviceAssignmentError::InvalidIommus);
828 }
        // pvIOMMUs can be reordered, and the hypervisor may not guarantee a 1:1 mapping,
        // so we need to track which physical entries have already been matched.
831 let mut physical_device_iommu = physical_device_iommu.to_vec();
832 for (pviommu, vsid) in iommus {
Pierre-Clément Tosi08d6e3f2024-03-13 18:22:16 +0000833 let (id, sid) =
834 hypervisor.get_phys_iommu_token(pviommu.id.into(), vsid.0.into()).map_err(|e| {
835 error!("Hypervisor error while requesting IOMMU token ({pviommu:?}, {vsid:?}): {e}");
836 DeviceAssignmentError::InvalidIommus
837 })?;
Jaewan Kim19b984f2023-12-04 15:16:50 +0900838
839 let pos = physical_device_iommu
840 .iter()
841 .position(|(phys_iommu, phys_sid)| (phys_iommu.token, phys_sid.0) == (id, sid));
842 match pos {
843 Some(pos) => physical_device_iommu.remove(pos),
844 None => {
845 error!("Failed to validate device <iommus>. No matching phys iommu or duplicated mapping for pviommu={pviommu:?}, vsid={vsid:?}");
846 return Err(DeviceAssignmentError::InvalidIommus);
847 }
848 };
849 }
850 Ok(())
851 }
852
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900853 fn parse(
854 fdt: &Fdt,
855 vm_dtbo: &VmDtbo,
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900856 dtbo_node_path: &DtPathTokens,
Jaewan Kim19b984f2023-12-04 15:16:50 +0900857 physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900858 pviommus: &BTreeMap<Phandle, PvIommu>,
Jaewan Kim52477ae2023-11-21 21:20:52 +0900859 hypervisor: &dyn DeviceAssigningHypervisor,
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900860 ) -> Result<Option<Self>> {
Jaewan Kim19b984f2023-12-04 15:16:50 +0900861 let dtbo_node =
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900862 vm_dtbo.node(dtbo_node_path)?.ok_or(DeviceAssignmentError::InvalidSymbols)?;
Jaewan Kim19b984f2023-12-04 15:16:50 +0900863 let node_path = vm_dtbo.locate_overlay_target_path(dtbo_node_path, &dtbo_node)?;
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900864
865 let Some(node) = fdt.node(&node_path)? else { return Ok(None) };
866
Jaewan Kim80ef9fa2024-02-25 16:08:14 +0000867 // Currently can only assign devices backed by physical devices.
Jaewan Kim19b984f2023-12-04 15:16:50 +0900868 let phandle = dtbo_node.get_phandle()?.ok_or(DeviceAssignmentError::InvalidDtbo)?;
Jaewan Kim80ef9fa2024-02-25 16:08:14 +0000869 let Some(physical_device) = physical_devices.get(&phandle) else {
            // If the labeled DT node isn't backed by a physical device node, just return None.
            // It's not an error because such a node can be a dependency of assignable device nodes.
872 return Ok(None);
873 };
Jaewan Kim19b984f2023-12-04 15:16:50 +0900874
875 let reg = parse_node_reg(&node)?;
876 Self::validate_reg(&reg, &physical_device.reg, hypervisor)?;
877
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900878 let interrupts = Self::parse_interrupts(&node)?;
Jaewan Kim19b984f2023-12-04 15:16:50 +0900879
880 let iommus = Self::parse_iommus(&node, pviommus)?;
881 Self::validate_iommus(&iommus, &physical_device.iommus, hypervisor)?;
882
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900883 Ok(Some(Self { node_path, reg, interrupts, iommus }))
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900884 }
885
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900886 fn patch(&self, fdt: &mut Fdt, pviommu_phandles: &BTreeMap<PvIommu, Phandle>) -> Result<()> {
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900887 let mut dst = fdt.node_mut(&self.node_path)?.unwrap();
Jaewan Kim52477ae2023-11-21 21:20:52 +0900888 dst.setprop(cstr!("reg"), &to_be_bytes(&self.reg))?;
Pierre-Clément Tosid701a0b2023-11-07 15:38:59 +0000889 dst.setprop(cstr!("interrupts"), &self.interrupts)?;
Jaewan Kima9200492023-11-21 20:45:31 +0900890 let mut iommus = Vec::with_capacity(8 * self.iommus.len());
891 for (pviommu, vsid) in &self.iommus {
892 let phandle = pviommu_phandles.get(pviommu).unwrap();
893 iommus.extend_from_slice(&u32::from(*phandle).to_be_bytes());
894 iommus.extend_from_slice(&vsid.0.to_be_bytes());
895 }
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900896 dst.setprop(cstr!("iommus"), &iommus)?;
897
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900898 Ok(())
899 }
900}
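
// Illustrative result (made-up values): after patch(), an assigned "/rng" node in the
// platform DT carries the validated guest <reg>, the original <interrupts> bytes, and an
// <iommus> property rewritten as (pvIOMMU phandle, vSID) u32 pairs that reference the
// pre-populated pvIOMMU nodes.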
901
Jaewan Kim8f6f4662023-12-12 17:38:47 +0900902#[derive(Debug, Eq, PartialEq)]
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900903pub struct DeviceAssignmentInfo {
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900904 pviommus: BTreeSet<PvIommu>,
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900905 assigned_devices: Vec<AssignedDeviceInfo>,
Jaewan Kim8f6f4662023-12-12 17:38:47 +0900906 vm_dtbo_mask: DeviceTreeMask,
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900907}
908
909impl DeviceAssignmentInfo {
Chris Wailes9d09f572024-01-16 13:31:02 -0800910 const PVIOMMU_COMPATIBLE: &'static CStr = cstr!("pkvm,pviommu");
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900911
912 /// Parses pvIOMMUs in fdt
913 // Note: This will validate pvIOMMU ids' uniqueness, even when unassigned.
914 fn parse_pviommus(fdt: &Fdt) -> Result<BTreeMap<Phandle, PvIommu>> {
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900915 let mut pviommus = BTreeMap::new();
916 for compatible in fdt.compatible_nodes(Self::PVIOMMU_COMPATIBLE)? {
917 let Some(phandle) = compatible.get_phandle()? else {
918 continue; // Skips unreachable pvIOMMU node
919 };
920 let pviommu = PvIommu::parse(&compatible)?;
921 if pviommus.insert(phandle, pviommu).is_some() {
922 return Err(FdtError::BadPhandle.into());
923 }
924 }
925 Ok(pviommus)
926 }
927
Jaewan Kim19b984f2023-12-04 15:16:50 +0900928 fn validate_pviommu_topology(assigned_devices: &[AssignedDeviceInfo]) -> Result<()> {
929 let mut all_iommus = BTreeSet::new();
930 for assigned_device in assigned_devices {
931 for iommu in &assigned_device.iommus {
932 if !all_iommus.insert(iommu) {
933 error!("Unsupported pvIOMMU duplication found, <iommus> = {iommu:?}");
934 return Err(DeviceAssignmentError::UnsupportedPvIommusDuplication);
935 }
936 }
937 }
938 Ok(())
939 }
940
Pierre-Clément Tosie5cca922024-04-30 17:54:08 +0100941 // TODO(b/308694211): Remove this workaround for visibility once using
942 // vmbase::hyp::DeviceAssigningHypervisor for tests.
943 #[cfg(test)]
944 fn parse(
945 fdt: &Fdt,
946 vm_dtbo: &VmDtbo,
947 hypervisor: &dyn DeviceAssigningHypervisor,
948 ) -> Result<Option<Self>> {
949 Self::internal_parse(fdt, vm_dtbo, hypervisor)
950 }
951
952 #[cfg(not(test))]
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900953 /// Parses fdt and vm_dtbo, and creates new DeviceAssignmentInfo
954 // TODO(b/277993056): Parse __local_fixups__
955 // TODO(b/277993056): Parse __fixups__
Jaewan Kim52477ae2023-11-21 21:20:52 +0900956 pub fn parse(
957 fdt: &Fdt,
958 vm_dtbo: &VmDtbo,
959 hypervisor: &dyn DeviceAssigningHypervisor,
960 ) -> Result<Option<Self>> {
Pierre-Clément Tosie5cca922024-04-30 17:54:08 +0100961 Self::internal_parse(fdt, vm_dtbo, hypervisor)
962 }
963
964 fn internal_parse(
965 fdt: &Fdt,
966 vm_dtbo: &VmDtbo,
967 hypervisor: &dyn DeviceAssigningHypervisor,
968 ) -> Result<Option<Self>> {
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900969 let Some(symbols_node) = vm_dtbo.as_ref().symbols()? else {
970 // /__symbols__ should contain all assignable devices.
971 // If empty, then nothing can be assigned.
972 return Ok(None);
973 };
974
Jaewan Kim51ccfed2023-11-08 13:51:58 +0900975 let pviommus = Self::parse_pviommus(fdt)?;
976 let unique_pviommus: BTreeSet<_> = pviommus.values().cloned().collect();
977 if pviommus.len() != unique_pviommus.len() {
978 return Err(DeviceAssignmentError::DuplicatedPvIommuIds);
979 }
980
Jaewan Kim19b984f2023-12-04 15:16:50 +0900981 let physical_devices = vm_dtbo.parse_physical_devices()?;
982
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900983 let mut assigned_devices = vec![];
Jaewan Kim8f6f4662023-12-12 17:38:47 +0900984 let mut assigned_device_paths = vec![];
Jaewan Kimc6e023b2023-10-12 15:11:05 +0900985 for symbol_prop in symbols_node.properties()? {
986 let symbol_prop_value = symbol_prop.value()?;
987 let dtbo_node_path = CStr::from_bytes_with_nul(symbol_prop_value)
988 .or(Err(DeviceAssignmentError::InvalidSymbols))?;
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900989 let dtbo_node_path = DtPathTokens::new(dtbo_node_path)?;
990 if !dtbo_node_path.is_overlayable_node() {
Jaewan Kimc39974e2023-12-02 01:13:30 +0900991 continue;
992 }
Jaewan Kim19b984f2023-12-04 15:16:50 +0900993 let assigned_device = AssignedDeviceInfo::parse(
994 fdt,
995 vm_dtbo,
Jaewan Kimf8abbb52023-12-12 22:11:39 +0900996 &dtbo_node_path,
Jaewan Kim19b984f2023-12-04 15:16:50 +0900997 &physical_devices,
998 &pviommus,
999 hypervisor,
1000 )?;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001001 if let Some(assigned_device) = assigned_device {
1002 assigned_devices.push(assigned_device);
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001003 assigned_device_paths.push(dtbo_node_path);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001004 }
1005 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001006 if assigned_devices.is_empty() {
1007 return Ok(None);
1008 }
Jaewan Kimc39974e2023-12-02 01:13:30 +09001009
Jaewan Kim19b984f2023-12-04 15:16:50 +09001010 Self::validate_pviommu_topology(&assigned_devices)?;
1011
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001012 let mut vm_dtbo_mask = vm_dtbo.build_mask(assigned_device_paths)?;
1013 vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__local_fixups__"))?);
1014 vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__symbols__"))?);
Jaewan Kimc39974e2023-12-02 01:13:30 +09001015
        // Note: Any node without __overlay__ will be ignored by fdt_apply_overlay,
        // so it doesn't need to be filtered.
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001018
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001019 Ok(Some(Self { pviommus: unique_pviommus, assigned_devices, vm_dtbo_mask }))
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001020 }
1021
1022 /// Filters VM DTBO to only contain necessary information for booting pVM
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001023 pub fn filter(&self, vm_dtbo: &mut VmDtbo) -> Result<()> {
1024 let vm_dtbo = vm_dtbo.as_mut();
1025
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001026 // Filter unused references in /__local_fixups__
1027 if let Some(local_fixups) = vm_dtbo.node_mut(cstr!("/__local_fixups__"))? {
1028 filter_with_mask(local_fixups, &self.vm_dtbo_mask)?;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001029 }
1030
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001031 // Filter unused nodes in rest of tree
1032 let root = vm_dtbo.root_mut();
1033 filter_with_mask(root, &self.vm_dtbo_mask)?;
1034
Jaewan Kim371f6c82024-02-24 01:33:37 +09001035 filter_dangling_symbols(vm_dtbo)
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001036 }
1037
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001038 fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
Pierre-Clément Tosi244efea2024-02-16 14:48:14 +00001039 let mut compatible = fdt.root_mut().next_compatible(Self::PVIOMMU_COMPATIBLE)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001040 let mut pviommu_phandles = BTreeMap::new();
1041
1042 for pviommu in &self.pviommus {
1043 let mut node = compatible.ok_or(DeviceAssignmentError::TooManyPvIommu)?;
1044 let phandle = node.as_node().get_phandle()?.ok_or(DeviceAssignmentError::Internal)?;
1045 node.setprop_inplace(cstr!("id"), &pviommu.id.to_be_bytes())?;
1046 if pviommu_phandles.insert(*pviommu, phandle).is_some() {
1047 return Err(DeviceAssignmentError::Internal);
1048 }
1049 compatible = node.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001050 }
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001051
1052 // Filters pre-populated but unassigned pvIOMMUs.
1053 while let Some(filtered_pviommu) = compatible {
1054 compatible = filtered_pviommu.delete_and_next_compatible(Self::PVIOMMU_COMPATIBLE)?;
1055 }
1056
1057 Ok(pviommu_phandles)
1058 }
1059
1060 pub fn patch(&self, fdt: &mut Fdt) -> Result<()> {
1061 let pviommu_phandles = self.patch_pviommus(fdt)?;
1062
1063 // Patches assigned devices
1064 for device in &self.assigned_devices {
1065 device.patch(fdt, &pviommu_phandles)?;
1066 }
1067
Jaewan Kimc730ebf2024-02-22 10:34:55 +09001068 // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
1069 filter_dangling_symbols(fdt)
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001070 }
1071}
1072
/// Cleans the device tree so that it doesn't contain any pre-populated nodes/props for device assignment.
1074pub fn clean(fdt: &mut Fdt) -> Result<()> {
1075 let mut compatible = fdt.root_mut().next_compatible(cstr!("pkvm,pviommu"))?;
    // Filters pre-populated pvIOMMU nodes.
1077 while let Some(filtered_pviommu) = compatible {
1078 compatible = filtered_pviommu.delete_and_next_compatible(cstr!("pkvm,pviommu"))?;
1079 }
1080
1081 // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
1082 filter_dangling_symbols(fdt)
1083}
1084
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001085#[cfg(test)]
Pierre-Clément Tosie5cca922024-04-30 17:54:08 +01001086#[derive(Clone, Copy, Debug)]
1087enum MockHypervisorError {
1088 FailedGetPhysMmioToken,
1089 FailedGetPhysIommuToken,
1090}
1091
1092#[cfg(test)]
1093type MockHypervisorResult<T> = core::result::Result<T, MockHypervisorError>;
1094
1095#[cfg(test)]
1096impl fmt::Display for MockHypervisorError {
1097 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1098 match self {
1099 MockHypervisorError::FailedGetPhysMmioToken => {
1100 write!(f, "Failed to get physical MMIO token")
1101 }
1102 MockHypervisorError::FailedGetPhysIommuToken => {
1103 write!(f, "Failed to get physical IOMMU token")
1104 }
1105 }
1106 }
1107}
1108
1109#[cfg(test)]
1110trait DeviceAssigningHypervisor {
1111 /// Returns MMIO token.
1112 fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> MockHypervisorResult<u64>;
1113
1114 /// Returns DMA token as a tuple of (phys_iommu_id, phys_sid).
1115 fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> MockHypervisorResult<(u64, u64)>;
1116}
1117
1118#[cfg(test)]
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001119mod tests {
1120 use super::*;
Jaewan Kim52477ae2023-11-21 21:20:52 +09001121 use alloc::collections::{BTreeMap, BTreeSet};
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001122 use dts::Dts;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001123 use std::fs;
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001124 use std::path::Path;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001125
1126 const VM_DTBO_FILE_PATH: &str = "test_pvmfw_devices_vm_dtbo.dtbo";
1127 const VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH: &str =
1128 "test_pvmfw_devices_vm_dtbo_without_symbols.dtbo";
Jaewan Kim19b984f2023-12-04 15:16:50 +09001129 const VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH: &str =
1130 "test_pvmfw_devices_vm_dtbo_with_duplicated_iommus.dtbo";
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001131 const VM_DTBO_WITH_DEPENDENCIES_FILE_PATH: &str =
1132 "test_pvmfw_devices_vm_dtbo_with_dependencies.dtbo";
Jaewan Kima67e36a2023-11-29 16:50:23 +09001133 const FDT_WITHOUT_IOMMUS_FILE_PATH: &str = "test_pvmfw_devices_without_iommus.dtb";
Jaewan Kim52477ae2023-11-21 21:20:52 +09001134 const FDT_WITHOUT_DEVICE_FILE_PATH: &str = "test_pvmfw_devices_without_device.dtb";
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001135 const FDT_FILE_PATH: &str = "test_pvmfw_devices_with_rng.dtb";
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +00001136 const FDT_WITH_DEVICE_OVERLAPPING_PVMFW: &str = "test_pvmfw_devices_overlapping_pvmfw.dtb";
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001137 const FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH: &str =
1138 "test_pvmfw_devices_with_multiple_devices_iommus.dtb";
1139 const FDT_WITH_IOMMU_SHARING: &str = "test_pvmfw_devices_with_iommu_sharing.dtb";
1140 const FDT_WITH_IOMMU_ID_CONFLICT: &str = "test_pvmfw_devices_with_iommu_id_conflict.dtb";
Jaewan Kim19b984f2023-12-04 15:16:50 +09001141 const FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH: &str =
1142 "test_pvmfw_devices_with_duplicated_pviommus.dtb";
1143 const FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH: &str =
1144 "test_pvmfw_devices_with_multiple_reg_iommus.dtb";
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001145 const FDT_WITH_DEPENDENCY_FILE_PATH: &str = "test_pvmfw_devices_with_dependency.dtb";
1146 const FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
1147 "test_pvmfw_devices_with_multiple_dependencies.dtb";
1148 const FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str = "test_pvmfw_devices_with_dependency_loop.dtb";
1149
1150 const EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH: &str = "expected_dt_with_dependency.dtb";
1151 const EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
1152 "expected_dt_with_multiple_dependencies.dtb";
1153 const EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str =
1154 "expected_dt_with_dependency_loop.dtb";
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001155
Jaewan Kim52477ae2023-11-21 21:20:52 +09001156 #[derive(Debug, Default)]
1157 struct MockHypervisor {
1158 mmio_tokens: BTreeMap<(u64, u64), u64>,
1159 iommu_tokens: BTreeMap<(u64, u64), (u64, u64)>,
1160 }
1161
1162 impl DeviceAssigningHypervisor for MockHypervisor {
Pierre-Clément Tosie5cca922024-04-30 17:54:08 +01001163 fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> MockHypervisorResult<u64> {
1164 let token = self.mmio_tokens.get(&(base_ipa, size));
1165
1166 Ok(*token.ok_or(MockHypervisorError::FailedGetPhysMmioToken)?)
Jaewan Kim52477ae2023-11-21 21:20:52 +09001167 }
1168
Pierre-Clément Tosie5cca922024-04-30 17:54:08 +01001169 fn get_phys_iommu_token(
1170 &self,
1171 pviommu_id: u64,
1172 vsid: u64,
1173 ) -> MockHypervisorResult<(u64, u64)> {
1174 let token = self.iommu_tokens.get(&(pviommu_id, vsid));
1175
1176 Ok(*token.ok_or(MockHypervisorError::FailedGetPhysIommuToken)?)
Jaewan Kim52477ae2023-11-21 21:20:52 +09001177 }
1178 }
1179
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001180 #[derive(Debug, Eq, PartialEq)]
1181 struct AssignedDeviceNode {
1182 path: CString,
1183 reg: Vec<u8>,
1184 interrupts: Vec<u8>,
Jaewan Kima67e36a2023-11-29 16:50:23 +09001185 iommus: Vec<u32>, // pvIOMMU id and vSID
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001186 }
1187
1188 impl AssignedDeviceNode {
1189 fn parse(fdt: &Fdt, path: &CStr) -> Result<Self> {
1190 let Some(node) = fdt.node(path)? else {
1191 return Err(FdtError::NotFound.into());
1192 };
1193
Jaewan Kim19b984f2023-12-04 15:16:50 +09001194 let reg = node.getprop(cstr!("reg"))?.ok_or(DeviceAssignmentError::MalformedReg)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001195 let interrupts = node
1196 .getprop(cstr!("interrupts"))?
1197 .ok_or(DeviceAssignmentError::InvalidInterrupts)?;
1198 let mut iommus = vec![];
Jaewan Kima9200492023-11-21 20:45:31 +09001199 if let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? {
1200 while let Some(pviommu_id) = cells.next() {
1201 // pvIOMMU id
1202 let phandle = Phandle::try_from(pviommu_id)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001203 let pviommu = fdt
1204 .node_with_phandle(phandle)?
Jaewan Kim19b984f2023-12-04 15:16:50 +09001205 .ok_or(DeviceAssignmentError::MalformedIommus)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001206 let compatible = pviommu.getprop_str(cstr!("compatible"));
1207 if compatible != Ok(Some(cstr!("pkvm,pviommu"))) {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001208 return Err(DeviceAssignmentError::MalformedIommus);
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001209 }
1210 let id = pviommu
1211 .getprop_u32(cstr!("id"))?
Jaewan Kim19b984f2023-12-04 15:16:50 +09001212 .ok_or(DeviceAssignmentError::MalformedIommus)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001213 iommus.push(id);
Jaewan Kima9200492023-11-21 20:45:31 +09001214
1215 // vSID
1216 let Some(vsid) = cells.next() else {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001217 return Err(DeviceAssignmentError::MalformedIommus);
Jaewan Kima9200492023-11-21 20:45:31 +09001218 };
1219 iommus.push(vsid);
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001220 }
1221 }
1222 Ok(Self { path: path.into(), reg: reg.into(), interrupts: interrupts.into(), iommus })
1223 }
1224 }
1225
1226 fn collect_pviommus(fdt: &Fdt) -> Result<Vec<u32>> {
1227 let mut pviommus = BTreeSet::new();
1228 for pviommu in fdt.compatible_nodes(cstr!("pkvm,pviommu"))? {
1229 if let Ok(Some(id)) = pviommu.getprop_u32(cstr!("id")) {
1230 pviommus.insert(id);
1231 }
1232 }
1233 Ok(pviommus.iter().cloned().collect())
1234 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001235
1236 fn into_fdt_prop(native_bytes: Vec<u32>) -> Vec<u8> {
1237 let mut v = Vec::with_capacity(native_bytes.len() * 4);
1238 for byte in native_bytes {
1239 v.extend_from_slice(&byte.to_be_bytes());
1240 }
1241 v
1242 }
1243
Jaewan Kim52477ae2023-11-21 21:20:52 +09001244 impl From<[u64; 2]> for DeviceReg {
1245 fn from(fdt_cells: [u64; 2]) -> Self {
1246 DeviceReg { addr: fdt_cells[0], size: fdt_cells[1] }
1247 }
1248 }
1249
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001250 #[test]
1251 fn device_info_new_without_symbols() {
1252 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1253 let mut vm_dtbo_data = fs::read(VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH).unwrap();
1254 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1255 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1256
Jaewan Kim52477ae2023-11-21 21:20:52 +09001257 let hypervisor: MockHypervisor = Default::default();
1258 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
1259 assert_eq!(device_info, None);
1260 }
1261
1262 #[test]
1263 fn device_info_new_without_device() {
1264 let mut fdt_data = fs::read(FDT_WITHOUT_DEVICE_FILE_PATH).unwrap();
1265 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1266 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1267 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1268
1269 let hypervisor: MockHypervisor = Default::default();
1270 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001271 assert_eq!(device_info, None);
1272 }
1273
1274 #[test]
Jaewan Kima67e36a2023-11-29 16:50:23 +09001275 fn device_info_assigned_info_without_iommus() {
1276 let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
1277 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1278 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1279 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1280
Jaewan Kim52477ae2023-11-21 21:20:52 +09001281 let hypervisor = MockHypervisor {
1282 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
1283 iommu_tokens: BTreeMap::new(),
1284 };
1285 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kima67e36a2023-11-29 16:50:23 +09001286
1287 let expected = [AssignedDeviceInfo {
Jaewan Kimc39974e2023-12-02 01:13:30 +09001288 node_path: CString::new("/bus0/backlight").unwrap(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001289 reg: vec![[0x9, 0xFF].into()],
Jaewan Kima67e36a2023-11-29 16:50:23 +09001290 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
1291 iommus: vec![],
1292 }];
1293
1294 assert_eq!(device_info.assigned_devices, expected);
1295 }
1296
1297 #[test]
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001298 fn device_info_assigned_info() {
1299 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1300 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1301 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1302 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1303
Jaewan Kim52477ae2023-11-21 21:20:52 +09001304 let hypervisor = MockHypervisor {
1305 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1306 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1307 };
1308 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001309
1310 let expected = [AssignedDeviceInfo {
1311 node_path: CString::new("/rng").unwrap(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001312 reg: vec![[0x9, 0xFF].into()],
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001313 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001314 iommus: vec![(PvIommu { id: 0x4 }, Vsid(0xFF0))],
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001315 }];
1316
1317 assert_eq!(device_info.assigned_devices, expected);
1318 }
1319
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001320 #[test]
1321 fn device_info_filter() {
1322 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1323 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1324 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1325 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1326
Jaewan Kim52477ae2023-11-21 21:20:52 +09001327 let hypervisor = MockHypervisor {
1328 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1329 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1330 };
1331 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001332 device_info.filter(vm_dtbo).unwrap();
1333
1334 let vm_dtbo = vm_dtbo.as_mut();
1335
Jaewan Kim371f6c82024-02-24 01:33:37 +09001336 let symbols = vm_dtbo.symbols().unwrap().unwrap();
1337
Jaewan Kima232ed02024-02-25 16:08:14 +00001338 let rng = vm_dtbo.node(cstr!("/fragment@0/__overlay__/rng")).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001339 assert_ne!(rng, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001340 let rng_symbol = symbols.getprop_str(cstr!("rng")).unwrap();
Jaewan Kima232ed02024-02-25 16:08:14 +00001341 assert_eq!(Some(cstr!("/fragment@0/__overlay__/rng")), rng_symbol);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001342
Jaewan Kima232ed02024-02-25 16:08:14 +00001343 let light = vm_dtbo.node(cstr!("/fragment@0/__overlay__/light")).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001344 assert_eq!(light, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001345 let light_symbol = symbols.getprop_str(cstr!("light")).unwrap();
1346 assert_eq!(None, light_symbol);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001347
Jaewan Kima232ed02024-02-25 16:08:14 +00001348 let led = vm_dtbo.node(cstr!("/fragment@0/__overlay__/led")).unwrap();
Jaewan Kima67e36a2023-11-29 16:50:23 +09001349 assert_eq!(led, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001350 let led_symbol = symbols.getprop_str(cstr!("led")).unwrap();
1351 assert_eq!(None, led_symbol);
Jaewan Kima67e36a2023-11-29 16:50:23 +09001352
Jaewan Kima232ed02024-02-25 16:08:14 +00001353 let backlight = vm_dtbo.node(cstr!("/fragment@0/__overlay__/bus0/backlight")).unwrap();
Jaewan Kima67e36a2023-11-29 16:50:23 +09001354 assert_eq!(backlight, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001355 let backlight_symbol = symbols.getprop_str(cstr!("backlight")).unwrap();
1356 assert_eq!(None, backlight_symbol);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001357 }
1358
1359 #[test]
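    // After apply_overlay() and patch(), the assigned backlight node in the platform DT should
    // carry exactly the expected set of properties and nothing else.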
1360 fn device_info_patch() {
Jaewan Kima67e36a2023-11-29 16:50:23 +09001361 let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001362 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1363 let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
1364 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1365 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1366 let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
1367
Jaewan Kim52477ae2023-11-21 21:20:52 +09001368 let hypervisor = MockHypervisor {
1369 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
1370 iommu_tokens: BTreeMap::new(),
1371 };
1372 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001373 device_info.filter(vm_dtbo).unwrap();
1374
1375         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1376 unsafe {
1377 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1378 }
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001379 device_info.patch(platform_dt).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001380
Jaewan Kimc39974e2023-12-02 01:13:30 +09001381         let backlight_node = platform_dt.node(cstr!("/bus0/backlight")).unwrap().unwrap();
1382         let phandle = backlight_node.getprop_u32(cstr!("phandle")).unwrap();
1383 assert_ne!(None, phandle);
1384
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001385 // Note: Intentionally not using AssignedDeviceNode for matching all props.
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001386 type FdtResult<T> = libfdt::Result<T>;
1387 let expected: Vec<(FdtResult<&CStr>, FdtResult<Vec<u8>>)> = vec![
Jaewan Kima67e36a2023-11-29 16:50:23 +09001388 (Ok(cstr!("android,backlight,ignore-gctrl-reset")), Ok(Vec::new())),
1389 (Ok(cstr!("compatible")), Ok(Vec::from(*b"android,backlight\0"))),
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001390 (Ok(cstr!("interrupts")), Ok(into_fdt_prop(vec![0x0, 0xF, 0x4]))),
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001391 (Ok(cstr!("iommus")), Ok(Vec::new())),
Jaewan Kimc39974e2023-12-02 01:13:30 +09001392 (Ok(cstr!("phandle")), Ok(into_fdt_prop(vec![phandle.unwrap()]))),
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001393 (Ok(cstr!("reg")), Ok(into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]))),
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001394 ];
1395
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001396         let mut properties: Vec<_> = backlight_node
1397 .properties()
1398 .unwrap()
1399 .map(|prop| (prop.name(), prop.value().map(|x| x.into())))
1400 .collect();
1401 properties.sort_by(|a, b| {
1402 let lhs = a.0.unwrap_or_default();
1403 let rhs = b.0.unwrap_or_default();
1404 lhs.partial_cmp(rhs).unwrap()
1405 });
1406
1407 assert_eq!(properties, expected);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001408 }
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001409
1410 #[test]
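    // When no pvIOMMU is assigned, patch() should leave no pkvm,pviommu node behind and every
    // remaining __symbols__ entry should still resolve to an existing node.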
Jaewan Kimc730ebf2024-02-22 10:34:55 +09001411 fn device_info_patch_no_pviommus() {
1412 let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
1413 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1414 let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
1415 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1416 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1417 let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
1418
1419 let hypervisor = MockHypervisor {
1420 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
1421 iommu_tokens: BTreeMap::new(),
1422 };
1423 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1424 device_info.filter(vm_dtbo).unwrap();
1425
1426         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1427 unsafe {
1428 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1429 }
1430 device_info.patch(platform_dt).unwrap();
1431
1432 let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu")).unwrap();
1433 assert_eq!(None, compatible);
1434
1435 if let Some(symbols) = platform_dt.symbols().unwrap() {
1436 for prop in symbols.properties().unwrap() {
1437 let path = CStr::from_bytes_with_nul(prop.value().unwrap()).unwrap();
1438 assert_ne!(None, platform_dt.node(path).unwrap());
1439 }
1440 }
1441 }
1442
1443 #[test]
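    // Patching the platform DT should produce an /rng node wired to pvIOMMU 0x4 with vSID 0xFF0.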
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001444 fn device_info_overlay_iommu() {
Jaewan Kima67e36a2023-11-29 16:50:23 +09001445 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001446 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1447 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1448 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1449 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1450 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1451 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1452 platform_dt.unpack().unwrap();
1453
Jaewan Kim52477ae2023-11-21 21:20:52 +09001454 let hypervisor = MockHypervisor {
1455 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1456 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1457 };
1458 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001459 device_info.filter(vm_dtbo).unwrap();
1460
1461         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1462 unsafe {
1463 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1464 }
1465 device_info.patch(platform_dt).unwrap();
1466
1467 let expected = AssignedDeviceNode {
1468 path: CString::new("/rng").unwrap(),
1469 reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
1470 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima9200492023-11-21 20:45:31 +09001471 iommus: vec![0x4, 0xFF0],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001472 };
1473
1474 let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
1475 assert_eq!(node, Ok(expected));
1476
1477 let pviommus = collect_pviommus(platform_dt);
1478 assert_eq!(pviommus, Ok(vec![0x4]));
1479 }
1480
1481 #[test]
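    // Multiple assigned devices, each referencing several pvIOMMUs; all of them should end up in
    // the patched platform DT.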
1482 fn device_info_multiple_devices_iommus() {
1483 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH).unwrap();
1484 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1485 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1486 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1487 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1488 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1489 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1490 platform_dt.unpack().unwrap();
1491
Jaewan Kim52477ae2023-11-21 21:20:52 +09001492 let hypervisor = MockHypervisor {
1493 mmio_tokens: [
1494 ((0x9, 0xFF), 0x12F00000),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001495 ((0x10000, 0x1000), 0xF00000),
1496 ((0x20000, 0x1000), 0xF10000),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001497 ]
1498 .into(),
1499 iommu_tokens: [
1500 ((0x4, 0xFF0), (0x12E40000, 3)),
1501 ((0x40, 0xFFA), (0x40000, 0x4)),
1502 ((0x50, 0xFFB), (0x50000, 0x5)),
1503 ]
1504 .into(),
1505 };
1506 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001507 device_info.filter(vm_dtbo).unwrap();
1508
1509         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1510 unsafe {
1511 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1512 }
1513 device_info.patch(platform_dt).unwrap();
1514
1515 let expected_devices = [
1516 AssignedDeviceNode {
1517 path: CString::new("/rng").unwrap(),
1518 reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
1519 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001520 iommus: vec![0x4, 0xFF0],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001521 },
1522 AssignedDeviceNode {
1523 path: CString::new("/light").unwrap(),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001524 reg: into_fdt_prop(vec![0x0, 0x10000, 0x0, 0x1000, 0x0, 0x20000, 0x0, 0x1000]),
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001525 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001526 iommus: vec![0x40, 0xFFA, 0x50, 0xFFB],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001527 },
1528 ];
1529
1530 for expected in expected_devices {
1531 let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
1532 assert_eq!(node, Ok(expected));
1533 }
1534 let pviommus = collect_pviommus(platform_dt);
Jaewan Kima67e36a2023-11-29 16:50:23 +09001535 assert_eq!(pviommus, Ok(vec![0x4, 0x40, 0x50]));
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001536 }
1537
1538 #[test]
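    // Two devices sharing the same pvIOMMU (id 0x4) with different vSIDs; only one pvIOMMU node
    // should remain after patching.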
1539 fn device_info_iommu_sharing() {
1540 let mut fdt_data = fs::read(FDT_WITH_IOMMU_SHARING).unwrap();
1541 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1542 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1543 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1544 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1545 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1546 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1547 platform_dt.unpack().unwrap();
1548
Jaewan Kim52477ae2023-11-21 21:20:52 +09001549 let hypervisor = MockHypervisor {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001550 mmio_tokens: [((0x9, 0xFF), 0x12F00000), ((0x1000, 0x9), 0x12000000)].into(),
1551 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 3)), ((0x4, 0xFF1), (0x12E40000, 9))].into(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001552 };
1553 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001554 device_info.filter(vm_dtbo).unwrap();
1555
1556         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1557 unsafe {
1558 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1559 }
1560 device_info.patch(platform_dt).unwrap();
1561
1562 let expected_devices = [
1563 AssignedDeviceNode {
1564 path: CString::new("/rng").unwrap(),
1565 reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
1566 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001567 iommus: vec![0x4, 0xFF0],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001568 },
1569 AssignedDeviceNode {
Jaewan Kima67e36a2023-11-29 16:50:23 +09001570 path: CString::new("/led").unwrap(),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001571 reg: into_fdt_prop(vec![0x0, 0x1000, 0x0, 0x9]),
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001572 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001573 iommus: vec![0x4, 0xFF1],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001574 },
1575 ];
1576
1577 for expected in expected_devices {
1578 let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
1579 assert_eq!(node, Ok(expected));
1580 }
1581
1582 let pviommus = collect_pviommus(platform_dt);
Jaewan Kima67e36a2023-11-29 16:50:23 +09001583 assert_eq!(pviommus, Ok(vec![0x4]));
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001584 }
1585
1586 #[test]
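    // A guest FDT declaring conflicting pvIOMMU ids should be rejected with DuplicatedPvIommuIds.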
1587 fn device_info_iommu_id_conflict() {
1588 let mut fdt_data = fs::read(FDT_WITH_IOMMU_ID_CONFLICT).unwrap();
1589 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1590 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1591 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1592
Jaewan Kim52477ae2023-11-21 21:20:52 +09001593 let hypervisor = MockHypervisor {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001594 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001595 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1596 };
1597 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001598
1599 assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
1600 }
Jaewan Kim52477ae2023-11-21 21:20:52 +09001601
1602 #[test]
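    // No MMIO token matches the device's <reg>, so parsing should fail with InvalidReg.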
1603 fn device_info_invalid_reg() {
1604 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1605 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1606 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1607 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1608
1609 let hypervisor = MockHypervisor {
1610 mmio_tokens: BTreeMap::new(),
1611 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1612 };
1613 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1614
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +00001615 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x9, 0xFF)));
Jaewan Kim52477ae2023-11-21 21:20:52 +09001616 }
1617
1618 #[test]
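    // Physical regions resolved out of order relative to the device's <reg> entries should be
    // rejected with InvalidPhysReg.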
Jaewan Kim19b984f2023-12-04 15:16:50 +09001619 fn device_info_invalid_reg_out_of_order() {
1620 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
1621 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1622 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1623 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1624
1625 let hypervisor = MockHypervisor {
1626 mmio_tokens: [((0xF000, 0x1000), 0xF10000), ((0xF100, 0x1000), 0xF00000)].into(),
1627 iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x50000, 0x5))].into(),
1628 };
1629 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1630
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +00001631 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidPhysReg(0xF10000, 0x1000)));
Jaewan Kim19b984f2023-12-04 15:16:50 +09001632 }
1633
1634 #[test]
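    // No IOMMU token matches the device's <iommus>, so parsing should fail with InvalidIommus.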
Jaewan Kim52477ae2023-11-21 21:20:52 +09001635 fn device_info_invalid_iommus() {
1636 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1637 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1638 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1639 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1640
1641 let hypervisor = MockHypervisor {
1642 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1643 iommu_tokens: BTreeMap::new(),
1644 };
1645 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1646
1647 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
1648 }
Jaewan Kim19b984f2023-12-04 15:16:50 +09001649
1650 #[test]
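    // Duplicated pvIOMMU definitions in the guest FDT should be rejected with DuplicatedPvIommuIds.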
1651 fn device_info_duplicated_pv_iommus() {
1652 let mut fdt_data = fs::read(FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH).unwrap();
1653 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1654 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1655 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1656
1657 let hypervisor = MockHypervisor {
1658 mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
1659 iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
1660 };
1661 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1662
1663 assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
1664 }
1665
1666 #[test]
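    // A VM DTBO listing the same IOMMU more than once for a device should be rejected with
    // UnsupportedIommusDuplication.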
1667 fn device_info_duplicated_iommus() {
1668 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1669 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH).unwrap();
1670 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1671 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1672
1673 let hypervisor = MockHypervisor {
1674 mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
1675 iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
1676 };
1677 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1678
1679 assert_eq!(device_info, Err(DeviceAssignmentError::UnsupportedIommusDuplication));
1680 }
1681
1682 #[test]
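    // Two distinct pvIOMMUs mapping to the same physical IOMMU token should be rejected with
    // InvalidIommus.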
1683 fn device_info_duplicated_iommu_mapping() {
1684 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
1685 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1686 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1687 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1688
1689 let hypervisor = MockHypervisor {
1690 mmio_tokens: [((0xF000, 0x1000), 0xF00000), ((0xF100, 0x1000), 0xF10000)].into(),
1691 iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x40000, 0x4))].into(),
1692 };
1693 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1694
1695 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
1696 }
Jaewan Kim50246682024-03-11 23:18:54 +09001697
1698 #[test]
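    // A device whose <reg> overlaps the pvmfw region should be rejected with InvalidReg.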
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +00001699 fn device_info_overlaps_pvmfw() {
1700 let mut fdt_data = fs::read(FDT_WITH_DEVICE_OVERLAPPING_PVMFW).unwrap();
1701 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1702 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1703 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1704
1705 let hypervisor = MockHypervisor {
1706 mmio_tokens: [((0x7fee0000, 0x1000), 0xF00000)].into(),
1707 iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
1708 };
1709 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1710
1711 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x7fee0000, 0x1000)));
1712 }
1713
1714 #[test]
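    // clean() should remove the pkvm,pviommu nodes present in the pvmfw FDT template.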
Jaewan Kim50246682024-03-11 23:18:54 +09001715 fn device_assignment_clean() {
1716 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1717 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1718
1719 let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
1720 assert_ne!(None, compatible.unwrap());
1721
1722 clean(platform_dt).unwrap();
1723
1724 let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
1725 assert_eq!(Ok(None), compatible);
1726 }
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001727
1728 #[test]
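    // An assigned device with a dependency in the VM DTBO; the patched platform DT is compared
    // against a golden DTB.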
1729 fn device_info_dependency() {
1730 let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_FILE_PATH).unwrap();
1731 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
1732 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1733 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1734 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1735 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1736 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1737 platform_dt.unpack().unwrap();
1738
1739 let hypervisor = MockHypervisor {
1740 mmio_tokens: [((0xFF000, 0x1), 0xF000)].into(),
1741 iommu_tokens: Default::default(),
1742 };
1743
1744 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1745 device_info.filter(vm_dtbo).unwrap();
1746
1747         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1748 unsafe {
1749 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1750 }
1751 device_info.patch(platform_dt).unwrap();
1752
1753 let expected = Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH)).unwrap();
1754 let platform_dt = Dts::from_fdt(platform_dt).unwrap();
1755
1756 assert_eq!(expected, platform_dt);
1757 }
1758
1759 #[test]
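    // As above, but with multiple dependencies; the patched platform DT is compared against a
    // golden DTB.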
1760 fn device_info_multiple_dependencies() {
1761 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH).unwrap();
1762 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
1763 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1764 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1765 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1766 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1767 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1768 platform_dt.unpack().unwrap();
1769
1770 let hypervisor = MockHypervisor {
1771 mmio_tokens: [((0xFF000, 0x1), 0xF000), ((0xFF100, 0x1), 0xF100)].into(),
1772 iommu_tokens: Default::default(),
1773 };
1774 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1775 device_info.filter(vm_dtbo).unwrap();
1776
1777         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1778 unsafe {
1779 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1780 }
1781 device_info.patch(platform_dt).unwrap();
1782
1783 let expected =
1784 Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH)).unwrap();
1785 let platform_dt = Dts::from_fdt(platform_dt).unwrap();
1786
1787 assert_eq!(expected, platform_dt);
1788 }
1789
1790 #[test]
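    // Dependencies forming a loop are still handled; the patched platform DT is compared against
    // a golden DTB.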
1791 fn device_info_dependency_loop() {
1792 let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_LOOP_FILE_PATH).unwrap();
1793 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
1794 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1795 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1796 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1797 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1798 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1799 platform_dt.unpack().unwrap();
1800
1801 let hypervisor = MockHypervisor {
1802 mmio_tokens: [((0xFF200, 0x1), 0xF200)].into(),
1803 iommu_tokens: Default::default(),
1804 };
1805 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1806 device_info.filter(vm_dtbo).unwrap();
1807
1808         // SAFETY: The VM DTBO is not used after this unsafe block, even if the overlay leaves it damaged.
1809 unsafe {
1810 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1811 }
1812 device_info.patch(platform_dt).unwrap();
1813
1814 let expected =
1815 Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH)).unwrap();
1816 let platform_dt = Dts::from_fdt(platform_dt).unwrap();
1817
1818 assert_eq!(expected, platform_dt);
1819 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001820}