// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Validate device assignment written in crosvm DT with VM DTBO, and apply it
//! to platform DT.
//! Declared in a separate lib so unit tests, which require libstd, can be added.

#[cfg(test)]
extern crate alloc;

use alloc::collections::{BTreeMap, BTreeSet};
use alloc::ffi::CString;
use alloc::fmt;
use alloc::vec;
use alloc::vec::Vec;
use core::ffi::CStr;
use core::iter::Iterator;
use core::mem;
use core::ops::Range;
use hyp::DeviceAssigningHypervisor;
use libfdt::{Fdt, FdtError, FdtNode, FdtNodeMut, Phandle, Reg};
use log::error;
use zerocopy::byteorder::big_endian::U32;
use zerocopy::FromBytes as _;

// TODO(b/308694211): Use cstr! from vmbase instead.
macro_rules! cstr {
    ($str:literal) => {{
        const S: &str = concat!($str, "\0");
        const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
            Ok(v) => v,
            Err(_) => panic!("string contains interior NUL"),
        };
        C
    }};
}

// TODO(b/277993056): Keep constants derived from platform.dts in one place.
const CELLS_PER_INTERRUPT: usize = 3; // from /intc node in platform.dts

/// Errors in device assignment.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DeviceAssignmentError {
    /// Invalid VM DTBO
    InvalidDtbo,
    /// Invalid __symbols__
    InvalidSymbols,
    /// Malformed <reg>. Can't parse.
    MalformedReg,
    /// Invalid physical <reg> of assigned device.
    InvalidPhysReg(u64, u64),
    /// Invalid virtual <reg> of assigned device.
    InvalidReg(u64, u64),
    /// Invalid <interrupts>
    InvalidInterrupts,
    /// Malformed <iommus>
    MalformedIommus,
    /// Invalid <iommus>
    InvalidIommus,
    /// Invalid phys IOMMU node
    InvalidPhysIommu,
    /// Invalid pvIOMMU node
    InvalidPvIommu,
    /// Too many pvIOMMU nodes
    TooManyPvIommu,
    /// Duplicated phys IOMMU IDs exist
    DuplicatedIommuIds,
    /// Duplicated pvIOMMU IDs exist
    DuplicatedPvIommuIds,
    /// Unsupported path format. Only supports full paths.
    UnsupportedPathFormat,
    /// Unsupported overlay target syntax. Only supports <target-path> with a full path.
    UnsupportedOverlayTarget,
    /// Unsupported PhysIommu
    UnsupportedPhysIommu,
    /// Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.
    UnsupportedPvIommusDuplication,
    /// Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.
    UnsupportedIommusDuplication,
    /// Internal error
    Internal,
    /// Unexpected error from libfdt
    UnexpectedFdtError(FdtError),
}

impl From<FdtError> for DeviceAssignmentError {
    fn from(e: FdtError) -> Self {
        DeviceAssignmentError::UnexpectedFdtError(e)
    }
}

impl fmt::Display for DeviceAssignmentError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::InvalidDtbo => write!(f, "Invalid DTBO"),
            Self::InvalidSymbols => write!(
                f,
                "Invalid property in /__symbols__. Must point to a valid assignable device node."
            ),
            Self::MalformedReg => write!(f, "Malformed <reg>. Can't parse"),
            Self::InvalidReg(addr, size) => {
                write!(f, "Invalid guest MMIO region (addr: {addr:#x}, size: {size:#x})")
            }
            Self::InvalidPhysReg(addr, size) => {
                write!(f, "Invalid physical MMIO region (addr: {addr:#x}, size: {size:#x})")
            }
            Self::InvalidInterrupts => write!(f, "Invalid <interrupts>"),
            Self::MalformedIommus => write!(f, "Malformed <iommus>. Can't parse."),
            Self::InvalidIommus => {
                write!(f, "Invalid <iommus>. Failed to validate with hypervisor")
            }
            Self::InvalidPhysIommu => write!(f, "Invalid phys IOMMU node"),
            Self::InvalidPvIommu => write!(f, "Invalid pvIOMMU node"),
            Self::TooManyPvIommu => write!(
                f,
                "Too many pvIOMMU nodes. Insufficient pre-populated pvIOMMUs in platform DT"
            ),
            Self::DuplicatedIommuIds => {
                write!(f, "Duplicated IOMMU IDs exist. IDs must be unique among IOMMU nodes")
            }
            Self::DuplicatedPvIommuIds => {
                write!(f, "Duplicated pvIOMMU IDs exist. IDs must be unique among pvIOMMU nodes")
            }
            Self::UnsupportedPathFormat => {
                write!(f, "Unsupported path format. Only supports full paths")
            }
            Self::UnsupportedOverlayTarget => {
                write!(f, "Unsupported overlay target. Only supports 'target-path = \"/\"'")
            }
            Self::UnsupportedPhysIommu => {
                write!(f, "Unsupported phys IOMMU. Currently only supports #iommu-cells = <1>")
            }
            Self::UnsupportedPvIommusDuplication => {
                write!(f, "Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.")
            }
            Self::UnsupportedIommusDuplication => {
                write!(f, "Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.")
            }
            Self::Internal => write!(f, "Internal error"),
            Self::UnexpectedFdtError(e) => write!(f, "Unexpected error from libfdt: {e}"),
        }
    }
}

pub type Result<T> = core::result::Result<T, DeviceAssignmentError>;

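/// Device tree path split into its node-name components: e.g. "/fragment@rng/__overlay__/rng"
/// becomes ["fragment@rng", "__overlay__", "rng"]. Tokens are kept as raw bytes so they can be
/// compared directly against FDT node names.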
#[derive(Clone, Default, Ord, PartialOrd, Eq, PartialEq)]
pub struct DtPathTokens<'a> {
    tokens: Vec<&'a [u8]>,
}

impl<'a> fmt::Debug for DtPathTokens<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut list = f.debug_list();
        for token in &self.tokens {
            let mut bytes = token.to_vec();
            bytes.push(b'\0');
            match CString::from_vec_with_nul(bytes) {
                Ok(string) => list.entry(&string),
                Err(_) => list.entry(token),
            };
        }
        list.finish()
    }
}

impl<'a> DtPathTokens<'a> {
    fn new(path: &'a CStr) -> Result<Self> {
        if path.to_bytes().first() != Some(&b'/') {
            return Err(DeviceAssignmentError::UnsupportedPathFormat);
        }
        let tokens: Vec<_> = path
            .to_bytes()
            .split(|char| *char == b'/')
            .filter(|&component| !component.is_empty())
            .collect();
        Ok(Self { tokens })
    }

    fn to_overlay_target_path(&self) -> Result<Self> {
        if !self.is_overlayable_node() {
            return Err(DeviceAssignmentError::InvalidDtbo);
        }
        Ok(Self { tokens: self.tokens.as_slice()[2..].to_vec() })
    }

    fn to_cstring(&self) -> CString {
        if self.tokens.is_empty() {
            return CString::new(*b"/").unwrap();
        }

        let size = self.tokens.iter().fold(0, |sum, token| sum + token.len() + 1);
        let mut path = Vec::with_capacity(size + 1);
        for token in &self.tokens {
            path.push(b'/');
            path.extend_from_slice(token);
        }
        path.push(b'\0');

        CString::from_vec_with_nul(path).unwrap()
    }

    fn is_overlayable_node(&self) -> bool {
        self.tokens.get(1) == Some(&&b"__overlay__"[..])
    }
}

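/// Children to keep for a masked node: either an explicit subset or all of them.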
#[derive(Debug, Eq, PartialEq)]
enum DeviceTreeChildrenMask {
    Partial(Vec<DeviceTreeMask>),
    All,
}

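/// Trie of device tree node names recording which VM DTBO nodes (and which of their children)
/// must be kept when filtering.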
#[derive(Eq, PartialEq)]
struct DeviceTreeMask {
    name_bytes: Vec<u8>,
    children: DeviceTreeChildrenMask,
}

impl fmt::Debug for DeviceTreeMask {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name_bytes = [self.name_bytes.as_slice(), b"\0"].concat();

        f.debug_struct("DeviceTreeMask")
            .field("name", &CStr::from_bytes_with_nul(&name_bytes).unwrap())
            .field("children", &self.children)
            .finish()
    }
}

impl DeviceTreeMask {
    fn new() -> Self {
        Self { name_bytes: b"/".to_vec(), children: DeviceTreeChildrenMask::Partial(Vec::new()) }
    }

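    // Walks (and extends) the mask trie along `path`, setting the leaf's children to `leaf_mask`.
    // Returns true if the leaf was newly added to the mask.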
    fn mask_internal(&mut self, path: &DtPathTokens, leaf_mask: DeviceTreeChildrenMask) -> bool {
        let mut iter = self;
        let mut newly_masked = false;
        'next_token: for path_token in &path.tokens {
            let DeviceTreeChildrenMask::Partial(ref mut children) = &mut iter.children else {
                return false;
            };

            // Note: Can't use iterator for 'get or insert'. (a.k.a. polonius Rust)
            #[allow(clippy::needless_range_loop)]
            for i in 0..children.len() {
                if children[i].name_bytes.as_slice() == *path_token {
                    iter = &mut children[i];
                    newly_masked = false;
                    continue 'next_token;
                }
            }
            let child = Self {
                name_bytes: path_token.to_vec(),
                children: DeviceTreeChildrenMask::Partial(Vec::new()),
            };
            children.push(child);
            newly_masked = true;
            iter = children.last_mut().unwrap()
        }
        iter.children = leaf_mask;
        newly_masked
    }

    fn mask(&mut self, path: &DtPathTokens) -> bool {
        self.mask_internal(path, DeviceTreeChildrenMask::Partial(Vec::new()))
    }

    fn mask_all(&mut self, path: &DtPathTokens) {
        self.mask_internal(path, DeviceTreeChildrenMask::All);
    }
}

/// Represents VM DTBO
#[repr(transparent)]
pub struct VmDtbo(Fdt);

impl VmDtbo {
    /// Wraps a mutable slice containing a VM DTBO.
    ///
    /// Fails if the VM DTBO does not pass validation.
    pub fn from_mut_slice(dtbo: &mut [u8]) -> Result<&mut Self> {
        // This validates DTBO
        let fdt = Fdt::from_mut_slice(dtbo)?;
        // SAFETY: VmDtbo is a transparent wrapper around Fdt, so representation is the same.
        Ok(unsafe { mem::transmute::<&mut Fdt, &mut Self>(fdt) })
    }

    // Locates device node path as if the given dtbo node path is assigned and VM DTBO is overlaid.
    // For a given dtbo node path, this concatenates <target-path> of the enclosing fragment and
    // the relative path from the __overlay__ node.
    //
    // Here's an example with a sample VM DTBO:
    // / {
    //     fragment@rng {
    //         target-path = "/";  // Always 'target-path = "/"'. Disallows <target> or other path.
    //         __overlay__ {
    //             rng { ... };    // Actual device node is here. If overlaid, path would be "/rng"
    //         };
    //     };
    //     __symbols__ {           // Contains list of assignable devices
    //         rng = "/fragment@rng/__overlay__/rng";
    //     };
    // };
    //
    // Then locate_overlay_target_path(cstr!("/fragment@rng/__overlay__/rng")) is Ok("/rng")
    //
    // Contrary to fdt_overlay_target_offset(), this API enforces the overlay target property
    // 'target-path = "/"', so the overlay doesn't modify and/or append the platform DT's existing
    // nodes and/or properties. The enforcement is for compatibility reasons.
    fn locate_overlay_target_path(
        &self,
        dtbo_node_path: &DtPathTokens,
        dtbo_node: &FdtNode,
    ) -> Result<CString> {
        let fragment_node = dtbo_node.supernode_at_depth(1)?;
        let target_path = fragment_node
            .getprop_str(cstr!("target-path"))?
            .ok_or(DeviceAssignmentError::InvalidDtbo)?;
        if target_path != cstr!("/") {
            return Err(DeviceAssignmentError::UnsupportedOverlayTarget);
        }

        let overlaid_path = dtbo_node_path.to_overlay_target_path()?;
        Ok(overlaid_path.to_cstring())
    }

    fn parse_physical_iommus(physical_node: &FdtNode) -> Result<BTreeMap<Phandle, PhysIommu>> {
        let mut phys_iommus = BTreeMap::new();
        for (node, _) in physical_node.descendants() {
            let Some(phandle) = node.get_phandle()? else {
                continue; // Skips unreachable IOMMU node
            };
            let Some(iommu) = PhysIommu::parse(&node)? else {
                continue; // Skip if not a PhysIommu.
            };
            if phys_iommus.insert(phandle, iommu).is_some() {
                return Err(FdtError::BadPhandle.into());
            }
        }
        Self::validate_physical_iommus(&phys_iommus)?;
        Ok(phys_iommus)
    }

    fn validate_physical_iommus(phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<()> {
        let unique_iommus: BTreeSet<_> = phys_iommus.values().cloned().collect();
        if phys_iommus.len() != unique_iommus.len() {
            return Err(DeviceAssignmentError::DuplicatedIommuIds);
        }
        Ok(())
    }

    fn validate_physical_devices(
        physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
    ) -> Result<()> {
        // Only need to validate iommus because <reg> will be validated together with PV <reg>
        // see: DeviceAssignmentInfo::validate_all_regs().
        let mut all_iommus = BTreeSet::new();
        for physical_device in physical_devices.values() {
            for iommu in &physical_device.iommus {
                if !all_iommus.insert(iommu) {
                    error!("Unsupported phys IOMMU duplication found, <iommus> = {iommu:?}");
                    return Err(DeviceAssignmentError::UnsupportedIommusDuplication);
                }
            }
        }
        Ok(())
    }

    fn parse_physical_devices_with_iommus(
        physical_node: &FdtNode,
        phys_iommus: &BTreeMap<Phandle, PhysIommu>,
    ) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
        let mut physical_devices = BTreeMap::new();
        for (node, _) in physical_node.descendants() {
            let Some(info) = PhysicalDeviceInfo::parse(&node, phys_iommus)? else {
                continue;
            };
            if physical_devices.insert(info.target, info).is_some() {
                return Err(DeviceAssignmentError::InvalidDtbo);
            }
        }
        Self::validate_physical_devices(&physical_devices)?;
        Ok(physical_devices)
    }

    /// Parses physical devices in VM DTBO
    fn parse_physical_devices(&self) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
        let Some(physical_node) = self.as_ref().node(cstr!("/host"))? else {
            return Ok(BTreeMap::new());
        };

        let phys_iommus = Self::parse_physical_iommus(&physical_node)?;
        Self::parse_physical_devices_with_iommus(&physical_node, &phys_iommus)
    }

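    // Looks up the node at `path` in the VM DTBO by walking its path tokens from the root.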
    fn node(&self, path: &DtPathTokens) -> Result<Option<FdtNode>> {
        let mut node = self.as_ref().root();
        for token in &path.tokens {
            let Some(subnode) = node.subnode_with_name_bytes(token)? else {
                return Ok(None);
            };
            node = subnode;
        }
        Ok(Some(node))
    }

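    // Collects the path of every overlayable node that has a phandle, keyed by that phandle.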
    fn collect_overlayable_nodes_with_phandle(&self) -> Result<BTreeMap<Phandle, DtPathTokens>> {
        let mut paths = BTreeMap::new();
        let mut path: DtPathTokens = Default::default();
        let root = self.as_ref().root();
        for (node, depth) in root.descendants() {
            path.tokens.truncate(depth - 1);
            path.tokens.push(node.name()?.to_bytes());
            if !path.is_overlayable_node() {
                continue;
            }
            if let Some(phandle) = node.get_phandle()? {
                paths.insert(phandle, path.clone());
            }
        }
        Ok(paths)
    }

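    // Walks /__local_fixups__ to find, for each overlayable node, the phandles referenced by its
    // properties, so that dependencies of assigned devices can be kept when filtering.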
    fn collect_phandle_references_from_overlayable_nodes(
        &self,
    ) -> Result<BTreeMap<DtPathTokens, Vec<Phandle>>> {
        const CELL_SIZE: usize = core::mem::size_of::<u32>();

        let vm_dtbo = self.as_ref();

        let mut phandle_map = BTreeMap::new();
        let Some(local_fixups) = vm_dtbo.node(cstr!("/__local_fixups__"))? else {
            return Ok(phandle_map);
        };

        let mut path: DtPathTokens = Default::default();
        for (fixup_node, depth) in local_fixups.descendants() {
            let node_name = fixup_node.name()?;
            path.tokens.truncate(depth - 1);
            path.tokens.push(node_name.to_bytes());
            if path.tokens.len() != depth {
                return Err(DeviceAssignmentError::Internal);
            }
            if !path.is_overlayable_node() {
                continue;
            }
            let target_node = self.node(&path)?.ok_or(DeviceAssignmentError::InvalidDtbo)?;

            let mut phandles = vec![];
            for fixup_prop in fixup_node.properties()? {
                let target_prop = target_node
                    .getprop(fixup_prop.name()?)
                    .or(Err(DeviceAssignmentError::InvalidDtbo))?
                    .ok_or(DeviceAssignmentError::InvalidDtbo)?;
                let fixup_prop_values = fixup_prop.value()?;
                if fixup_prop_values.is_empty() || fixup_prop_values.len() % CELL_SIZE != 0 {
                    return Err(DeviceAssignmentError::InvalidDtbo);
                }

                for fixup_prop_cell in fixup_prop_values.chunks(CELL_SIZE) {
                    let phandle_offset: usize = u32::from_be_bytes(
                        fixup_prop_cell.try_into().or(Err(DeviceAssignmentError::InvalidDtbo))?,
                    )
                    .try_into()
                    .or(Err(DeviceAssignmentError::InvalidDtbo))?;
                    if phandle_offset % CELL_SIZE != 0 {
                        return Err(DeviceAssignmentError::InvalidDtbo);
                    }
                    let phandle_value = target_prop
                        .get(phandle_offset..phandle_offset + CELL_SIZE)
                        .ok_or(DeviceAssignmentError::InvalidDtbo)?;
                    let phandle: Phandle = U32::ref_from(phandle_value)
                        .unwrap()
                        .get()
                        .try_into()
                        .or(Err(DeviceAssignmentError::InvalidDtbo))?;

                    phandles.push(phandle);
                }
            }
            if !phandles.is_empty() {
                phandle_map.insert(path.clone(), phandles);
            }
        }

        Ok(phandle_map)
    }

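    // Builds the keep-mask for filtering: starts from the assigned device paths and transitively
    // adds every overlayable node they reference via phandle.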
    fn build_mask(&self, assigned_devices: Vec<DtPathTokens>) -> Result<DeviceTreeMask> {
        if assigned_devices.is_empty() {
            return Err(DeviceAssignmentError::Internal);
        }

        let dependencies = self.collect_phandle_references_from_overlayable_nodes()?;
        let paths = self.collect_overlayable_nodes_with_phandle()?;

        let mut mask = DeviceTreeMask::new();
        let mut stack = assigned_devices;
        while let Some(path) = stack.pop() {
            if !mask.mask(&path) {
                continue;
            }
            let Some(dst_phandles) = dependencies.get(&path) else {
                continue;
            };
            for dst_phandle in dst_phandles {
                let dst_path = paths.get(dst_phandle).ok_or(DeviceAssignmentError::Internal)?;
                stack.push(dst_path.clone());
            }
        }

        Ok(mask)
    }
}

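// Removes /__symbols__ properties whose target node no longer exists in the tree (e.g. after
// nodes have been filtered out or deleted).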
fn filter_dangling_symbols(fdt: &mut Fdt) -> Result<()> {
    if let Some(symbols) = fdt.symbols()? {
        let mut removed = vec![];
        for prop in symbols.properties()? {
            let path = CStr::from_bytes_with_nul(prop.value()?)
                .map_err(|_| DeviceAssignmentError::Internal)?;
            if fdt.node(path)?.is_none() {
                let name = prop.name()?;
                removed.push(CString::from(name));
            }
        }

        let mut symbols = fdt.symbols_mut()?.unwrap();
        for name in removed {
            symbols.nop_property(&name)?;
        }
    }
    Ok(())
}

impl AsRef<Fdt> for VmDtbo {
    fn as_ref(&self) -> &Fdt {
        &self.0
    }
}

impl AsMut<Fdt> for VmDtbo {
    fn as_mut(&mut self) -> &mut Fdt {
        &mut self.0
    }
}

// Filter any node that isn't masked by DeviceTreeMask.
fn filter_with_mask(anchor: FdtNodeMut, mask: &DeviceTreeMask) -> Result<()> {
    let mut stack = vec![mask];
    let mut iter = anchor.next_node(0)?;
    while let Some((node, depth)) = iter {
        stack.truncate(depth);
        let parent_mask = stack.last().unwrap();
        let DeviceTreeChildrenMask::Partial(parent_mask_children) = &parent_mask.children else {
            // Shouldn't happen. We only step-in if parent has DeviceTreeChildrenMask::Partial.
            return Err(DeviceAssignmentError::Internal);
        };

        let name = node.as_node().name()?.to_bytes();
        let mask = parent_mask_children.iter().find(|child_mask| child_mask.name_bytes == name);
        if let Some(masked) = mask {
            if let DeviceTreeChildrenMask::Partial(_) = &masked.children {
                // This node is partially masked. Stepping-in.
                stack.push(masked);
                iter = node.next_node(depth)?;
            } else {
                // This node is fully masked. Stepping-out.
                iter = node.next_node_skip_subnodes(depth)?;
            }
        } else {
            // This node isn't masked.
            iter = node.delete_and_next_node(depth)?;
        }
    }

    Ok(())
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct PvIommu {
    // ID from pvIOMMU node
    id: u32,
}

impl PvIommu {
    fn parse(node: &FdtNode) -> Result<Self> {
        let iommu_cells = node
            .getprop_u32(cstr!("#iommu-cells"))?
            .ok_or(DeviceAssignmentError::InvalidPvIommu)?;
        // Ensures #iommu-cells = <1>. It means that each `<iommus>` entry contains a pair of
        // (pvIOMMU ID, vSID).
        if iommu_cells != 1 {
            return Err(DeviceAssignmentError::InvalidPvIommu);
        }
        let id = node.getprop_u32(cstr!("id"))?.ok_or(DeviceAssignmentError::InvalidPvIommu)?;
        Ok(Self { id })
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct Vsid(u32);

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct Sid(u64);

impl From<u32> for Sid {
    fn from(sid: u32) -> Self {
        Self(sid.into())
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct DeviceReg {
    addr: u64,
    size: u64,
}

impl DeviceReg {
    pub fn overlaps(&self, range: &Range<u64>) -> bool {
        self.addr < range.end && range.start < self.addr.checked_add(self.size).unwrap()
    }
}

impl TryFrom<Reg<u64>> for DeviceReg {
    type Error = DeviceAssignmentError;

    fn try_from(reg: Reg<u64>) -> Result<Self> {
        Ok(Self { addr: reg.addr, size: reg.size.ok_or(DeviceAssignmentError::MalformedReg)? })
    }
}

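/// Parses the <reg> property of `node` into a list of (address, size) pairs.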
fn parse_node_reg(node: &FdtNode) -> Result<Vec<DeviceReg>> {
    node.reg()?
        .ok_or(DeviceAssignmentError::MalformedReg)?
        .map(DeviceReg::try_from)
        .collect::<Result<Vec<_>>>()
}

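/// Serializes device regs into big-endian <reg> bytes (64-bit address followed by 64-bit size).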
fn to_be_bytes(reg: &[DeviceReg]) -> Vec<u8> {
    let mut reg_cells = vec![];
    for x in reg {
        reg_cells.extend_from_slice(&x.addr.to_be_bytes());
        reg_cells.extend_from_slice(&x.size.to_be_bytes());
    }
    reg_cells
}

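/// Physical IOMMU described under the /host node of the VM DTBO, identified by the token it is
/// known by to the hypervisor.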
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct PhysIommu {
    token: u64,
}

impl PhysIommu {
    fn parse(node: &FdtNode) -> Result<Option<Self>> {
        let Some(token) = node.getprop_u64(cstr!("android,pvmfw,token"))? else {
            return Ok(None);
        };
        let Some(iommu_cells) = node.getprop_u32(cstr!("#iommu-cells"))? else {
            return Err(DeviceAssignmentError::InvalidPhysIommu);
        };
        // Currently only supports #iommu-cells = <1>.
        // In that case `<iommus>` entry contains pair of (pIOMMU phandle, Sid token)
        if iommu_cells != 1 {
            return Err(DeviceAssignmentError::UnsupportedPhysIommu);
        }
        Ok(Some(Self { token }))
    }
}

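/// Physical device described under the /host node of the VM DTBO: the assignable node it backs
/// (`target`), its physical MMIO regions and its (IOMMU, SID) attachments.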
#[derive(Debug)]
struct PhysicalDeviceInfo {
    target: Phandle,
    reg: Vec<DeviceReg>,
    iommus: Vec<(PhysIommu, Sid)>,
}

impl PhysicalDeviceInfo {
    fn parse_iommus(
        node: &FdtNode,
        phys_iommus: &BTreeMap<Phandle, PhysIommu>,
    ) -> Result<Vec<(PhysIommu, Sid)>> {
        let mut iommus = vec![];
        let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
            return Ok(iommus);
        };
        while let Some(cell) = cells.next() {
            // Parse pIOMMU ID
            let phandle =
                Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
            let iommu = phys_iommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;

            // Parse Sid
            let Some(cell) = cells.next() else {
                return Err(DeviceAssignmentError::MalformedIommus);
            };

            iommus.push((*iommu, Sid::from(cell)));
        }
        Ok(iommus)
    }

    fn parse(node: &FdtNode, phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<Option<Self>> {
        let Some(phandle) = node.getprop_u32(cstr!("android,pvmfw,target"))? else {
            return Ok(None);
        };
        let target = Phandle::try_from(phandle)?;
        let reg = parse_node_reg(node)?;
        let iommus = Self::parse_iommus(node, phys_iommus)?;
        Ok(Some(Self { target, reg, iommus }))
    }
}

/// Assigned device information parsed from crosvm DT.
/// Keeps everything in owned data because the underlying FDT will be reused for the platform DT.
#[derive(Debug, Eq, PartialEq)]
struct AssignedDeviceInfo {
    // Node path of assigned device (e.g. "/rng")
    node_path: CString,
    // <reg> property from the crosvm DT
    reg: Vec<DeviceReg>,
    // <interrupts> property from the crosvm DT
    interrupts: Vec<u8>,
    // Parsed <iommus> property from the crosvm DT. Tuple of PvIommu and vSID.
    iommus: Vec<(PvIommu, Vsid)>,
}

impl AssignedDeviceInfo {
    fn validate_reg(
        device_reg: &[DeviceReg],
        physical_device_reg: &[DeviceReg],
        hypervisor: &dyn DeviceAssigningHypervisor,
    ) -> Result<()> {
        let mut virt_regs = device_reg.iter();
        let mut phys_regs = physical_device_reg.iter();
        // TODO(b/308694211): Move this constant to vmbase::layout once vmbase is std-compatible.
        const PVMFW_RANGE: Range<u64> = 0x7fc0_0000..0x8000_0000;
        // PV reg and physical reg should have a 1:1 match, in order.
        for (reg, phys_reg) in virt_regs.by_ref().zip(phys_regs.by_ref()) {
            if reg.overlaps(&PVMFW_RANGE) {
                return Err(DeviceAssignmentError::InvalidReg(reg.addr, reg.size));
            }
            // If this call returns successfully, hyp has mapped the MMIO region at `reg`.
            let addr = hypervisor.get_phys_mmio_token(reg.addr, reg.size).map_err(|e| {
                error!("Hypervisor error while requesting MMIO token: {e}");
                DeviceAssignmentError::InvalidReg(reg.addr, reg.size)
            })?;
            // Only check the address because the hypervisor guarantees the size matches on success.
            if phys_reg.addr != addr {
                error!("Assigned device {reg:x?} has unexpected physical address");
                return Err(DeviceAssignmentError::InvalidPhysReg(addr, reg.size));
            }
        }

        if let Some(DeviceReg { addr, size }) = virt_regs.next() {
            return Err(DeviceAssignmentError::InvalidReg(*addr, *size));
        }

        if let Some(DeviceReg { addr, size }) = phys_regs.next() {
            return Err(DeviceAssignmentError::InvalidPhysReg(*addr, *size));
        }

        Ok(())
    }

    fn parse_interrupts(node: &FdtNode) -> Result<Vec<u8>> {
        // Validation: the number of <interrupts> cells must be a multiple of #interrupt-cells.
        // We can't know how many interrupts would exist.
        let interrupts_cells = node
            .getprop_cells(cstr!("interrupts"))?
            .ok_or(DeviceAssignmentError::InvalidInterrupts)?
            .count();
        if interrupts_cells % CELLS_PER_INTERRUPT != 0 {
            return Err(DeviceAssignmentError::InvalidInterrupts);
        }

        // Once validated, keep the raw bytes so patch can be done with setprop()
        Ok(node.getprop(cstr!("interrupts")).unwrap().unwrap().into())
    }

    // TODO(b/277993056): Also validate /__local_fixups__ to ensure that <iommus> has phandle.
    fn parse_iommus(
        node: &FdtNode,
        pviommus: &BTreeMap<Phandle, PvIommu>,
    ) -> Result<Vec<(PvIommu, Vsid)>> {
        let mut iommus = vec![];
        let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
            return Ok(iommus);
        };
        while let Some(cell) = cells.next() {
            // Parse pvIOMMU ID
            let phandle =
                Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
            let pviommu = pviommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;

            // Parse vSID
            let Some(cell) = cells.next() else {
                return Err(DeviceAssignmentError::MalformedIommus);
            };
            let vsid = Vsid(cell);

            iommus.push((*pviommu, vsid));
        }
        Ok(iommus)
    }

    fn validate_iommus(
        iommus: &[(PvIommu, Vsid)],
        physical_device_iommu: &[(PhysIommu, Sid)],
        hypervisor: &dyn DeviceAssigningHypervisor,
    ) -> Result<()> {
        if iommus.len() != physical_device_iommu.len() {
            return Err(DeviceAssignmentError::InvalidIommus);
        }
        // pvIOMMUs can be reordered, and the hypervisor may not guarantee a 1:1 mapping.
        // So we need to mark what's matched or not.
        let mut physical_device_iommu = physical_device_iommu.to_vec();
        for (pviommu, vsid) in iommus {
            let (id, sid) =
                hypervisor.get_phys_iommu_token(pviommu.id.into(), vsid.0.into()).map_err(|e| {
                    error!("Hypervisor error while requesting IOMMU token ({pviommu:?}, {vsid:?}): {e}");
                    DeviceAssignmentError::InvalidIommus
                })?;

            let pos = physical_device_iommu
                .iter()
                .position(|(phys_iommu, phys_sid)| (phys_iommu.token, phys_sid.0) == (id, sid));
            match pos {
                Some(pos) => physical_device_iommu.remove(pos),
                None => {
                    error!("Failed to validate device <iommus>. No matching phys IOMMU or duplicated mapping for pviommu={pviommu:?}, vsid={vsid:?}");
                    return Err(DeviceAssignmentError::InvalidIommus);
                }
            };
        }
        Ok(())
    }

    fn parse(
        fdt: &Fdt,
        vm_dtbo: &VmDtbo,
        dtbo_node_path: &DtPathTokens,
        physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
        pviommus: &BTreeMap<Phandle, PvIommu>,
        hypervisor: &dyn DeviceAssigningHypervisor,
    ) -> Result<Option<Self>> {
        let dtbo_node =
            vm_dtbo.node(dtbo_node_path)?.ok_or(DeviceAssignmentError::InvalidSymbols)?;
        let node_path = vm_dtbo.locate_overlay_target_path(dtbo_node_path, &dtbo_node)?;

        let Some(node) = fdt.node(&node_path)? else { return Ok(None) };

        // Currently can only assign devices backed by physical devices.
        let phandle = dtbo_node.get_phandle()?.ok_or(DeviceAssignmentError::InvalidDtbo)?;
        let Some(physical_device) = physical_devices.get(&phandle) else {
            // If the labeled DT node isn't backed by a physical device node, then just return None.
            // It's not an error because such a node can be a dependency of assignable device nodes.
            return Ok(None);
        };

        let reg = parse_node_reg(&node)?;
        Self::validate_reg(&reg, &physical_device.reg, hypervisor)?;

        let interrupts = Self::parse_interrupts(&node)?;

        let iommus = Self::parse_iommus(&node, pviommus)?;
        Self::validate_iommus(&iommus, &physical_device.iommus, hypervisor)?;

        Ok(Some(Self { node_path, reg, interrupts, iommus }))
    }

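    // Patches the corresponding platform DT node with the validated <reg>, <interrupts> and
    // <iommus> (vSIDs paired with the patched pvIOMMU phandles).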
    fn patch(&self, fdt: &mut Fdt, pviommu_phandles: &BTreeMap<PvIommu, Phandle>) -> Result<()> {
        let mut dst = fdt.node_mut(&self.node_path)?.unwrap();
        dst.setprop(cstr!("reg"), &to_be_bytes(&self.reg))?;
        dst.setprop(cstr!("interrupts"), &self.interrupts)?;
        let mut iommus = Vec::with_capacity(8 * self.iommus.len());
        for (pviommu, vsid) in &self.iommus {
            let phandle = pviommu_phandles.get(pviommu).unwrap();
            iommus.extend_from_slice(&u32::from(*phandle).to_be_bytes());
            iommus.extend_from_slice(&vsid.0.to_be_bytes());
        }
        dst.setprop(cstr!("iommus"), &iommus)?;

        Ok(())
    }
}

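// Typical usage, mirroring the flow exercised in the tests below: parse the assignment info from
// the crosvm DT and VM DTBO, filter the VM DTBO down to the assigned devices and their
// dependencies, apply it as an overlay onto the platform DT, then patch the platform DT:
//
//     let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor)?.unwrap();
//     device_info.filter(vm_dtbo)?;
//     // SAFETY: the VM DTBO is not reused after being consumed by the overlay.
//     unsafe { platform_dt.apply_overlay(vm_dtbo.as_mut())? };
//     device_info.patch(platform_dt)?;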
#[derive(Debug, Eq, PartialEq)]
pub struct DeviceAssignmentInfo {
    pviommus: BTreeSet<PvIommu>,
    assigned_devices: Vec<AssignedDeviceInfo>,
    vm_dtbo_mask: DeviceTreeMask,
}

impl DeviceAssignmentInfo {
    const PVIOMMU_COMPATIBLE: &'static CStr = cstr!("pkvm,pviommu");

    /// Parses pvIOMMUs in fdt
    // Note: This will validate pvIOMMU ids' uniqueness, even when unassigned.
    fn parse_pviommus(fdt: &Fdt) -> Result<BTreeMap<Phandle, PvIommu>> {
        let mut pviommus = BTreeMap::new();
        for compatible in fdt.compatible_nodes(Self::PVIOMMU_COMPATIBLE)? {
            let Some(phandle) = compatible.get_phandle()? else {
                continue; // Skips unreachable pvIOMMU node
            };
            let pviommu = PvIommu::parse(&compatible)?;
            if pviommus.insert(phandle, pviommu).is_some() {
                return Err(FdtError::BadPhandle.into());
            }
        }
        Ok(pviommus)
    }

    fn validate_pviommu_topology(assigned_devices: &[AssignedDeviceInfo]) -> Result<()> {
        let mut all_iommus = BTreeSet::new();
        for assigned_device in assigned_devices {
            for iommu in &assigned_device.iommus {
                if !all_iommus.insert(iommu) {
                    error!("Unsupported pvIOMMU duplication found, <iommus> = {iommu:?}");
                    return Err(DeviceAssignmentError::UnsupportedPvIommusDuplication);
                }
            }
        }
        Ok(())
    }

    /// Parses fdt and vm_dtbo, and creates new DeviceAssignmentInfo
    // TODO(b/277993056): Parse __local_fixups__
    // TODO(b/277993056): Parse __fixups__
    pub fn parse(
        fdt: &Fdt,
        vm_dtbo: &VmDtbo,
        hypervisor: &dyn DeviceAssigningHypervisor,
    ) -> Result<Option<Self>> {
        let Some(symbols_node) = vm_dtbo.as_ref().symbols()? else {
            // /__symbols__ should contain all assignable devices.
            // If empty, then nothing can be assigned.
            return Ok(None);
        };

        let pviommus = Self::parse_pviommus(fdt)?;
        let unique_pviommus: BTreeSet<_> = pviommus.values().cloned().collect();
        if pviommus.len() != unique_pviommus.len() {
            return Err(DeviceAssignmentError::DuplicatedPvIommuIds);
        }

        let physical_devices = vm_dtbo.parse_physical_devices()?;

        let mut assigned_devices = vec![];
        let mut assigned_device_paths = vec![];
        for symbol_prop in symbols_node.properties()? {
            let symbol_prop_value = symbol_prop.value()?;
            let dtbo_node_path = CStr::from_bytes_with_nul(symbol_prop_value)
                .or(Err(DeviceAssignmentError::InvalidSymbols))?;
            let dtbo_node_path = DtPathTokens::new(dtbo_node_path)?;
            if !dtbo_node_path.is_overlayable_node() {
                continue;
            }
            let assigned_device = AssignedDeviceInfo::parse(
                fdt,
                vm_dtbo,
                &dtbo_node_path,
                &physical_devices,
                &pviommus,
                hypervisor,
            )?;
            if let Some(assigned_device) = assigned_device {
                assigned_devices.push(assigned_device);
                assigned_device_paths.push(dtbo_node_path);
            }
        }
        if assigned_devices.is_empty() {
            return Ok(None);
        }

        Self::validate_pviommu_topology(&assigned_devices)?;

        let mut vm_dtbo_mask = vm_dtbo.build_mask(assigned_device_paths)?;
        vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__local_fixups__"))?);
        vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__symbols__"))?);

        // Note: Any node without __overlay__ will be ignored by fdt_apply_overlay,
        // so doesn't need to be filtered.

        Ok(Some(Self { pviommus: unique_pviommus, assigned_devices, vm_dtbo_mask }))
    }

    /// Filters VM DTBO to only contain necessary information for booting pVM
    pub fn filter(&self, vm_dtbo: &mut VmDtbo) -> Result<()> {
        let vm_dtbo = vm_dtbo.as_mut();

        // Filter unused references in /__local_fixups__
        if let Some(local_fixups) = vm_dtbo.node_mut(cstr!("/__local_fixups__"))? {
            filter_with_mask(local_fixups, &self.vm_dtbo_mask)?;
        }

        // Filter unused nodes in rest of tree
        let root = vm_dtbo.root_mut();
        filter_with_mask(root, &self.vm_dtbo_mask)?;

        filter_dangling_symbols(vm_dtbo)
    }

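    // Binds each parsed pvIOMMU to one of the pre-populated "pkvm,pviommu" nodes in the platform
    // DT (patching its "id" property), returns the phandle chosen for each pvIOMMU, and deletes
    // any leftover unassigned pvIOMMU nodes.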
    fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
        let mut compatible = fdt.root_mut().next_compatible(Self::PVIOMMU_COMPATIBLE)?;
        let mut pviommu_phandles = BTreeMap::new();

        for pviommu in &self.pviommus {
            let mut node = compatible.ok_or(DeviceAssignmentError::TooManyPvIommu)?;
            let phandle = node.as_node().get_phandle()?.ok_or(DeviceAssignmentError::Internal)?;
            node.setprop_inplace(cstr!("id"), &pviommu.id.to_be_bytes())?;
            if pviommu_phandles.insert(*pviommu, phandle).is_some() {
                return Err(DeviceAssignmentError::Internal);
            }
            compatible = node.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
        }

        // Filters pre-populated but unassigned pvIOMMUs.
        while let Some(filtered_pviommu) = compatible {
            compatible = filtered_pviommu.delete_and_next_compatible(Self::PVIOMMU_COMPATIBLE)?;
        }

        Ok(pviommu_phandles)
    }

    pub fn patch(&self, fdt: &mut Fdt) -> Result<()> {
        let pviommu_phandles = self.patch_pviommus(fdt)?;

        // Patches assigned devices
        for device in &self.assigned_devices {
            device.patch(fdt, &pviommu_phandles)?;
        }

        // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
        filter_dangling_symbols(fdt)
    }
}

/// Cleans device trees so they don't contain any pre-populated nodes/props for device assignment.
pub fn clean(fdt: &mut Fdt) -> Result<()> {
    let mut compatible = fdt.root_mut().next_compatible(cstr!("pkvm,pviommu"))?;
    // Removes any pre-populated pvIOMMU nodes.
    while let Some(filtered_pviommu) = compatible {
        compatible = filtered_pviommu.delete_and_next_compatible(cstr!("pkvm,pviommu"))?;
    }

    // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
    filter_dangling_symbols(fdt)
}

Jaewan Kimc6e023b2023-10-12 15:11:05 +09001063#[cfg(test)]
1064mod tests {
1065 use super::*;
Jaewan Kim52477ae2023-11-21 21:20:52 +09001066 use alloc::collections::{BTreeMap, BTreeSet};
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001067 use dts::Dts;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001068 use std::fs;
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001069 use std::path::Path;
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001070
1071 const VM_DTBO_FILE_PATH: &str = "test_pvmfw_devices_vm_dtbo.dtbo";
1072 const VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH: &str =
1073 "test_pvmfw_devices_vm_dtbo_without_symbols.dtbo";
Jaewan Kim19b984f2023-12-04 15:16:50 +09001074 const VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH: &str =
1075 "test_pvmfw_devices_vm_dtbo_with_duplicated_iommus.dtbo";
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001076 const VM_DTBO_WITH_DEPENDENCIES_FILE_PATH: &str =
1077 "test_pvmfw_devices_vm_dtbo_with_dependencies.dtbo";
Jaewan Kima67e36a2023-11-29 16:50:23 +09001078 const FDT_WITHOUT_IOMMUS_FILE_PATH: &str = "test_pvmfw_devices_without_iommus.dtb";
Jaewan Kim52477ae2023-11-21 21:20:52 +09001079 const FDT_WITHOUT_DEVICE_FILE_PATH: &str = "test_pvmfw_devices_without_device.dtb";
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001080 const FDT_FILE_PATH: &str = "test_pvmfw_devices_with_rng.dtb";
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +00001081 const FDT_WITH_DEVICE_OVERLAPPING_PVMFW: &str = "test_pvmfw_devices_overlapping_pvmfw.dtb";
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001082 const FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH: &str =
1083 "test_pvmfw_devices_with_multiple_devices_iommus.dtb";
1084 const FDT_WITH_IOMMU_SHARING: &str = "test_pvmfw_devices_with_iommu_sharing.dtb";
1085 const FDT_WITH_IOMMU_ID_CONFLICT: &str = "test_pvmfw_devices_with_iommu_id_conflict.dtb";
Jaewan Kim19b984f2023-12-04 15:16:50 +09001086 const FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH: &str =
1087 "test_pvmfw_devices_with_duplicated_pviommus.dtb";
1088 const FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH: &str =
1089 "test_pvmfw_devices_with_multiple_reg_iommus.dtb";
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001090 const FDT_WITH_DEPENDENCY_FILE_PATH: &str = "test_pvmfw_devices_with_dependency.dtb";
1091 const FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
1092 "test_pvmfw_devices_with_multiple_dependencies.dtb";
1093 const FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str = "test_pvmfw_devices_with_dependency_loop.dtb";
1094
1095 const EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH: &str = "expected_dt_with_dependency.dtb";
1096 const EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
1097 "expected_dt_with_multiple_dependencies.dtb";
1098 const EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str =
1099 "expected_dt_with_dependency_loop.dtb";
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001100
Jaewan Kim52477ae2023-11-21 21:20:52 +09001101 #[derive(Debug, Default)]
1102 struct MockHypervisor {
1103 mmio_tokens: BTreeMap<(u64, u64), u64>,
1104 iommu_tokens: BTreeMap<(u64, u64), (u64, u64)>,
1105 }
1106
1107 impl DeviceAssigningHypervisor for MockHypervisor {
1108 fn get_phys_mmio_token(&self, base_ipa: u64, size: u64) -> hyp::Result<u64> {
1109 Ok(*self.mmio_tokens.get(&(base_ipa, size)).ok_or(hyp::Error::KvmError(
1110 hyp::KvmError::InvalidParameter,
1111 0xc6000012, /* VENDOR_HYP_KVM_DEV_REQ_MMIO_FUNC_ID */
1112 ))?)
1113 }
1114
1115 fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> hyp::Result<(u64, u64)> {
1116 Ok(*self.iommu_tokens.get(&(pviommu_id, vsid)).ok_or(hyp::Error::KvmError(
1117 hyp::KvmError::InvalidParameter,
1118 0xc6000013, /* VENDOR_HYP_KVM_DEV_REQ_DMA_FUNC_ID */
1119 ))?)
1120 }
1121 }
1122
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001123 #[derive(Debug, Eq, PartialEq)]
1124 struct AssignedDeviceNode {
1125 path: CString,
1126 reg: Vec<u8>,
1127 interrupts: Vec<u8>,
Jaewan Kima67e36a2023-11-29 16:50:23 +09001128 iommus: Vec<u32>, // pvIOMMU id and vSID
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001129 }
1130
1131 impl AssignedDeviceNode {
1132 fn parse(fdt: &Fdt, path: &CStr) -> Result<Self> {
1133 let Some(node) = fdt.node(path)? else {
1134 return Err(FdtError::NotFound.into());
1135 };
1136
Jaewan Kim19b984f2023-12-04 15:16:50 +09001137 let reg = node.getprop(cstr!("reg"))?.ok_or(DeviceAssignmentError::MalformedReg)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001138 let interrupts = node
1139 .getprop(cstr!("interrupts"))?
1140 .ok_or(DeviceAssignmentError::InvalidInterrupts)?;
1141 let mut iommus = vec![];
Jaewan Kima9200492023-11-21 20:45:31 +09001142 if let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? {
1143 while let Some(pviommu_id) = cells.next() {
1144 // pvIOMMU id
1145 let phandle = Phandle::try_from(pviommu_id)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001146 let pviommu = fdt
1147 .node_with_phandle(phandle)?
Jaewan Kim19b984f2023-12-04 15:16:50 +09001148 .ok_or(DeviceAssignmentError::MalformedIommus)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001149 let compatible = pviommu.getprop_str(cstr!("compatible"));
1150 if compatible != Ok(Some(cstr!("pkvm,pviommu"))) {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001151 return Err(DeviceAssignmentError::MalformedIommus);
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001152 }
1153 let id = pviommu
1154 .getprop_u32(cstr!("id"))?
Jaewan Kim19b984f2023-12-04 15:16:50 +09001155 .ok_or(DeviceAssignmentError::MalformedIommus)?;
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001156 iommus.push(id);
Jaewan Kima9200492023-11-21 20:45:31 +09001157
1158 // vSID
1159 let Some(vsid) = cells.next() else {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001160 return Err(DeviceAssignmentError::MalformedIommus);
Jaewan Kima9200492023-11-21 20:45:31 +09001161 };
1162 iommus.push(vsid);
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001163 }
1164 }
1165 Ok(Self { path: path.into(), reg: reg.into(), interrupts: interrupts.into(), iommus })
1166 }
1167 }
1168
1169 fn collect_pviommus(fdt: &Fdt) -> Result<Vec<u32>> {
1170 let mut pviommus = BTreeSet::new();
1171 for pviommu in fdt.compatible_nodes(cstr!("pkvm,pviommu"))? {
1172 if let Ok(Some(id)) = pviommu.getprop_u32(cstr!("id")) {
1173 pviommus.insert(id);
1174 }
1175 }
1176 Ok(pviommus.iter().cloned().collect())
1177 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001178
1179 fn into_fdt_prop(native_bytes: Vec<u32>) -> Vec<u8> {
1180 let mut v = Vec::with_capacity(native_bytes.len() * 4);
1181 for byte in native_bytes {
1182 v.extend_from_slice(&byte.to_be_bytes());
1183 }
1184 v
1185 }
1186
Jaewan Kim52477ae2023-11-21 21:20:52 +09001187 impl From<[u64; 2]> for DeviceReg {
1188 fn from(fdt_cells: [u64; 2]) -> Self {
1189 DeviceReg { addr: fdt_cells[0], size: fdt_cells[1] }
1190 }
1191 }
1192
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001193 #[test]
1194 fn device_info_new_without_symbols() {
1195 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1196 let mut vm_dtbo_data = fs::read(VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH).unwrap();
1197 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1198 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1199
Jaewan Kim52477ae2023-11-21 21:20:52 +09001200 let hypervisor: MockHypervisor = Default::default();
1201 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
1202 assert_eq!(device_info, None);
1203 }
1204
1205 #[test]
1206 fn device_info_new_without_device() {
1207 let mut fdt_data = fs::read(FDT_WITHOUT_DEVICE_FILE_PATH).unwrap();
1208 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1209 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1210 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1211
1212 let hypervisor: MockHypervisor = Default::default();
1213 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001214 assert_eq!(device_info, None);
1215 }
1216
1217 #[test]
Jaewan Kima67e36a2023-11-29 16:50:23 +09001218 fn device_info_assigned_info_without_iommus() {
1219 let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
1220 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1221 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1222 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1223
Jaewan Kim52477ae2023-11-21 21:20:52 +09001224 let hypervisor = MockHypervisor {
1225 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
1226 iommu_tokens: BTreeMap::new(),
1227 };
1228 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kima67e36a2023-11-29 16:50:23 +09001229
1230 let expected = [AssignedDeviceInfo {
Jaewan Kimc39974e2023-12-02 01:13:30 +09001231 node_path: CString::new("/bus0/backlight").unwrap(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001232 reg: vec![[0x9, 0xFF].into()],
Jaewan Kima67e36a2023-11-29 16:50:23 +09001233 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
1234 iommus: vec![],
1235 }];
1236
1237 assert_eq!(device_info.assigned_devices, expected);
1238 }
1239
1240 #[test]
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001241 fn device_info_assigned_info() {
1242 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1243 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1244 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1245 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1246
Jaewan Kim52477ae2023-11-21 21:20:52 +09001247 let hypervisor = MockHypervisor {
1248 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1249 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1250 };
1251 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001252
1253 let expected = [AssignedDeviceInfo {
1254 node_path: CString::new("/rng").unwrap(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001255 reg: vec![[0x9, 0xFF].into()],
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001256 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001257 iommus: vec![(PvIommu { id: 0x4 }, Vsid(0xFF0))],
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001258 }];
1259
1260 assert_eq!(device_info.assigned_devices, expected);
1261 }
1262
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001263 #[test]
1264 fn device_info_filter() {
1265 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1266 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1267 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1268 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1269
Jaewan Kim52477ae2023-11-21 21:20:52 +09001270 let hypervisor = MockHypervisor {
1271 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1272 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1273 };
1274 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001275 device_info.filter(vm_dtbo).unwrap();
1276
1277 let vm_dtbo = vm_dtbo.as_mut();
1278
Jaewan Kim371f6c82024-02-24 01:33:37 +09001279 let symbols = vm_dtbo.symbols().unwrap().unwrap();
1280
Jaewan Kima232ed02024-02-25 16:08:14 +00001281 let rng = vm_dtbo.node(cstr!("/fragment@0/__overlay__/rng")).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001282 assert_ne!(rng, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001283 let rng_symbol = symbols.getprop_str(cstr!("rng")).unwrap();
Jaewan Kima232ed02024-02-25 16:08:14 +00001284 assert_eq!(Some(cstr!("/fragment@0/__overlay__/rng")), rng_symbol);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001285
Jaewan Kima232ed02024-02-25 16:08:14 +00001286 let light = vm_dtbo.node(cstr!("/fragment@0/__overlay__/light")).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001287 assert_eq!(light, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001288 let light_symbol = symbols.getprop_str(cstr!("light")).unwrap();
1289 assert_eq!(None, light_symbol);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001290
Jaewan Kima232ed02024-02-25 16:08:14 +00001291 let led = vm_dtbo.node(cstr!("/fragment@0/__overlay__/led")).unwrap();
Jaewan Kima67e36a2023-11-29 16:50:23 +09001292 assert_eq!(led, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001293 let led_symbol = symbols.getprop_str(cstr!("led")).unwrap();
1294 assert_eq!(None, led_symbol);
Jaewan Kima67e36a2023-11-29 16:50:23 +09001295
Jaewan Kima232ed02024-02-25 16:08:14 +00001296 let backlight = vm_dtbo.node(cstr!("/fragment@0/__overlay__/bus0/backlight")).unwrap();
Jaewan Kima67e36a2023-11-29 16:50:23 +09001297 assert_eq!(backlight, None);
Jaewan Kim371f6c82024-02-24 01:33:37 +09001298 let backlight_symbol = symbols.getprop_str(cstr!("backlight")).unwrap();
1299 assert_eq!(None, backlight_symbol);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001300 }
1301
1302 #[test]
1303 fn device_info_patch() {
Jaewan Kima67e36a2023-11-29 16:50:23 +09001304 let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001305 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1306 let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
1307 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1308 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1309 let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
1310
Jaewan Kim52477ae2023-11-21 21:20:52 +09001311 let hypervisor = MockHypervisor {
1312 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
1313 iommu_tokens: BTreeMap::new(),
1314 };
1315 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001316 device_info.filter(vm_dtbo).unwrap();
1317
1318 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1319 unsafe {
1320 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1321 }
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001322 device_info.patch(platform_dt).unwrap();
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001323
Jaewan Kimc39974e2023-12-02 01:13:30 +09001324 let backlight_node = platform_dt.node(cstr!("/bus0/backlight")).unwrap().unwrap();
1325 let phandle = backlight_node.getprop_u32(cstr!("phandle")).unwrap();
1326 assert_ne!(None, phandle);
1327
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001328 // Note: Intentionally not using AssignedDeviceNode for matching all props.
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001329 type FdtResult<T> = libfdt::Result<T>;
1330 let expected: Vec<(FdtResult<&CStr>, FdtResult<Vec<u8>>)> = vec![
Jaewan Kima67e36a2023-11-29 16:50:23 +09001331 (Ok(cstr!("android,backlight,ignore-gctrl-reset")), Ok(Vec::new())),
1332 (Ok(cstr!("compatible")), Ok(Vec::from(*b"android,backlight\0"))),
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001333 (Ok(cstr!("interrupts")), Ok(into_fdt_prop(vec![0x0, 0xF, 0x4]))),
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001334 (Ok(cstr!("iommus")), Ok(Vec::new())),
Jaewan Kimc39974e2023-12-02 01:13:30 +09001335 (Ok(cstr!("phandle")), Ok(into_fdt_prop(vec![phandle.unwrap()]))),
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001336 (Ok(cstr!("reg")), Ok(into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]))),
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001337 ];
1338
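        // Collect the patched node's properties and sort them by name so they can be
        // compared against the name-sorted `expected` list above.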
Jaewan Kim0bd637d2023-11-10 13:09:41 +09001339 let mut properties: Vec<_> = backlight_node
1340 .properties()
1341 .unwrap()
1342 .map(|prop| (prop.name(), prop.value().map(|x| x.into())))
1343 .collect();
1344 properties.sort_by(|a, b| {
1345 let lhs = a.0.unwrap_or_default();
1346 let rhs = b.0.unwrap_or_default();
1347 lhs.cmp(rhs)
1348 });
1349
1350 assert_eq!(properties, expected);
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001351 }
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001352
1353 #[test]
Jaewan Kimc730ebf2024-02-22 10:34:55 +09001354 fn device_info_patch_no_pviommus() {
1355 let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
1356 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1357 let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
1358 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1359 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1360 let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();
1361
1362 let hypervisor = MockHypervisor {
1363 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
1364 iommu_tokens: BTreeMap::new(),
1365 };
1366 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1367 device_info.filter(vm_dtbo).unwrap();
1368
1369 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1370 unsafe {
1371 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1372 }
1373 device_info.patch(platform_dt).unwrap();
1374
1375 let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu")).unwrap();
1376 assert_eq!(None, compatible);
1377
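        // Every entry left in __symbols__ must still resolve to an existing node in the
        // patched platform DT.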
1378 if let Some(symbols) = platform_dt.symbols().unwrap() {
1379 for prop in symbols.properties().unwrap() {
1380 let path = CStr::from_bytes_with_nul(prop.value().unwrap()).unwrap();
1381 assert_ne!(None, platform_dt.node(path).unwrap());
1382 }
1383 }
1384 }
1385
1386 #[test]
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001387 fn device_info_overlay_iommu() {
Jaewan Kima67e36a2023-11-29 16:50:23 +09001388 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001389 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1390 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1391 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1392 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1393 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1394 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1395 platform_dt.unpack().unwrap();
1396
Jaewan Kim52477ae2023-11-21 21:20:52 +09001397 let hypervisor = MockHypervisor {
1398 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1399 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1400 };
1401 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001402 device_info.filter(vm_dtbo).unwrap();
1403
1404 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1405 unsafe {
1406 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1407 }
1408 device_info.patch(platform_dt).unwrap();
1409
1410 let expected = AssignedDeviceNode {
1411 path: CString::new("/rng").unwrap(),
1412 reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
1413 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima9200492023-11-21 20:45:31 +09001414 iommus: vec![0x4, 0xFF0],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001415 };
1416
1417 let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
1418 assert_eq!(node, Ok(expected));
1419
1420 let pviommus = collect_pviommus(platform_dt);
1421 assert_eq!(pviommus, Ok(vec![0x4]));
1422 }
1423
1424 #[test]
1425 fn device_info_multiple_devices_iommus() {
1426 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH).unwrap();
1427 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1428 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1429 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1430 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1431 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1432 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1433 platform_dt.unpack().unwrap();
1434
Jaewan Kim52477ae2023-11-21 21:20:52 +09001435 let hypervisor = MockHypervisor {
1436 mmio_tokens: [
1437 ((0x9, 0xFF), 0x12F00000),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001438 ((0x10000, 0x1000), 0xF00000),
1439 ((0x20000, 0x1000), 0xF10000),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001440 ]
1441 .into(),
1442 iommu_tokens: [
1443 ((0x4, 0xFF0), (0x12E40000, 3)),
1444 ((0x40, 0xFFA), (0x40000, 0x4)),
1445 ((0x50, 0xFFB), (0x50000, 0x5)),
1446 ]
1447 .into(),
1448 };
1449 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001450 device_info.filter(vm_dtbo).unwrap();
1451
1452 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1453 unsafe {
1454 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1455 }
1456 device_info.patch(platform_dt).unwrap();
1457
1458 let expected_devices = [
1459 AssignedDeviceNode {
1460 path: CString::new("/rng").unwrap(),
1461 reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
1462 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001463 iommus: vec![0x4, 0xFF0],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001464 },
1465 AssignedDeviceNode {
1466 path: CString::new("/light").unwrap(),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001467 reg: into_fdt_prop(vec![0x0, 0x10000, 0x0, 0x1000, 0x0, 0x20000, 0x0, 0x1000]),
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001468 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001469 iommus: vec![0x40, 0xFFA, 0x50, 0xFFB],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001470 },
1471 ];
1472
1473 for expected in expected_devices {
1474 let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
1475 assert_eq!(node, Ok(expected));
1476 }
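        // /rng uses pvIOMMU 0x4 and /light uses pvIOMMUs 0x40 and 0x50, so all three ids
        // should be instantiated in the platform DT.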
1477 let pviommus = collect_pviommus(platform_dt);
Jaewan Kima67e36a2023-11-29 16:50:23 +09001478 assert_eq!(pviommus, Ok(vec![0x4, 0x40, 0x50]));
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001479 }
1480
1481 #[test]
1482 fn device_info_iommu_sharing() {
1483 let mut fdt_data = fs::read(FDT_WITH_IOMMU_SHARING).unwrap();
1484 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1485 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1486 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1487 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1488 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1489 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1490 platform_dt.unpack().unwrap();
1491
Jaewan Kim52477ae2023-11-21 21:20:52 +09001492 let hypervisor = MockHypervisor {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001493 mmio_tokens: [((0x9, 0xFF), 0x12F00000), ((0x1000, 0x9), 0x12000000)].into(),
1494 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 3)), ((0x4, 0xFF1), (0x12E40000, 9))].into(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001495 };
1496 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001497 device_info.filter(vm_dtbo).unwrap();
1498
1499 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1500 unsafe {
1501 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1502 }
1503 device_info.patch(platform_dt).unwrap();
1504
1505 let expected_devices = [
1506 AssignedDeviceNode {
1507 path: CString::new("/rng").unwrap(),
1508 reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
1509 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
Jaewan Kima67e36a2023-11-29 16:50:23 +09001510 iommus: vec![0x4, 0xFF0],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001511 },
1512 AssignedDeviceNode {
Jaewan Kima67e36a2023-11-29 16:50:23 +09001513 path: CString::new("/led").unwrap(),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001514 reg: into_fdt_prop(vec![0x0, 0x1000, 0x0, 0x9]),
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001515 interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
Jaewan Kim19b984f2023-12-04 15:16:50 +09001516 iommus: vec![0x4, 0xFF1],
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001517 },
1518 ];
1519
1520 for expected in expected_devices {
1521 let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
1522 assert_eq!(node, Ok(expected));
1523 }
1524
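        // Both devices sit behind the same pvIOMMU (id 0x4) with different VSIDs, so only a
        // single pvIOMMU node is expected.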
1525 let pviommus = collect_pviommus(platform_dt);
Jaewan Kima67e36a2023-11-29 16:50:23 +09001526 assert_eq!(pviommus, Ok(vec![0x4]));
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001527 }
1528
1529 #[test]
1530 fn device_info_iommu_id_conflict() {
1531 let mut fdt_data = fs::read(FDT_WITH_IOMMU_ID_CONFLICT).unwrap();
1532 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1533 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1534 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1535
Jaewan Kim52477ae2023-11-21 21:20:52 +09001536 let hypervisor = MockHypervisor {
Jaewan Kim19b984f2023-12-04 15:16:50 +09001537 mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
Jaewan Kim52477ae2023-11-21 21:20:52 +09001538 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1539 };
1540 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
Jaewan Kim51ccfed2023-11-08 13:51:58 +09001541
1542 assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
1543 }
Jaewan Kim52477ae2023-11-21 21:20:52 +09001544
1545 #[test]
1546 fn device_info_invalid_reg() {
1547 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1548 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1549 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1550 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1551
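        // The hypervisor exposes no MMIO token matching the device's <reg>, so parsing must
        // fail with InvalidReg.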
1552 let hypervisor = MockHypervisor {
1553 mmio_tokens: BTreeMap::new(),
1554 iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
1555 };
1556 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1557
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +00001558 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x9, 0xFF)));
Jaewan Kim52477ae2023-11-21 21:20:52 +09001559 }
1560
1561 #[test]
Jaewan Kim19b984f2023-12-04 15:16:50 +09001562 fn device_info_invalid_reg_out_of_order() {
1563 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
1564 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1565 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1566 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1567
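        // The MMIO tokens map the regions to physical addresses in swapped order, so
        // validating the physical <reg> fails.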
1568 let hypervisor = MockHypervisor {
1569 mmio_tokens: [((0xF000, 0x1000), 0xF10000), ((0xF100, 0x1000), 0xF00000)].into(),
1570 iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x50000, 0x5))].into(),
1571 };
1572 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1573
Pierre-Clément Tosi8b78bc32024-03-13 17:37:07 +00001574 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidPhysReg(0xF10000, 0x1000)));
Jaewan Kim19b984f2023-12-04 15:16:50 +09001575 }
1576
1577 #[test]
Jaewan Kim52477ae2023-11-21 21:20:52 +09001578 fn device_info_invalid_iommus() {
1579 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1580 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1581 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1582 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1583
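        // No IOMMU token exists for the device's (pvIOMMU, VSID) pair, so parsing must fail.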
1584 let hypervisor = MockHypervisor {
1585 mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
1586 iommu_tokens: BTreeMap::new(),
1587 };
1588 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1589
1590 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
1591 }
Jaewan Kim19b984f2023-12-04 15:16:50 +09001592
1593 #[test]
1594 fn device_info_duplicated_pv_iommus() {
1595 let mut fdt_data = fs::read(FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH).unwrap();
1596 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1597 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1598 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1599
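        // The guest FDT declares duplicated pvIOMMU ids, which must be rejected.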
1600 let hypervisor = MockHypervisor {
1601 mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
1602 iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
1603 };
1604 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1605
1606 assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
1607 }
1608
1609 #[test]
1610 fn device_info_duplicated_iommus() {
1611 let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
1612 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH).unwrap();
1613 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1614 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1615
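        // The VM DTBO lists the same IOMMU twice for an assignable device, which is not
        // supported.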
1616 let hypervisor = MockHypervisor {
1617 mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
1618 iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
1619 };
1620 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1621
1622 assert_eq!(device_info, Err(DeviceAssignmentError::UnsupportedIommusDuplication));
1623 }
1624
1625 #[test]
1626 fn device_info_duplicated_iommu_mapping() {
1627 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
1628 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1629 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1630 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
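        // Two distinct (pvIOMMU, VSID) pairs map to the same physical (IOMMU, SID), which is
        // rejected as invalid <iommus>.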
1631
1632 let hypervisor = MockHypervisor {
1633 mmio_tokens: [((0xF000, 0x1000), 0xF00000), ((0xF100, 0x1000), 0xF10000)].into(),
1634 iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x40000, 0x4))].into(),
1635 };
1636 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1637
1638 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
1639 }
Jaewan Kim50246682024-03-11 23:18:54 +09001640
1641 #[test]
Pierre-Clément Tosi49e26ce2024-03-12 16:31:50 +00001642 fn device_info_overlaps_pvmfw() {
1643 let mut fdt_data = fs::read(FDT_WITH_DEVICE_OVERLAPPING_PVMFW).unwrap();
1644 let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
1645 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1646 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1647
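        // The device's MMIO region overlaps the memory reserved for pvmfw, so its <reg> is
        // rejected.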
1648 let hypervisor = MockHypervisor {
1649 mmio_tokens: [((0x7fee0000, 0x1000), 0xF00000)].into(),
1650 iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
1651 };
1652 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor);
1653
1654 assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x7fee0000, 0x1000)));
1655 }
1656
1657 #[test]
Jaewan Kim50246682024-03-11 23:18:54 +09001658 fn device_assignment_clean() {
1659 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1660 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1661
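        // The template carries pvIOMMU placeholder nodes; clean() must remove them.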
1662 let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
1663 assert_ne!(None, compatible.unwrap());
1664
1665 clean(platform_dt).unwrap();
1666
1667 let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
1668 assert_eq!(Ok(None), compatible);
1669 }
Jaewan Kim8f6f4662023-12-12 17:38:47 +09001670
1671 #[test]
1672 fn device_info_dependency() {
1673 let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_FILE_PATH).unwrap();
1674 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
1675 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1676 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1677 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1678 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1679 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1680 platform_dt.unpack().unwrap();
1681
1682 let hypervisor = MockHypervisor {
1683 mmio_tokens: [((0xFF000, 0x1), 0xF000)].into(),
1684 iommu_tokens: Default::default(),
1685 };
1686
1687 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1688 device_info.filter(vm_dtbo).unwrap();
1689
1690 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1691 unsafe {
1692 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1693 }
1694 device_info.patch(platform_dt).unwrap();
1695
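        // The patched platform DT, including the dependency nodes pulled in from the VM DTBO,
        // must match the golden DT.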
1696 let expected = Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH)).unwrap();
1697 let platform_dt = Dts::from_fdt(platform_dt).unwrap();
1698
1699 assert_eq!(expected, platform_dt);
1700 }
1701
1702 #[test]
1703 fn device_info_multiple_dependencies() {
1704 let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH).unwrap();
1705 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
1706 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1707 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1708 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1709 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1710 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1711 platform_dt.unpack().unwrap();
1712
1713 let hypervisor = MockHypervisor {
1714 mmio_tokens: [((0xFF000, 0x1), 0xF000), ((0xFF100, 0x1), 0xF100)].into(),
1715 iommu_tokens: Default::default(),
1716 };
1717 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1718 device_info.filter(vm_dtbo).unwrap();
1719
1720 // SAFETY: The damaged VM DTBO is not used again after this unsafe block.
1721 unsafe {
1722 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1723 }
1724 device_info.patch(platform_dt).unwrap();
1725
1726 let expected =
1727 Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH)).unwrap();
1728 let platform_dt = Dts::from_fdt(platform_dt).unwrap();
1729
1730 assert_eq!(expected, platform_dt);
1731 }
1732
1733 #[test]
1734 fn device_info_dependency_loop() {
1735 let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_LOOP_FILE_PATH).unwrap();
1736 let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
1737 let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
1738 let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
1739 let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
1740 platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
1741 let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
1742 platform_dt.unpack().unwrap();
1743
1744 let hypervisor = MockHypervisor {
1745 mmio_tokens: [((0xFF200, 0x1), 0xF200)].into(),
1746 iommu_tokens: Default::default(),
1747 };
1748 let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor).unwrap().unwrap();
1749 device_info.filter(vm_dtbo).unwrap();
1750
1751 // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
1752 unsafe {
1753 platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
1754 }
1755 device_info.patch(platform_dt).unwrap();
1756
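        // Dependencies forming a loop in the VM DTBO are still handled; the result must match
        // the golden DT.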
1757 let expected =
1758 Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH)).unwrap();
1759 let platform_dt = Dts::from_fdt(platform_dt).unwrap();
1760
1761 assert_eq!(expected, platform_dt);
1762 }
Jaewan Kimc6e023b2023-10-12 15:11:05 +09001763}