Merge "Add example using VirtIO console device."
diff --git a/apex/product_packages.mk b/apex/product_packages.mk
index 4293c80..ef84551 100644
--- a/apex/product_packages.mk
+++ b/apex/product_packages.mk
@@ -19,6 +19,9 @@
# To include the APEX in your build, insert this in your device.mk:
# $(call inherit-product, packages/modules/Virtualization/apex/product_packages.mk)
+# If a device supports AVF, it implies that the device uses non-flattened (updatable) APEXes.
+$(call inherit-product, $(SRC_TARGET_DIR)/product/updatable_apex.mk)
+
PRODUCT_PACKAGES += \
com.android.compos \
diff --git a/authfs/fd_server/Android.bp b/authfs/fd_server/Android.bp
index 5097408..db1fd44 100644
--- a/authfs/fd_server/Android.bp
+++ b/authfs/fd_server/Android.bp
@@ -12,6 +12,7 @@
"libauthfs_fsverity_metadata",
"libbinder_rs",
"libclap",
+ "libfsverity_rs",
"liblibc",
"liblog_rust",
"libnix",
@@ -31,6 +32,7 @@
"libauthfs_fsverity_metadata",
"libbinder_rs",
"libclap",
+ "libfsverity_rs",
"liblibc",
"liblog_rust",
"libnix",
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index 01b8209..ada3ffb 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -31,7 +31,6 @@
use std::path::{Component, Path, PathBuf, MAIN_SEPARATOR};
use std::sync::{Arc, RwLock};
-use crate::fsverity;
use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
BnVirtFdService, FsStat::FsStat, IVirtFdService, MAX_REQUESTING_DATA,
};
diff --git a/authfs/fd_server/src/fsverity.rs b/authfs/fd_server/src/fsverity.rs
deleted file mode 100644
index 576f9dd..0000000
--- a/authfs/fd_server/src/fsverity.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use nix::ioctl_readwrite;
-use std::io;
-
-// Constants/values from uapi/linux/fsverity.h
-const FS_VERITY_METADATA_TYPE_MERKLE_TREE: u64 = 1;
-const FS_VERITY_METADATA_TYPE_SIGNATURE: u64 = 3;
-const FS_IOCTL_MAGIC: u8 = b'f';
-const FS_IOCTL_READ_VERITY_METADATA: u8 = 135;
-
-#[repr(C)]
-pub struct fsverity_read_metadata_arg {
- metadata_type: u64,
- offset: u64,
- length: u64,
- buf_ptr: u64,
- __reserved: u64,
-}
-
-ioctl_readwrite!(
- read_verity_metadata,
- FS_IOCTL_MAGIC,
- FS_IOCTL_READ_VERITY_METADATA,
- fsverity_read_metadata_arg
-);
-
-fn read_metadata(fd: i32, metadata_type: u64, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
- let mut arg = fsverity_read_metadata_arg {
- metadata_type,
- offset,
- length: buf.len() as u64,
- buf_ptr: buf.as_mut_ptr() as u64,
- __reserved: 0,
- };
- Ok(unsafe { read_verity_metadata(fd, &mut arg) }? as usize)
-}
-
-/// Read the raw Merkle tree from the fd, if it exists. The API semantics is similar to a regular
-/// pread(2), and may not return full requested buffer.
-pub fn read_merkle_tree(fd: i32, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
- read_metadata(fd, FS_VERITY_METADATA_TYPE_MERKLE_TREE, offset, buf)
-}
-
-/// Read the fs-verity signature from the fd (if exists). The returned signature should be complete.
-pub fn read_signature(fd: i32, buf: &mut [u8]) -> io::Result<usize> {
- read_metadata(fd, FS_VERITY_METADATA_TYPE_SIGNATURE, 0 /* offset */, buf)
-}
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index f91ebec..47983cb 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -23,7 +23,6 @@
//! client can then request the content of file 9 by offset and size.
mod aidl;
-mod fsverity;
use anyhow::{bail, Result};
use clap::Parser;
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index cee4b01..b0294dd 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -16,10 +16,13 @@
"libbinder_rs",
"libcompos_common",
"libcomposd_native_rust",
+ "libfsverity_rs",
"libminijail_rust",
"libnix",
"liblibc",
"liblog_rust",
+ "libodsign_proto_rust",
+ "libprotobuf",
"librustutils",
"libshared_child",
"libvmclient",
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
index 569bba5..a3ce553 100644
--- a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
+++ b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
@@ -25,6 +25,8 @@
CompilationFailed,
/** We ran compilation in the VM, but it reported a problem. */
UnexpectedCompilationResult,
+ /** We failed to enable fs-verity on all of the output artifacts. */
+ FailedToEnableFsverity,
}
/**
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index 98d4a1b..2ce12f8 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -19,16 +19,16 @@
use crate::instance_starter::{CompOsInstance, InstanceStarter};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice;
-use anyhow::{bail, Result};
+use anyhow::{anyhow, bail, Context, Result};
use binder::Strong;
use compos_common::compos_client::{VmCpuTopology, VmParameters};
use compos_common::{CURRENT_INSTANCE_DIR, TEST_INSTANCE_DIR};
+use log::info;
+use rustutils::system_properties;
+use std::str::FromStr;
use std::sync::{Arc, Mutex, Weak};
use virtualizationservice::IVirtualizationService::IVirtualizationService;
-// Enough memory to complete odrefresh in the VM.
-const VM_MEMORY_MIB: i32 = 1024;
-
pub struct InstanceManager {
service: Strong<dyn IVirtualizationService>,
state: Mutex<State>,
@@ -81,12 +81,33 @@
// number of dex2oat threads.
let cpu_topology = VmCpuTopology::MatchHost;
let task_profiles = vec!["SCHED_SP_COMPUTE".to_string()];
- Ok(VmParameters {
- cpu_topology,
- task_profiles,
- memory_mib: Some(VM_MEMORY_MIB),
- ..Default::default()
- })
+ let memory_mib = Some(compos_memory_mib()?);
+ Ok(VmParameters { cpu_topology, task_profiles, memory_mib, ..Default::default() })
+}
+
+fn compos_memory_mib() -> Result<i32> {
+ // Enough memory to complete odrefresh in the VM, for older versions of ART that don't set the
+ // property explicitly.
+ const DEFAULT_MEMORY_MIB: u32 = 400;
+
+ let art_requested_mib =
+ read_property("composd.vm.art.memory_mib.config")?.unwrap_or(DEFAULT_MEMORY_MIB);
+
+ let vm_adjustment_mib = read_property("composd.vm.vendor.memory_mib.config")?.unwrap_or(0);
+
+ info!(
+ "Compilation VM memory: ART requests {art_requested_mib} MiB, \
+ VM adjustment is {vm_adjustment_mib} MiB"
+ );
+ art_requested_mib
+ .checked_add_signed(vm_adjustment_mib)
+ .and_then(|x| x.try_into().ok())
+ .context("Invalid vm memory adjustment")
+}
+
+fn read_property<T: FromStr>(name: &str) -> Result<Option<T>> {
+ let str = system_properties::read(name).with_context(|| format!("Failed to read {name}"))?;
+ str.map(|s| s.parse().map_err(|_| anyhow!("Invalid {name}: {s}"))).transpose()
}
// Ensures we only run one instance at a time.
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
index 3a699ab..a98f50d 100644
--- a/compos/composd/src/odrefresh_task.rs
+++ b/compos/composd/src/odrefresh_task.rs
@@ -28,11 +28,16 @@
CompilationMode::CompilationMode, ICompOsService, OdrefreshArgs::OdrefreshArgs,
};
use compos_common::odrefresh::{
- is_system_property_interesting, ExitCode, ODREFRESH_OUTPUT_ROOT_DIR,
+ is_system_property_interesting, ExitCode, CURRENT_ARTIFACTS_SUBDIR, ODREFRESH_OUTPUT_ROOT_DIR,
+ PENDING_ARTIFACTS_SUBDIR,
};
+use compos_common::BUILD_MANIFEST_SYSTEM_EXT_APK_PATH;
use log::{error, info, warn};
+use odsign_proto::odsign_info::OdsignInfo;
+use protobuf::Message;
use rustutils::system_properties;
-use std::fs::{remove_dir_all, OpenOptions};
+use std::fs::{remove_dir_all, File, OpenOptions};
+use std::os::fd::AsFd;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, OwnedFd};
use std::path::Path;
@@ -103,8 +108,21 @@
let result = match exit_code {
Ok(ExitCode::CompilationSuccess) => {
- info!("CompilationSuccess");
- callback.onSuccess()
+ if compilation_mode == CompilationMode::TEST_COMPILE {
+ info!("Compilation success");
+ callback.onSuccess()
+ } else {
+ // compos.info is generated only during NORMAL_COMPILE
+ if let Err(e) = enable_fsverity_to_all() {
+ let message =
+ format!("Unexpected failure when enabling fs-verity: {:?}", e);
+ error!("{}", message);
+ callback.onFailure(FailureReason::FailedToEnableFsverity, &message)
+ } else {
+ info!("Compilation success, fs-verity enabled");
+ callback.onSuccess()
+ }
+ }
}
Ok(exit_code) => {
let message = format!("Unexpected odrefresh result: {:?}", exit_code);
@@ -161,13 +179,20 @@
let output_dir_raw_fd = output_dir_fd.as_raw_fd();
let staging_dir_raw_fd = staging_dir_fd.as_raw_fd();
- // Get the /system_ext FD differently because it may not exist.
- let (system_ext_dir_raw_fd, ro_dir_fds) =
- if let Ok(system_ext_dir_fd) = open_dir(Path::new("/system_ext")) {
- (system_ext_dir_fd.as_raw_fd(), vec![system_dir_fd, system_ext_dir_fd])
- } else {
- (-1, vec![system_dir_fd])
- };
+ // When the VM starts, it may or may not mount the extra build manifest APK from /system_ext.
+ // Later, on request (here), we need to pass the directory FD of /system_ext, but only if the
+ // VM is configured to need it.
+ //
+ // It would be possible to plumb that information from ComposClient to here, but that adds
+ // extra complexity, and it feels slightly odd to encode the VM's state into the task itself,
+ // since the task is just a request to the VM.
+ let need_system_ext = Path::new(BUILD_MANIFEST_SYSTEM_EXT_APK_PATH).exists();
+ let (system_ext_dir_raw_fd, ro_dir_fds) = if need_system_ext {
+ let system_ext_dir_fd = open_dir(Path::new("/system_ext"))?;
+ (system_ext_dir_fd.as_raw_fd(), vec![system_dir_fd, system_ext_dir_fd])
+ } else {
+ (-1, vec![system_dir_fd])
+ };
// Spawn a fd_server to serve the FDs.
let fd_server_config = FdServerConfig {
@@ -197,6 +222,31 @@
ExitCode::from_i32(exit_code.into())
}
+/// Enable fs-verity on the output artifacts listed in compos.info in the pending directory. Any
+/// error before completion simply aborts, leaving any files that were already enabled unchanged.
+fn enable_fsverity_to_all() -> Result<()> {
+ let odrefresh_current_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(CURRENT_ARTIFACTS_SUBDIR);
+ let pending_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(PENDING_ARTIFACTS_SUBDIR);
+ let mut reader =
+ File::open(&pending_dir.join("compos.info")).context("Failed to open compos.info")?;
+ let compos_info = OdsignInfo::parse_from_reader(&mut reader).context("Failed to parse")?;
+
+ for path_str in compos_info.file_hashes.keys() {
+ // Need to rebase the path onto the compos-pending directory first
+ if let Ok(relpath) = Path::new(path_str).strip_prefix(&odrefresh_current_dir) {
+ let path = pending_dir.join(relpath);
+ let file = File::open(&path).with_context(|| format!("Failed to open {:?}", path))?;
+ // We don't expect an error here. If one happens, don't bother handling it specially; for
+ // simplicity, just let odsign do its regular check.
+ fsverity::enable(file.as_fd())
+ .with_context(|| format!("Failed to enable fs-verity on {:?}", path))?;
+ } else {
+ warn!("Skip due to unexpected path: {}", path_str);
+ }
+ }
+ Ok(())
+}
+
/// Returns an `OwnedFD` of the directory.
fn open_dir(path: &Path) -> Result<OwnedFd> {
Ok(OwnedFd::from(
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
index 479ae7f..933ac7a 100644
--- a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
@@ -234,6 +234,10 @@
result = IsolatedCompilationMetrics.RESULT_UNEXPECTED_COMPILATION_RESULT;
break;
+ case ICompilationTaskCallback.FailureReason.FailedToEnableFsverity:
+ result = IsolatedCompilationMetrics.RESULT_FAILED_TO_ENABLE_FSVERITY;
+ break;
+
default:
result = IsolatedCompilationMetrics.RESULT_UNKNOWN_FAILURE;
break;
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java b/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java
index e333198..f7799a4 100644
--- a/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java
@@ -36,9 +36,17 @@
// TODO(b/218525257): Move the definition of these enums to atoms.proto
@Retention(RetentionPolicy.SOURCE)
- @IntDef({RESULT_UNKNOWN, RESULT_SUCCESS, RESULT_UNKNOWN_FAILURE, RESULT_FAILED_TO_START,
- RESULT_JOB_CANCELED, RESULT_COMPILATION_FAILED, RESULT_UNEXPECTED_COMPILATION_RESULT,
- RESULT_COMPOSD_DIED})
+ @IntDef({
+ RESULT_UNKNOWN,
+ RESULT_SUCCESS,
+ RESULT_UNKNOWN_FAILURE,
+ RESULT_FAILED_TO_START,
+ RESULT_JOB_CANCELED,
+ RESULT_COMPILATION_FAILED,
+ RESULT_UNEXPECTED_COMPILATION_RESULT,
+ RESULT_COMPOSD_DIED,
+ RESULT_FAILED_TO_ENABLE_FSVERITY
+ })
public @interface CompilationResult {}
// Keep this in sync with Result enum in IsolatedCompilationEnded in
@@ -59,6 +67,9 @@
.ISOLATED_COMPILATION_ENDED__COMPILATION_RESULT__RESULT_UNEXPECTED_COMPILATION_RESULT;
public static final int RESULT_COMPOSD_DIED =
ArtStatsLog.ISOLATED_COMPILATION_ENDED__COMPILATION_RESULT__RESULT_COMPOSD_DIED;
+ public static final int RESULT_FAILED_TO_ENABLE_FSVERITY =
+ ArtStatsLog
+ .ISOLATED_COMPILATION_ENDED__COMPILATION_RESULT__RESULT_FAILED_TO_ENABLE_FSVERITY;
@Retention(RetentionPolicy.SOURCE)
@IntDef({SCHEDULING_RESULT_UNKNOWN, SCHEDULING_SUCCESS, SCHEDULING_FAILURE})
diff --git a/demo/README.md b/demo/README.md
index c5c87d8..fa4e38a 100644
--- a/demo/README.md
+++ b/demo/README.md
@@ -8,13 +8,18 @@
## Installing
+You can install the app like this:
```
-adb install -t out/dist/MicrodroidDemoApp.apk
-adb shell pm grant com.android.microdroid.demo android.permission.MANAGE_VIRTUAL_MACHINE
+adb install -t -g out/dist/MicrodroidDemoApp.apk
```
-Don't run the app before granting the permission. Or you will have to uninstall
-the app, and then re-install it.
+(`-t` allows the app to be installed even though it is marked as a test app;
+`-g` grants the necessary permission.)
+
+You can also explicitly grant or revoke the permission, e.g.
+```
+adb shell pm grant com.android.microdroid.demo android.permission.MANAGE_VIRTUAL_MACHINE
+```
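+or revoke it, e.g. to check how the app behaves without the permission:
+```
+adb shell pm revoke com.android.microdroid.demo android.permission.MANAGE_VIRTUAL_MACHINE
+```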
## Running
diff --git a/docs/debug/tracing.md b/docs/debug/tracing.md
index ebc0ac3..facd9d0 100644
--- a/docs/debug/tracing.md
+++ b/docs/debug/tracing.md
@@ -16,11 +16,13 @@
* Only boot clock is supported, and there is no way for user space to change the tracing_clock.
* Hypervisor tracing periodically polls the data from the hypervisor, this is different from the
regular ftrace instance which pushes the events into the ring buffer.
+* Resetting ring buffers (by clearing the trace file) is only supported when there are no active
+ readers. If the trace file is cleared while there are active readers, then the ring buffers will
+ be cleared after the last reader disconnects.
+* Changing the size of the ring buffer while the tracing session is active is also not supported.
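+
+For example, assuming the hypervisor tracing instance is exposed at `/sys/kernel/tracing/hyp`
+(the exact path depends on your kernel), the limitations above apply to the usual ftrace files:
+```shell
+# Reset the ring buffers; takes effect only once no readers are attached.
+echo > /sys/kernel/tracing/hyp/trace
+# Resize the per-CPU ring buffers (in KB); not supported while a tracing session is active.
+echo 16384 > /sys/kernel/tracing/hyp/buffer_size_kb
+```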
Note: the list above is not exhaustive.
-TODO(b/271412868): add more documentation on the user space interface.
-
### Perfetto integration
[Perfetto](https://perfetto.dev/docs/) is an open-source stack for performance instrumentation and
@@ -87,6 +89,61 @@
tracebox -t 15s -b 32mb hyp
```
+### Analysing traces using SQL
+
+On top of visualisation, Perfetto also provides a SQL interface to analyse traces. More
+documentation is available at https://perfetto.dev/docs/quickstart/trace-analysis and
+https://perfetto.dev/docs/analysis/trace-processor.
+
+Hypervisor events can be queried via `pkvm_hypervisor_events` SQL view. You can load that view by
+calling `SELECT IMPORT("pkvm.hypervisor");`, e.g.:
+
+```sql
+SELECT IMPORT("pkvm.hypervisor");
+SELECT * FROM pkvm_hypervisor_events limit 5;
+```
+
+Below are some SQL queries that might be useful when analysing hypervisor traces.
+
+**What is the longest time a CPU spent in the hypervisor, grouped by the reason for entering the hypervisor**
+```sql
+SELECT IMPORT("pkvm.hypervisor");
+
+SELECT
+ cpu,
+ reason,
+ ts,
+ dur
+FROM pkvm_hypervisor_events
+JOIN (
+ SELECT
+ MAX(dur) as dur2,
+ cpu as cpu2,
+ reason as reason2
+ FROM pkvm_hypervisor_events
+ GROUP BY 2, 3) AS sc
+ON
+ cpu = sc.cpu2
+ AND dur = sc.dur2
+ AND (reason = sc.reason2 OR (reason IS NULL AND sc.reason2 IS NULL))
+ORDER BY dur desc;
+```
+
+**What are the 10 longest times a CPU spent in the hypervisor because of host_mem_abort**
+```sql
+SELECT
+ hyp.dur as dur,
+ hyp.ts as ts,
+ EXTRACT_ARG(slices.arg_set_id, 'esr') as esr,
+ EXTRACT_ARG(slices.arg_set_id, 'addr') as addr
+FROM pkvm_hypervisor_events as hyp
+JOIN slices
+ON hyp.slice_id = slices.id
+WHERE hyp.reason = 'host_mem_abort'
+ORDER BY dur desc
+LIMIT 10;
+```
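+
+**Total time spent in the hypervisor, grouped by CPU** (a simple aggregation over the same
+`pkvm_hypervisor_events` view, useful as a starting point before the queries above):
+```sql
+SELECT IMPORT("pkvm.hypervisor");
+
+SELECT
+  cpu,
+  SUM(dur) AS total_dur
+FROM pkvm_hypervisor_events
+GROUP BY cpu
+ORDER BY total_dur desc;
+```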
+
## Microdroid VM tracing
IMPORTANT: Tracing is only supported for debuggable Microdroid VMs.
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index 5f39b1c..7713faf 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -459,7 +459,7 @@
}
}
- IVirtualizationService service = vm.mVirtualizationService.connect();
+ IVirtualizationService service = vm.mVirtualizationService.getBinder();
try {
service.initializeWritablePartition(
@@ -785,7 +785,7 @@
throw new VirtualMachineException("Failed to create APK signature file", e);
}
- IVirtualizationService service = mVirtualizationService.connect();
+ IVirtualizationService service = mVirtualizationService.getBinder();
try {
if (mVmOutputCaptured) {
diff --git a/javalib/src/android/system/virtualmachine/VirtualizationService.java b/javalib/src/android/system/virtualmachine/VirtualizationService.java
index c3f2ba3..1cf97b5 100644
--- a/javalib/src/android/system/virtualmachine/VirtualizationService.java
+++ b/javalib/src/android/system/virtualmachine/VirtualizationService.java
@@ -41,6 +41,9 @@
*/
private final ParcelFileDescriptor mClientFd;
+ /* Persistent connection to IVirtualizationService. */
+ private final IVirtualizationService mBinder;
+
private static native int nativeSpawn();
private native IBinder nativeConnect(int clientFd);
@@ -57,15 +60,18 @@
throw new VirtualMachineException("Could not spawn VirtualizationService");
}
mClientFd = ParcelFileDescriptor.adoptFd(clientFd);
- }
- /* Connects to the VirtualizationService AIDL service. */
- public IVirtualizationService connect() throws VirtualMachineException {
IBinder binder = nativeConnect(mClientFd.getFd());
if (binder == null) {
throw new VirtualMachineException("Could not connect to VirtualizationService");
}
- return IVirtualizationService.Stub.asInterface(binder);
+ mBinder = IVirtualizationService.Stub.asInterface(binder);
+ }
+
+ /* Returns the IVirtualizationService binder. */
+ @NonNull
+ IVirtualizationService getBinder() {
+ return mBinder;
}
/*
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index a7ea0ee..05fdb4a 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -85,6 +85,31 @@
}
}
+// Converts two cells into bytes of the same size
+fn two_cells_to_bytes(cells: [u32; 2]) -> [u8; 2 * size_of::<u32>()] {
+ // SAFETY: the sizes of the two arrays are the same
+ unsafe { core::mem::transmute::<[u32; 2], [u8; 2 * size_of::<u32>()]>(cells) }
+}
+
+impl Reg<u64> {
+ const NUM_CELLS: usize = 2;
+ /// Converts addr and (optional) size to the format that is consumable by libfdt.
+ pub fn to_cells(
+ &self,
+ ) -> ([u8; Self::NUM_CELLS * size_of::<u32>()], Option<[u8; Self::NUM_CELLS * size_of::<u32>()]>)
+ {
+ let addr =
+ two_cells_to_bytes([((self.addr >> 32) as u32).to_be(), (self.addr as u32).to_be()]);
+ let size = self
+ .size
+ .map(|size| two_cells_to_bytes([((size >> 32) as u32).to_be(), (size as u32).to_be()]));
+ (addr, size)
+ }
+}
+
/// Iterator over the address ranges defined by the /memory/ node.
#[derive(Debug)]
pub struct MemRegIterator<'a> {
@@ -122,7 +147,7 @@
}
/// An address range from the 'ranges' property of a DT node.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
pub struct AddressRange<A, P, S> {
/// The physical address of the range within the child bus's address space.
pub addr: A,
@@ -202,3 +227,25 @@
})
}
}
+
+impl AddressRange<(u32, u64), u64, u64> {
+ const SIZE_CELLS: usize = 7;
+ /// Converts to the format that is consumable by libfdt
+ pub fn to_cells(&self) -> [u8; Self::SIZE_CELLS * size_of::<u32>()] {
+ let buf = [
+ self.addr.0.to_be(),
+ ((self.addr.1 >> 32) as u32).to_be(),
+ (self.addr.1 as u32).to_be(),
+ ((self.parent_addr >> 32) as u32).to_be(),
+ (self.parent_addr as u32).to_be(),
+ ((self.size >> 32) as u32).to_be(),
+ (self.size as u32).to_be(),
+ ];
+ // SAFETY: the sizes of the two arrays are the same
+ unsafe {
+ core::mem::transmute::<[u32; Self::SIZE_CELLS], [u8; Self::SIZE_CELLS * size_of::<u32>()]>(
+ buf,
+ )
+ }
+ }
+}
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 1d295eb..7ddf680 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -21,6 +21,7 @@
pub use iterators::{AddressRange, CellIterator, MemRegIterator, RangesIterator, Reg, RegIterator};
+use core::cmp::max;
use core::ffi::{c_int, c_void, CStr};
use core::fmt;
use core::mem;
@@ -196,6 +197,10 @@
}
impl<'a> FdtNode<'a> {
+ /// Create an immutable node from a mutable node at the same offset.
+ pub fn from_mut(other: &'a FdtNodeMut) -> Self {
+ FdtNode { fdt: other.fdt, offset: other.offset }
+ }
/// Find parent node.
pub fn parent(&self) -> Result<Self> {
// SAFETY - Accesses (read-only) are constrained to the DT totalsize.
@@ -285,13 +290,31 @@
/// Retrieve the value of a given property.
pub fn getprop(&self, name: &CStr) -> Result<Option<&'a [u8]>> {
+ if let Some((prop, len)) = Self::getprop_internal(self.fdt, self.offset, name)? {
+ let offset = (prop as usize)
+ .checked_sub(self.fdt.as_ptr() as usize)
+ .ok_or(FdtError::Internal)?;
+
+ Ok(Some(self.fdt.buffer.get(offset..(offset + len)).ok_or(FdtError::Internal)?))
+ } else {
+ Ok(None) // property was not found
+ }
+ }
+
+ /// Return the pointer and size of the property named `name`, in a node at offset `offset`, in
+ /// a device tree `fdt`. The pointer, when returned, is guaranteed to be non-null.
+ fn getprop_internal(
+ fdt: &'a Fdt,
+ offset: c_int,
+ name: &CStr,
+ ) -> Result<Option<(*const c_void, usize)>> {
let mut len: i32 = 0;
// SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) and the
// function respects the passed number of characters.
let prop = unsafe {
libfdt_bindgen::fdt_getprop_namelen(
- self.fdt.as_ptr(),
- self.offset,
+ fdt.as_ptr(),
+ offset,
name.as_ptr(),
// *_namelen functions don't include the trailing nul terminator in 'len'.
name.to_bytes().len().try_into().map_err(|_| FdtError::BadPath)?,
@@ -308,11 +331,7 @@
// We expected an error code in len but still received a valid value?!
return Err(FdtError::Internal);
}
-
- let offset =
- (prop as usize).checked_sub(self.fdt.as_ptr() as usize).ok_or(FdtError::Internal)?;
-
- Ok(Some(self.fdt.buffer.get(offset..(offset + len)).ok_or(FdtError::Internal)?))
+ Ok(Some((prop.cast::<c_void>(), len)))
}
/// Get reference to the containing device tree.
@@ -405,6 +424,23 @@
fdt_err_expect_zero(ret)
}
+ /// Replace the value of the given property with the given value; the new value must have the
+ /// same length as the current value.
+ pub fn setprop_inplace(&mut self, name: &CStr, value: &[u8]) -> Result<()> {
+ // SAFETY - fdt size is not altered
+ let ret = unsafe {
+ libfdt_bindgen::fdt_setprop_inplace(
+ self.fdt.as_mut_ptr(),
+ self.offset,
+ name.as_ptr(),
+ value.as_ptr().cast::<c_void>(),
+ value.len().try_into().map_err(|_| FdtError::BadValue)?,
+ )
+ };
+
+ fdt_err_expect_zero(ret)
+ }
+
/// Create or change a flag-like empty property.
pub fn setprop_empty(&mut self, name: &CStr) -> Result<()> {
self.setprop(name, &[])
@@ -423,6 +459,31 @@
fdt_err_expect_zero(ret)
}
+ /// Reduce the size of the given property to new_size
+ pub fn trimprop(&mut self, name: &CStr, new_size: usize) -> Result<()> {
+ let (prop, len) =
+ FdtNode::getprop_internal(self.fdt, self.offset, name)?.ok_or(FdtError::NotFound)?;
+ if len == new_size {
+ return Ok(());
+ }
+ if new_size > len {
+ return Err(FdtError::NoSpace);
+ }
+
+ // SAFETY - new_size is smaller than the old size
+ let ret = unsafe {
+ libfdt_bindgen::fdt_setprop(
+ self.fdt.as_mut_ptr(),
+ self.offset,
+ name.as_ptr(),
+ prop.cast::<c_void>(),
+ new_size.try_into().map_err(|_| FdtError::BadValue)?,
+ )
+ };
+
+ fdt_err_expect_zero(ret)
+ }
+
/// Get reference to the containing device tree.
pub fn fdt(&mut self) -> &mut Fdt {
self.fdt
@@ -444,6 +505,51 @@
Ok(FdtNode { fdt: &*self.fdt, offset: fdt_err(ret)? })
}
+
+ /// Return the next node after this one with the given compatible string
+ pub fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
+ // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_node_offset_by_compatible(
+ self.fdt.as_ptr(),
+ self.offset,
+ compatible.as_ptr(),
+ )
+ };
+
+ Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ }
+
+ /// Replace this node and its subtree with nop tags, effectively removing it from the tree, and
+ /// then return the next node with the given compatible string.
+ // Side note: without this, filtering out excess compatible nodes from the DT is impossible.
+ // The reason is that libfdt ensures that the node from which the search for the next
+ // compatible node starts is always a valid one -- except for the special case of offset =
+ // -1, which is used to find the first compatible node. So, we can't delete a node and then
+ // find the next compatible node from it.
+ //
+ // We can't do it in the opposite direction either. If we call next_compatible to find the
+ // next node and then delete the current node, the Rust borrow checker kicks in. The next node
+ // holds a mutable reference to the DT, so we can't use the current node (which also holds a
+ // mutable reference to the DT).
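+ //
+ // A typical use is therefore a single forward pass: advance past the nodes you want to keep
+ // with next_compatible, then delete the rest, as patch_num_cpus in pvmfw does. A sketch, with
+ // placeholder `compat` and `keep` values:
+ //
+ //     let mut next = fdt.root_mut()?.next_compatible(compat)?;
+ //     for _ in 0..keep {
+ //         next = next.ok_or(FdtError::NoSpace)?.next_compatible(compat)?;
+ //     }
+ //     while let Some(current) = next {
+ //         next = current.delete_and_next_compatible(compat)?;
+ //     }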
+ pub fn delete_and_next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
+ // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_node_offset_by_compatible(
+ self.fdt.as_ptr(),
+ self.offset,
+ compatible.as_ptr(),
+ )
+ };
+ let next_offset = fdt_err_or_option(ret)?;
+
+ // SAFETY - fdt_nop_node alters only the bytes in the blob which contain the node and its
+ // properties and subnodes, and will not alter or move any other part of the tree.
+ let ret = unsafe { libfdt_bindgen::fdt_nop_node(self.fdt.as_mut_ptr(), self.offset) };
+ fdt_err_expect_zero(ret)?;
+
+ Ok(next_offset.map(|offset| Self { fdt: self.fdt, offset }))
+ }
}
/// Iterator over nodes sharing a same compatible string.
@@ -518,6 +624,21 @@
mem::transmute::<&mut [u8], &mut Self>(fdt)
}
+ /// Update this FDT from a slice containing another FDT
+ pub fn copy_from_slice(&mut self, new_fdt: &[u8]) -> Result<()> {
+ if self.buffer.len() < new_fdt.len() {
+ Err(FdtError::NoSpace)
+ } else {
+ let totalsize = self.totalsize();
+ self.buffer[..new_fdt.len()].clone_from_slice(new_fdt);
+ // Zeroize the remaining part. We zeroize up to the size of the original DT because
+ // zeroizing the entire buffer (max 2MB) is not necessary and may increase the VM boot
+ // time.
+ self.buffer[new_fdt.len()..max(new_fdt.len(), totalsize)].fill(0_u8);
+ Ok(())
+ }
+ }
+
/// Make the whole slice containing the DT available to libfdt.
pub fn unpack(&mut self) -> Result<()> {
// SAFETY - "Opens" the DT in-place (supported use-case) by updating its header and
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 0abaf79..de06d01 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -72,8 +72,6 @@
"apexd.microdroid",
"debuggerd",
"linker",
- "tombstoned.microdroid",
- "tombstone_transmit.microdroid",
"cgroups.json",
"task_profiles.json",
"public.libraries.android.txt",
@@ -112,13 +110,26 @@
"authfs",
"authfs_service",
"encryptedstore",
- "microdroid_crashdump_kernel",
"microdroid_kexec",
"microdroid_manager",
"zipfuse",
],
},
},
+ arch: {
+ // b/273792258: These could be in multilib.lib64 except that
+ // microdroid_crashdump_kernel doesn't exist for riscv64 yet
+ arm64: {
+ deps: [
+ "microdroid_crashdump_kernel",
+ ],
+ },
+ x86_64: {
+ deps: [
+ "microdroid_crashdump_kernel",
+ ],
+ },
+ },
linker_config_src: "linker.config.json",
base_dir: "system",
dirs: microdroid_rootdirs,
diff --git a/microdroid/README.md b/microdroid/README.md
index 28785fd..f70965a 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -221,6 +221,11 @@
Use `vm_shell` tool above, and then run `lldbclient.py`.
```sh
+adb -s localhost:8000 shell 'mount -o remount,exec /data'
development/scripts/lldbclient.py -s localhost:8000 --chroot . --user '' \
(-p PID | -n NAME | -r ...)
```
+
+**Note:** We need to pass `--chroot .` to skip device verification, because
+microdroid doesn't match the host's lunch target. We also need to pass
+`--user ''` as there is no `su` binary in microdroid.
diff --git a/microdroid/init.rc b/microdroid/init.rc
index c997bfd..29f8970 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -126,14 +126,6 @@
mkdir /data/vendor_de 0771 root root
mkdir /data/vendor/hardware 0771 root root
- # Start tombstoned early to be able to store tombstones.
- # microdroid doesn't have anr, but tombstoned requires it
- mkdir /data/anr 0775 system system
- mkdir /data/tombstones 0771 system system
- mkdir /data/vendor/tombstones 0771 root root
-
- start tombstoned
-
# For security reasons, /data/local/tmp should always be empty.
# Do not place files or directories in /data/local/tmp
mkdir /data/local 0751 root root
@@ -146,15 +138,6 @@
# Mark boot completed. This will notify microdroid_manager to run payload.
setprop dev.bootcomplete 1
-on property:tombstone_transmit.start=1
- mkdir /data/tombstones 0771 system system
- start tombstone_transmit
-
-service tombstone_transmit /system/bin/tombstone_transmit.microdroid -cid 2 -port 2000 -remove_tombstones_after_transmitting
- user system
- group system
- shutdown critical
-
service apexd-vm /system/bin/apexd --vm
user root
group system
diff --git a/microdroid/kdump/kexec.c b/microdroid/kdump/kexec.c
index 8d88951..d3e8e02 100644
--- a/microdroid/kdump/kexec.c
+++ b/microdroid/kdump/kexec.c
@@ -23,6 +23,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
@@ -53,6 +54,20 @@
if (syscall(SYS_kexec_file_load, open_checked(KERNEL), open_checked(INITRD), cmdline_len,
CMDLINE, KEXEC_FILE_ON_CRASH) == -1) {
fprintf(stderr, "Failed to load panic kernel: %s\n", strerror(errno));
+ if (errno == EADDRNOTAVAIL) {
+ struct stat st;
+ off_t kernel_size = 0;
+ off_t initrd_size = 0;
+
+ if (stat(KERNEL, &st) == 0) {
+ kernel_size = st.st_size;
+ }
+ if (stat(INITRD, &st) == 0) {
+ initrd_size = st.st_size;
+ }
+ fprintf(stderr, "Image size too big? %s:%ld bytes, %s:%ld bytes\n", KERNEL, kernel_size,
+ INITRD, initrd_size);
+ }
return 1;
}
return 0;
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index fa96bf4..8732be1 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -55,8 +55,8 @@
use std::convert::TryInto;
use std::env;
use std::ffi::CString;
-use std::fs::{self, create_dir, OpenOptions};
-use std::io::Write;
+use std::fs::{self, create_dir, OpenOptions, File};
+use std::io::{Read, Write};
use std::os::unix::process::CommandExt;
use std::os::unix::process::ExitStatusExt;
use std::path::Path;
@@ -73,6 +73,7 @@
const DM_MOUNTED_APK_PATH: &str = "/dev/block/mapper/microdroid-apk";
const AVF_STRICT_BOOT: &str = "/sys/firmware/devicetree/base/chosen/avf,strict-boot";
const AVF_NEW_INSTANCE: &str = "/sys/firmware/devicetree/base/chosen/avf,new-instance";
+const AVF_DEBUG_POLICY_RAMDUMP: &str = "/sys/firmware/devicetree/base/avf/guest/common/ramdump";
const DEBUG_MICRODROID_NO_VERIFIED_BOOT: &str =
"/sys/firmware/devicetree/base/virtualization/guest/debug-microdroid,no-verified-boot";
@@ -81,7 +82,6 @@
const ZIPFUSE_BIN: &str = "/system/bin/zipfuse";
const APEX_CONFIG_DONE_PROP: &str = "apex_config.done";
-const TOMBSTONE_TRANSMIT_DONE_PROP: &str = "tombstone_transmit.init_done";
const DEBUGGABLE_PROP: &str = "ro.boot.microdroid.debuggable";
// SYNC WITH virtualizationservice/src/crosvm.rs
@@ -315,6 +315,21 @@
}
}
+/// Get the debug policy value as a bool. It's true iff the value is explicitly set to <1>.
+fn get_debug_policy_bool(path: &'static str) -> Result<Option<bool>> {
+ let mut file = match File::open(path) {
+ Ok(dp) => dp,
+ Err(e) => {
+ info!("{e:?}. Assuming <0>");
+ return Ok(Some(false));
+ }
+ };
+ let mut buf: [u8; 4] = Default::default();
+ file.read_exact(&mut buf).with_context(|| format!("Malformed data in {path}"))?;
+ // DT spec uses big endian although Android is always little endian.
+ Ok(Some(u32::from_be_bytes(buf) == 1))
+}
+
fn try_run_payload(service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
let metadata = load_metadata().context("Failed to load payload metadata")?;
let dice = DiceDriver::new(Path::new("/dev/open-dice0")).context("Failed to load DICE")?;
@@ -423,12 +438,11 @@
setup_config_sysprops(&config)?;
- // Start tombstone_transmit if enabled
+ // Set export_tombstones if enabled
if should_export_tombstones(&config) {
- system_properties::write("tombstone_transmit.start", "1")
- .context("set tombstone_transmit.start")?;
- } else {
- control_service("stop", "tombstoned")?;
+ // This property is read by tombstone_handler.
+ system_properties::write("microdroid_manager.export_tombstones.enabled", "1")
+ .context("set microdroid_manager.export_tombstones.enabled")?;
}
// Wait until zipfuse has mounted the APKs so we can access the payload
@@ -448,20 +462,10 @@
system_properties::write("microdroid_manager.init_done", "1")
.context("set microdroid_manager.init_done")?;
- // Wait for tombstone_transmit to init
- if should_export_tombstones(&config) {
- wait_for_tombstone_transmit_done()?;
- }
-
info!("boot completed, time to run payload");
exec_task(task, service).context("Failed to run payload")
}
-fn control_service(action: &str, service: &str) -> Result<()> {
- system_properties::write(&format!("ctl.{}", action), service)
- .with_context(|| format!("Failed to {} {}", action, service))
-}
-
struct ApkDmverityArgument<'a> {
apk: &'a str,
idsig: &'a str,
@@ -733,11 +737,6 @@
wait_for_property_true(APEX_CONFIG_DONE_PROP).context("Failed waiting for apex config done")
}
-fn wait_for_tombstone_transmit_done() -> Result<()> {
- wait_for_property_true(TOMBSTONE_TRANSMIT_DONE_PROP)
- .context("Failed waiting for tombstone transmit done")
-}
-
fn wait_for_property_true(property_name: &str) -> Result<()> {
let mut prop = PropertyWatcher::new(property_name)?;
loop {
@@ -798,16 +797,27 @@
}
}
-/// Loads the crashkernel into memory using kexec if the VM is loaded with `crashkernel=' parameter
-/// in the cmdline.
+/// Loads the crashkernel into memory using kexec if the VM is debuggable or the debug policy
+/// says so. The VM should be booted with the `crashkernel=` parameter in the cmdline so that
+/// memory is allocated for the crashkernel.
fn load_crashkernel_if_supported() -> Result<()> {
let supported = std::fs::read_to_string("/proc/cmdline")?.contains(" crashkernel=");
info!("ramdump supported: {}", supported);
- if supported {
+
+ if !supported {
+ return Ok(());
+ }
+
+ let debuggable = system_properties::read_bool(DEBUGGABLE_PROP, true)?;
+ let ramdump = get_debug_policy_bool(AVF_DEBUG_POLICY_RAMDUMP)?.unwrap_or_default();
+ let requested = debuggable | ramdump;
+
+ if requested {
let status = Command::new("/system/bin/kexec_load").status()?;
if !status.success() {
return Err(anyhow!("Failed to load crashkernel: {:?}", status));
}
+ info!("ramdump is loaded: debuggable={debuggable}, ramdump={ramdump}");
}
Ok(())
}
diff --git a/pvmfw/platform.dts b/pvmfw/platform.dts
index 127f69a..a7b1de7 100644
--- a/pvmfw/platform.dts
+++ b/pvmfw/platform.dts
@@ -8,6 +8,8 @@
#define PLACEHOLDER2 PLACEHOLDER PLACEHOLDER
#define PLACEHOLDER4 PLACEHOLDER2 PLACEHOLDER2
+#define IRQ_BASE 4
+
/dts-v1/;
/ {
@@ -214,13 +216,14 @@
bus-range = <0x00 0x00>;
reg = <0x00 0x10000 0x00 0x1000000>;
interrupt-map = <
- 0x0800 0x0 0x0 1 &intc 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
- 0x1000 0x0 0x0 1 &intc 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH
- 0x1800 0x0 0x0 1 &intc 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH
- 0x2000 0x0 0x0 1 &intc 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH
- 0x2800 0x0 0x0 1 &intc 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH
- 0x3000 0x0 0x0 1 &intc 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH
- 0x3800 0x0 0x0 1 &intc 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH
+ 0x0800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 0) IRQ_TYPE_LEVEL_HIGH
+ 0x1000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 1) IRQ_TYPE_LEVEL_HIGH
+ 0x1800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 2) IRQ_TYPE_LEVEL_HIGH
+ 0x2000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 3) IRQ_TYPE_LEVEL_HIGH
+ 0x2800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 4) IRQ_TYPE_LEVEL_HIGH
+ 0x3000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 5) IRQ_TYPE_LEVEL_HIGH
+ 0x3800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 6) IRQ_TYPE_LEVEL_HIGH
+ 0x4000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 7) IRQ_TYPE_LEVEL_HIGH
>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7
@@ -228,6 +231,7 @@
0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7>;
};
diff --git a/pvmfw/src/crypto.rs b/pvmfw/src/crypto.rs
index 85dc6c9..275de7a 100644
--- a/pvmfw/src/crypto.rs
+++ b/pvmfw/src/crypto.rs
@@ -14,6 +14,8 @@
//! Wrapper around BoringSSL/OpenSSL symbols.
+use crate::cstr;
+
use core::convert::AsRef;
use core::ffi::{c_char, c_int, CStr};
use core::fmt;
@@ -81,14 +83,10 @@
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let unknown_library = CStr::from_bytes_with_nul(b"{unknown library}\0").unwrap();
- let unknown_reason = CStr::from_bytes_with_nul(b"{unknown reason}\0").unwrap();
- let unknown_file = CStr::from_bytes_with_nul(b"??\0").unwrap();
-
let packed = self.packed_value();
- let library = self.library_name().unwrap_or(unknown_library).to_str().unwrap();
- let reason = self.reason().unwrap_or(unknown_reason).to_str().unwrap();
- let file = self.file.unwrap_or(unknown_file).to_str().unwrap();
+ let library = self.library_name().unwrap_or(cstr!("{unknown library}")).to_str().unwrap();
+ let reason = self.reason().unwrap_or(cstr!("{unknown reason}")).to_str().unwrap();
+ let file = self.file.unwrap_or(cstr!("??")).to_str().unwrap();
let line = self.line;
write!(f, "{file}:{line}: {library}: {reason} ({packed:#x})")
diff --git a/pvmfw/src/debug_policy.rs b/pvmfw/src/debug_policy.rs
index 23d3e1d..f4b99a6 100644
--- a/pvmfw/src/debug_policy.rs
+++ b/pvmfw/src/debug_policy.rs
@@ -14,7 +14,8 @@
//! Support for the debug policy overlay in pvmfw
-use alloc::{vec, vec::Vec};
+use crate::cstr;
+use alloc::vec::Vec;
use core::ffi::CStr;
use core::fmt;
use libfdt::FdtError;
@@ -63,76 +64,11 @@
fdt.pack().map_err(|e| DebugPolicyError::OverlaidFdt("Failed to re-pack", e))
}
-/// Disables ramdump by removing crashkernel from bootargs in /chosen.
-fn disable_ramdump(fdt: &mut libfdt::Fdt) -> Result<(), DebugPolicyError> {
- let chosen_path = CStr::from_bytes_with_nul(b"/chosen\0").unwrap();
- let bootargs_name = CStr::from_bytes_with_nul(b"bootargs\0").unwrap();
-
- let chosen = match fdt
- .node(chosen_path)
- .map_err(|e| DebugPolicyError::Fdt("Failed to find /chosen", e))?
- {
- Some(node) => node,
- None => return Ok(()),
- };
-
- let bootargs = match chosen
- .getprop_str(bootargs_name)
- .map_err(|e| DebugPolicyError::Fdt("Failed to find bootargs prop", e))?
- {
- Some(value) if !value.to_bytes().is_empty() => value,
- _ => return Ok(()),
- };
-
- // TODO: Improve add 'crashkernel=17MB' only when it's unnecessary.
- // Currently 'crashkernel=17MB' in virtualizationservice and passed by
- // chosen node, because it's not exactly a debug policy but a
- // configuration. However, it's actually microdroid specific
- // so we need a way to generalize it.
- let mut args = vec![];
- for arg in bootargs.to_bytes().split(|byte| byte.is_ascii_whitespace()) {
- if arg.is_empty() || arg.starts_with(b"crashkernel=") {
- continue;
- }
- args.push(arg);
- }
- let mut new_bootargs = args.as_slice().join(&b" "[..]);
- new_bootargs.push(b'\0');
-
- // We've checked existence of /chosen node at the beginning.
- let mut chosen_mut = fdt.node_mut(chosen_path).unwrap().unwrap();
- chosen_mut.setprop(bootargs_name, new_bootargs.as_slice()).map_err(|e| {
- DebugPolicyError::OverlaidFdt("Failed to remove crashkernel. FDT might be corrupted", e)
- })
-}
-
-/// Returns true only if fdt has ramdump prop in the /avf/guest/common node with value <1>
-fn is_ramdump_enabled(fdt: &libfdt::Fdt) -> Result<bool, DebugPolicyError> {
- let common = match fdt
- .node(CStr::from_bytes_with_nul(b"/avf/guest/common\0").unwrap())
- .map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find /avf/guest/common node", e))?
- {
- Some(node) => node,
- None => return Ok(false),
- };
-
- match common
- .getprop_u32(CStr::from_bytes_with_nul(b"ramdump\0").unwrap())
- .map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find ramdump prop", e))?
- {
- Some(1) => Ok(true),
- _ => Ok(false),
- }
-}
-
/// Enables console output by adding kernel.printk.devkmsg and kernel.console to bootargs.
/// This uses hardcoded console name 'hvc0' and it should be match with microdroid's bootconfig.debuggable.
fn enable_console_output(fdt: &mut libfdt::Fdt) -> Result<(), DebugPolicyError> {
- let chosen_path = CStr::from_bytes_with_nul(b"/chosen\0").unwrap();
- let bootargs_name = CStr::from_bytes_with_nul(b"bootargs\0").unwrap();
-
let chosen = match fdt
- .node(chosen_path)
+ .node(cstr!("/chosen"))
.map_err(|e| DebugPolicyError::Fdt("Failed to find /chosen", e))?
{
Some(node) => node,
@@ -140,7 +76,7 @@
};
let bootargs = match chosen
- .getprop_str(bootargs_name)
+ .getprop_str(cstr!("bootargs"))
.map_err(|e| DebugPolicyError::Fdt("Failed to find bootargs prop", e))?
{
Some(value) if !value.to_bytes().is_empty() => value,
@@ -154,8 +90,8 @@
fdt.unpack().map_err(|e| DebugPolicyError::OverlaidFdt("Failed to unpack", e))?;
// We've checked existence of /chosen node at the beginning.
- let mut chosen_mut = fdt.node_mut(chosen_path).unwrap().unwrap();
- chosen_mut.setprop(bootargs_name, new_bootargs.as_slice()).map_err(|e| {
+ let mut chosen_mut = fdt.node_mut(cstr!("/chosen")).unwrap().unwrap();
+ chosen_mut.setprop(cstr!("bootargs"), new_bootargs.as_slice()).map_err(|e| {
DebugPolicyError::OverlaidFdt("Failed to enabled console output. FDT might be corrupted", e)
})?;
@@ -166,7 +102,7 @@
/// Returns true only if fdt has log prop in the /avf/guest/common node with value <1>
fn is_console_output_enabled(fdt: &libfdt::Fdt) -> Result<bool, DebugPolicyError> {
let common = match fdt
- .node(CStr::from_bytes_with_nul(b"/avf/guest/common\0").unwrap())
+ .node(cstr!("/avf/guest/common"))
.map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find /avf/guest/common node", e))?
{
Some(node) => node,
@@ -174,7 +110,7 @@
};
match common
- .getprop_u32(CStr::from_bytes_with_nul(b"log\0").unwrap())
+ .getprop_u32(cstr!("log"))
.map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find log prop", e))?
{
Some(1) => Ok(true),
@@ -196,13 +132,6 @@
apply_debug_policy(fdt, dp)?;
}
- // Handles ramdump in the debug policy
- if is_ramdump_enabled(fdt)? {
- info!("ramdump is enabled by debug policy");
- } else {
- disable_ramdump(fdt)?;
- }
-
// Handles console output in the debug policy
if is_console_output_enabled(fdt)? {
enable_console_output(fdt)?;
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index 3ceb8ef..bad3453 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -14,6 +14,7 @@
//! Support for DICE derivation and BCC generation.
+use crate::cstr;
use crate::helpers::flushed_zeroize;
use core::ffi::c_void;
use core::ffi::CStr;
@@ -60,10 +61,9 @@
self,
salt: &[u8; HIDDEN_SIZE],
) -> diced_open_dice::Result<InputValues> {
- let component_name = CStr::from_bytes_with_nul(b"vm_entry\0").unwrap();
let mut config_descriptor_buffer = [0; 128];
let config_descriptor_size = bcc_format_config_descriptor(
- Some(component_name),
+ Some(cstr!("vm_entry")),
None, // component_version
false, // resettable
&mut config_descriptor_buffer,
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 106a4ef..8219882 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -109,37 +109,17 @@
RebootReason::InvalidFdt
})?;
+ let info = fdt::sanitize_device_tree(fdt)?;
debug!("Fdt passed validation!");
- let memory_range = fdt
- .memory()
- .map_err(|e| {
- error!("Failed to get /memory from the DT: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("Node /memory was found empty");
- RebootReason::InvalidFdt
- })?
- .next()
- .ok_or_else(|| {
- error!("Failed to read the memory size from the FDT");
- RebootReason::InternalError
- })?;
-
+ let memory_range = info.memory_range;
debug!("Resizing MemoryTracker to range {memory_range:#x?}");
-
memory.shrink(&memory_range).map_err(|_| {
error!("Failed to use memory range value from DT: {memory_range:#x?}");
RebootReason::InvalidFdt
})?;
- let kernel_range = fdt::kernel_range(fdt).map_err(|e| {
- error!("Error while attempting to read the kernel range from the DT: {e}");
- RebootReason::InvalidFdt
- })?;
-
- let kernel_range = if let Some(r) = kernel_range {
+ let kernel_range = if let Some(r) = info.kernel_range {
memory.alloc_range(&r).map_err(|e| {
error!("Failed to obtain the kernel range with DT range: {e}");
RebootReason::InternalError
@@ -165,12 +145,7 @@
let kernel =
unsafe { slice::from_raw_parts(kernel_range.start as *const u8, kernel_range.len()) };
- let ramdisk_range = fdt::initrd_range(fdt).map_err(|e| {
- error!("An error occurred while locating the ramdisk in the device tree: {e}");
- RebootReason::InternalError
- })?;
-
- let ramdisk = if let Some(r) = ramdisk_range {
+ let ramdisk = if let Some(r) = info.initrd_range {
debug!("Located ramdisk at {r:?}");
let r = memory.alloc_range(&r).map_err(|e| {
error!("Failed to obtain the initrd range: {e}");
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index f4b0244..7d88455 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -14,10 +14,18 @@
//! High-level FDT functions.
+use crate::cstr;
+use crate::helpers::flatten;
use crate::helpers::GUEST_PAGE_SIZE;
+use crate::helpers::SIZE_4KB;
+use crate::memory::BASE_ADDR;
+use crate::memory::MAX_ADDR;
use crate::RebootReason;
+use alloc::ffi::CString;
+use core::cmp::max;
+use core::cmp::min;
use core::ffi::CStr;
-use core::num::NonZeroUsize;
+use core::mem::size_of;
use core::ops::Range;
use fdtpci::PciMemoryFlags;
use fdtpci::PciRangeType;
@@ -25,16 +33,18 @@
use libfdt::CellIterator;
use libfdt::Fdt;
use libfdt::FdtError;
+use libfdt::FdtNode;
+use log::debug;
use log::error;
use tinyvec::ArrayVec;
-/// Extract from /config the address range containing the pre-loaded kernel.
-pub fn kernel_range(fdt: &libfdt::Fdt) -> libfdt::Result<Option<Range<usize>>> {
- let config = CStr::from_bytes_with_nul(b"/config\0").unwrap();
- let addr = CStr::from_bytes_with_nul(b"kernel-address\0").unwrap();
- let size = CStr::from_bytes_with_nul(b"kernel-size\0").unwrap();
+/// Extract from /config the address range containing the pre-loaded kernel. Absence of /config is
+/// not an error.
+fn read_kernel_range_from(fdt: &Fdt) -> libfdt::Result<Option<Range<usize>>> {
+ let addr = cstr!("kernel-address");
+ let size = cstr!("kernel-size");
- if let Some(config) = fdt.node(config)? {
+ if let Some(config) = fdt.node(cstr!("/config"))? {
if let (Some(addr), Some(size)) = (config.getprop_u32(addr)?, config.getprop_u32(size)?) {
let addr = addr as usize;
let size = size as usize;
@@ -46,10 +56,11 @@
Ok(None)
}
-/// Extract from /chosen the address range containing the pre-loaded ramdisk.
-pub fn initrd_range(fdt: &libfdt::Fdt) -> libfdt::Result<Option<Range<usize>>> {
- let start = CStr::from_bytes_with_nul(b"linux,initrd-start\0").unwrap();
- let end = CStr::from_bytes_with_nul(b"linux,initrd-end\0").unwrap();
+/// Extract from /chosen the address range containing the pre-loaded ramdisk. Absence is not an
+/// error as there can be initrd-less VMs.
+fn read_initrd_range_from(fdt: &Fdt) -> libfdt::Result<Option<Range<usize>>> {
+ let start = cstr!("linux,initrd-start");
+ let end = cstr!("linux,initrd-end");
if let Some(chosen) = fdt.chosen()? {
if let (Some(start), Some(end)) = (chosen.getprop_u32(start)?, chosen.getprop_u32(end)?) {
@@ -60,145 +71,118 @@
Ok(None)
}
-/// Read and validate the size and base address of memory, and returns the size
-fn parse_memory_node(fdt: &libfdt::Fdt) -> Result<NonZeroUsize, RebootReason> {
- let memory_range = fdt
- .memory()
- // Actually, these checks are unnecessary because we read /memory node in entry.rs
- // where the exactly same checks are done. We are repeating the same check just for
- // extra safety (in case when the code structure changes in the future).
- .map_err(|e| {
- error!("Failed to get /memory from the DT: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("Node /memory was found empty");
- RebootReason::InvalidFdt
- })?
- .next()
- .ok_or_else(|| {
- error!("Failed to read memory range from the DT");
- RebootReason::InvalidFdt
- })?;
+fn patch_initrd_range(fdt: &mut Fdt, initrd_range: &Range<usize>) -> libfdt::Result<()> {
+ let start = u32::try_from(initrd_range.start).unwrap();
+ let end = u32::try_from(initrd_range.end).unwrap();
- let base = memory_range.start;
- if base as u64 != DeviceTreeInfo::RAM_BASE_ADDR {
- error!("Memory base address {:#x} is not {:#x}", base, DeviceTreeInfo::RAM_BASE_ADDR);
+ let mut node = fdt.chosen_mut()?.ok_or(FdtError::NotFound)?;
+ node.setprop(cstr!("linux,initrd-start"), &start.to_be_bytes())?;
+ node.setprop(cstr!("linux,initrd-end"), &end.to_be_bytes())?;
+ Ok(())
+}
+
+fn read_bootargs_from(fdt: &Fdt) -> libfdt::Result<Option<CString>> {
+ if let Some(chosen) = fdt.chosen()? {
+ if let Some(bootargs) = chosen.getprop_str(cstr!("bootargs"))? {
+ // We need to copy the string to the heap because the original fdt will be invalidated
+ // when it is overwritten by the templated DT
+ let copy = CString::new(bootargs.to_bytes()).map_err(|_| FdtError::BadValue)?;
+ return Ok(Some(copy));
+ }
+ }
+ Ok(None)
+}
+
+fn patch_bootargs(fdt: &mut Fdt, bootargs: &CStr) -> libfdt::Result<()> {
+ let mut node = fdt.chosen_mut()?.ok_or(FdtError::NotFound)?;
+ // TODO(b/275306568) filter out dangerous options
+ node.setprop(cstr!("bootargs"), bootargs.to_bytes_with_nul())
+}
+
+/// Read the first range in /memory node in DT
+fn read_memory_range_from(fdt: &Fdt) -> libfdt::Result<Range<usize>> {
+ fdt.memory()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)
+}
+
+/// Check that the memory range has the expected base address, a page-multiple size, and is not empty
+fn validate_memory_range(range: &Range<usize>) -> Result<(), RebootReason> {
+ let base = range.start;
+ if base != BASE_ADDR {
+ error!("Memory base address {:#x} is not {:#x}", base, BASE_ADDR);
return Err(RebootReason::InvalidFdt);
}
- let size = memory_range.len(); // end is exclusive
+ let size = range.len();
if size % GUEST_PAGE_SIZE != 0 {
error!("Memory size {:#x} is not a multiple of page size {:#x}", size, GUEST_PAGE_SIZE);
return Err(RebootReason::InvalidFdt);
}
- // In the u-boot implementation, we checked if base + size > u64::MAX, but we don't need that
- // because memory() function uses checked_add when constructing the Range object. If an
- // overflow happened, we should have gotten None from the next() call above and would have
- // bailed already.
- NonZeroUsize::new(size).ok_or_else(|| {
- error!("Memory size can't be 0");
- RebootReason::InvalidFdt
- })
+ if size == 0 {
+ error!("Memory size is 0");
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
}
-/// Read the number of CPUs
-fn parse_cpu_nodes(fdt: &libfdt::Fdt) -> Result<NonZeroUsize, RebootReason> {
- let num = fdt
- .compatible_nodes(CStr::from_bytes_with_nul(b"arm,arm-v8\0").unwrap())
- .map_err(|e| {
- error!("Failed to read compatible nodes \"arm,arm-v8\" from DT: {e}");
- RebootReason::InvalidFdt
- })?
- .count();
- NonZeroUsize::new(num).ok_or_else(|| {
+fn patch_memory_range(fdt: &mut Fdt, memory_range: &Range<usize>) -> libfdt::Result<()> {
+ let size = memory_range.len() as u64;
+ fdt.node_mut(cstr!("/memory"))?
+ .ok_or(FdtError::NotFound)?
+ .setprop_inplace(cstr!("reg"), flatten(&[BASE_ADDR.to_be_bytes(), size.to_be_bytes()]))
+}
+
+/// Read the number of CPUs from DT
+fn read_num_cpus_from(fdt: &Fdt) -> libfdt::Result<usize> {
+ Ok(fdt.compatible_nodes(cstr!("arm,arm-v8"))?.count())
+}
+
+/// Validate number of CPUs
+fn validate_num_cpus(num_cpus: usize) -> Result<(), RebootReason> {
+ if num_cpus == 0 {
error!("Number of CPU can't be 0");
- RebootReason::InvalidFdt
- })
+ return Err(RebootReason::InvalidFdt);
+ }
+ if DeviceTreeInfo::GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus.try_into().unwrap()).is_none() {
+ error!("Too many CPUs for gic: {}", num_cpus);
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+/// Patch the DT, keeping the first `num_cpus` arm,arm-v8 compatible nodes and pruning the rest.
+fn patch_num_cpus(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
+ let cpu = cstr!("arm,arm-v8");
+ let mut next = fdt.root_mut()?.next_compatible(cpu)?;
+ for _ in 0..num_cpus {
+ next = if let Some(current) = next {
+ current.next_compatible(cpu)?
+ } else {
+ return Err(FdtError::NoSpace);
+ };
+ }
+ while let Some(current) = next {
+ next = current.delete_and_next_compatible(cpu)?;
+ }
+ Ok(())
}
#[derive(Debug)]
-#[allow(dead_code)] // TODO: remove this
struct PciInfo {
- ranges: [Range<u64>; 2],
- num_irq: usize,
+ ranges: [PciAddrRange; 2],
+ irq_masks: ArrayVec<[PciIrqMask; PciInfo::MAX_IRQS]>,
+ irq_maps: ArrayVec<[PciIrqMap; PciInfo::MAX_IRQS]>,
}
-/// Read and validate PCI node
-fn parse_pci_nodes(fdt: &libfdt::Fdt) -> Result<PciInfo, RebootReason> {
- let node = fdt
- .compatible_nodes(CStr::from_bytes_with_nul(b"pci-host-cam-generic\0").unwrap())
- .map_err(|e| {
- error!("Failed to read compatible node \"pci-host-cam-generic\" from DT: {e}");
- RebootReason::InvalidFdt
- })?
- .next()
- .ok_or_else(|| {
- // pvmfw requires at least one pci device (virtio-blk) for the instance disk. So,
- // let's fail early.
- error!("Compatible node \"pci-host-cam-generic\" doesn't exist");
- RebootReason::InvalidFdt
- })?;
-
- let mut iter = node
- .ranges::<(u32, u64), u64, u64>()
- .map_err(|e| {
- error!("Failed to read ranges from PCI node: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("PCI node missing ranges property");
- RebootReason::InvalidFdt
- })?;
-
- let range0 = iter.next().ok_or_else(|| {
- error!("First range missing in PCI node");
- RebootReason::InvalidFdt
- })?;
- let range0 = get_and_validate_pci_range(&range0)?;
-
- let range1 = iter.next().ok_or_else(|| {
- error!("Second range missing in PCI node");
- RebootReason::InvalidFdt
- })?;
- let range1 = get_and_validate_pci_range(&range1)?;
-
- let num_irq = count_and_validate_pci_irq_masks(&node)?;
-
- validate_pci_irq_maps(&node)?;
-
- Ok(PciInfo { ranges: [range0, range1], num_irq })
+impl PciInfo {
+ const IRQ_MASK_CELLS: usize = 4;
+ const IRQ_MAP_CELLS: usize = 10;
+ const MAX_IRQS: usize = 8;
}
-fn get_and_validate_pci_range(
- range: &AddressRange<(u32, u64), u64, u64>,
-) -> Result<Range<u64>, RebootReason> {
- let mem_flags = PciMemoryFlags(range.addr.0);
- let range_type = mem_flags.range_type();
- let prefetchable = mem_flags.prefetchable();
- let bus_addr = range.addr.1;
- let cpu_addr = range.parent_addr;
- let size = range.size;
- if range_type != PciRangeType::Memory64 {
- error!("Invalid range type {:?} for bus address {:#x} in PCI node", range_type, bus_addr);
- return Err(RebootReason::InvalidFdt);
- }
- if prefetchable {
- error!("PCI bus address {:#x} in PCI node is prefetchable", bus_addr);
- return Err(RebootReason::InvalidFdt);
- }
- // Enforce ID bus-to-cpu mappings, as used by crosvm.
- if bus_addr != cpu_addr {
- error!("PCI bus address: {:#x} is different from CPU address: {:#x}", bus_addr, cpu_addr);
- return Err(RebootReason::InvalidFdt);
- }
- let bus_end = bus_addr.checked_add(size).ok_or_else(|| {
- error!("PCI address range size {:#x} too big", size);
- RebootReason::InvalidFdt
- })?;
- Ok(bus_addr..bus_end)
-}
+type PciAddrRange = AddressRange<(u32, u64), u64, u64>;
+type PciIrqMask = [u32; PciInfo::IRQ_MASK_CELLS];
+type PciIrqMap = [u32; PciInfo::IRQ_MAP_CELLS];
/// Iterator that takes N cells as a chunk
struct CellChunkIterator<'a, const N: usize> {
@@ -222,39 +206,104 @@
}
}
-fn count_and_validate_pci_irq_masks(pci_node: &libfdt::FdtNode) -> Result<usize, RebootReason> {
- const IRQ_MASK_CELLS: usize = 4;
+/// Read the PCI host controller ranges, interrupt maps, and interrupt map masks from the DT
+fn read_pci_info_from(fdt: &Fdt) -> libfdt::Result<PciInfo> {
+ let node =
+ fdt.compatible_nodes(cstr!("pci-host-cam-generic"))?.next().ok_or(FdtError::NotFound)?;
+
+ let mut ranges = node.ranges::<(u32, u64), u64, u64>()?.ok_or(FdtError::NotFound)?;
+ let range0 = ranges.next().ok_or(FdtError::NotFound)?;
+ let range1 = ranges.next().ok_or(FdtError::NotFound)?;
+
+ let irq_masks = node.getprop_cells(cstr!("interrupt-map-mask"))?.ok_or(FdtError::NotFound)?;
+ let irq_masks = CellChunkIterator::<{ PciInfo::IRQ_MASK_CELLS }>::new(irq_masks);
+ let irq_masks: ArrayVec<[PciIrqMask; PciInfo::MAX_IRQS]> =
+ irq_masks.take(PciInfo::MAX_IRQS).collect();
+
+ let irq_maps = node.getprop_cells(cstr!("interrupt-map"))?.ok_or(FdtError::NotFound)?;
+ let irq_maps = CellChunkIterator::<{ PciInfo::IRQ_MAP_CELLS }>::new(irq_maps);
+ let irq_maps: ArrayVec<[PciIrqMap; PciInfo::MAX_IRQS]> =
+ irq_maps.take(PciInfo::MAX_IRQS).collect();
+
+ Ok(PciInfo { ranges: [range0, range1], irq_masks, irq_maps })
+}
+
+fn validate_pci_info(pci_info: &PciInfo, memory_range: &Range<usize>) -> Result<(), RebootReason> {
+ for range in pci_info.ranges.iter() {
+ validate_pci_addr_range(range, memory_range)?;
+ }
+ for irq_mask in pci_info.irq_masks.iter() {
+ validate_pci_irq_mask(irq_mask)?;
+ }
+ for (idx, irq_map) in pci_info.irq_maps.iter().enumerate() {
+ validate_pci_irq_map(irq_map, idx)?;
+ }
+ Ok(())
+}
+
+fn validate_pci_addr_range(
+ range: &PciAddrRange,
+ memory_range: &Range<usize>,
+) -> Result<(), RebootReason> {
+ let mem_flags = PciMemoryFlags(range.addr.0);
+ let range_type = mem_flags.range_type();
+ let prefetchable = mem_flags.prefetchable();
+ let bus_addr = range.addr.1;
+ let cpu_addr = range.parent_addr;
+ let size = range.size;
+
+ if range_type != PciRangeType::Memory64 {
+ error!("Invalid range type {:?} for bus address {:#x} in PCI node", range_type, bus_addr);
+ return Err(RebootReason::InvalidFdt);
+ }
+ if prefetchable {
+ error!("PCI bus address {:#x} in PCI node is prefetchable", bus_addr);
+ return Err(RebootReason::InvalidFdt);
+ }
+ // Enforce ID bus-to-cpu mappings, as used by crosvm.
+ if bus_addr != cpu_addr {
+ error!("PCI bus address: {:#x} is different from CPU address: {:#x}", bus_addr, cpu_addr);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let Some(bus_end) = bus_addr.checked_add(size) else {
+ error!("PCI address range size {:#x} overflows", size);
+ return Err(RebootReason::InvalidFdt);
+ };
+ if bus_end > MAX_ADDR.try_into().unwrap() {
+ error!("PCI address end {:#x} is outside of translatable range", bus_end);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let memory_start = memory_range.start.try_into().unwrap();
+ let memory_end = memory_range.end.try_into().unwrap();
+
+ if max(bus_addr, memory_start) < min(bus_end, memory_end) {
+ error!(
+ "PCI address range {:#x}-{:#x} overlaps with main memory range {:#x}-{:#x}",
+ bus_addr, bus_end, memory_start, memory_end
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ Ok(())
+}
+
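The rejection above is the standard half-open interval overlap test: the PCI window and main memory intersect exactly when the larger of the two start addresses is below the smaller of the two end addresses. A minimal standalone sketch of the same check (not part of the patch; the addresses are made up):

```rust
use std::cmp::{max, min};
use std::ops::Range;

/// Half-open ranges overlap iff max(start) < min(end) -- the same condition
/// validate_pci_addr_range applies against the main memory range.
fn overlaps(a: &Range<u64>, b: &Range<u64>) -> bool {
    max(a.start, b.start) < min(a.end, b.end)
}

fn main() {
    let main_memory = 0x8000_0000u64..0xc000_0000; // hypothetical guest RAM
    assert!(!overlaps(&(0x7000_0000..0x8000_0000), &main_memory)); // window below RAM: accepted
    assert!(overlaps(&(0x7000_0000..0x9000_0000), &main_memory)); // window reaches into RAM: InvalidFdt
}
```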
+fn validate_pci_irq_mask(irq_mask: &PciIrqMask) -> Result<(), RebootReason> {
const IRQ_MASK_ADDR_HI: u32 = 0xf800;
const IRQ_MASK_ADDR_ME: u32 = 0x0;
const IRQ_MASK_ADDR_LO: u32 = 0x0;
const IRQ_MASK_ANY_IRQ: u32 = 0x7;
- const EXPECTED: [u32; IRQ_MASK_CELLS] =
+ const EXPECTED: PciIrqMask =
[IRQ_MASK_ADDR_HI, IRQ_MASK_ADDR_ME, IRQ_MASK_ADDR_LO, IRQ_MASK_ANY_IRQ];
- let name = CStr::from_bytes_with_nul(b"interrupt-map-mask\0").unwrap();
- let mut irq_count: usize = 0;
- for irq_mask in CellChunkIterator::<IRQ_MASK_CELLS>::new(
- pci_node
- .getprop_cells(name)
- .map_err(|e| {
- error!("Failed to read interrupt-map-mask property: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("PCI node missing interrupt-map-mask property");
- RebootReason::InvalidFdt
- })?,
- ) {
- if irq_mask != EXPECTED {
- error!("invalid irq mask {:?}", irq_mask);
- return Err(RebootReason::InvalidFdt);
- }
- irq_count += 1;
+ if *irq_mask != EXPECTED {
+ error!("Invalid PCI irq mask {:#?}", irq_mask);
+ return Err(RebootReason::InvalidFdt);
}
- Ok(irq_count)
+ Ok(())
}
-fn validate_pci_irq_maps(pci_node: &libfdt::FdtNode) -> Result<(), RebootReason> {
- const IRQ_MAP_CELLS: usize = 10;
+fn validate_pci_irq_map(irq_map: &PciIrqMap, idx: usize) -> Result<(), RebootReason> {
const PCI_DEVICE_IDX: usize = 11;
const PCI_IRQ_ADDR_ME: u32 = 0;
const PCI_IRQ_ADDR_LO: u32 = 0;
@@ -263,164 +312,139 @@
const GIC_SPI: u32 = 0;
const IRQ_TYPE_LEVEL_HIGH: u32 = 4;
- let mut phys_hi: u32 = 0;
- let mut irq_nr = AARCH64_IRQ_BASE;
+ let pci_addr = (irq_map[0], irq_map[1], irq_map[2]);
+ let pci_irq_number = irq_map[3];
+ let _controller_phandle = irq_map[4]; // skipped.
+ let gic_addr = (irq_map[5], irq_map[6]); // address-cells is <2> for GIC
+ // interrupt-cells is <3> for GIC
+ let gic_peripheral_interrupt_type = irq_map[7];
+ let gic_irq_number = irq_map[8];
+ let gic_irq_type = irq_map[9];
- let name = CStr::from_bytes_with_nul(b"interrupt-map\0").unwrap();
- for irq_map in CellChunkIterator::<IRQ_MAP_CELLS>::new(
- pci_node
- .getprop_cells(name)
- .map_err(|e| {
- error!("Failed to read interrupt-map property: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("PCI node missing interrupt-map property");
- RebootReason::InvalidFdt
- })?,
- ) {
- phys_hi += 0x1 << PCI_DEVICE_IDX;
+ let phys_hi: u32 = (0x1 << PCI_DEVICE_IDX) * (idx + 1) as u32;
+ let expected_pci_addr = (phys_hi, PCI_IRQ_ADDR_ME, PCI_IRQ_ADDR_LO);
- let pci_addr = (irq_map[0], irq_map[1], irq_map[2]);
- let pci_irq_number = irq_map[3];
- let _controller_phandle = irq_map[4]; // skipped.
- let gic_addr = (irq_map[5], irq_map[6]); // address-cells is <2> for GIC
- // interrupt-cells is <3> for GIC
- let gic_peripheral_interrupt_type = irq_map[7];
- let gic_irq_number = irq_map[8];
- let gic_irq_type = irq_map[9];
+ if pci_addr != expected_pci_addr {
+ error!("PCI device address {:#x} {:#x} {:#x} in interrupt-map is different from expected address \
+ {:#x} {:#x} {:#x}",
+ pci_addr.0, pci_addr.1, pci_addr.2, expected_pci_addr.0, expected_pci_addr.1, expected_pci_addr.2);
+ return Err(RebootReason::InvalidFdt);
+ }
- let expected_pci_addr = (phys_hi, PCI_IRQ_ADDR_ME, PCI_IRQ_ADDR_LO);
+ if pci_irq_number != PCI_IRQ_INTC {
+ error!(
+ "PCI INT# {:#x} in interrupt-map is different from expected value {:#x}",
+ pci_irq_number, PCI_IRQ_INTC
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
- if pci_addr != expected_pci_addr {
- error!("PCI device address {:#x} {:#x} {:#x} in interrupt-map is different from expected address \
- {:#x} {:#x} {:#x}",
- pci_addr.0, pci_addr.1, pci_addr.2, expected_pci_addr.0, expected_pci_addr.1, expected_pci_addr.2);
- return Err(RebootReason::InvalidFdt);
- }
- if pci_irq_number != PCI_IRQ_INTC {
- error!(
- "PCI INT# {:#x} in interrupt-map is different from expected value {:#x}",
- pci_irq_number, PCI_IRQ_INTC
- );
- return Err(RebootReason::InvalidFdt);
- }
- if gic_addr != (0, 0) {
- error!(
- "GIC address {:#x} {:#x} in interrupt-map is different from expected address \
- {:#x} {:#x}",
- gic_addr.0, gic_addr.1, 0, 0
- );
- return Err(RebootReason::InvalidFdt);
- }
- if gic_peripheral_interrupt_type != GIC_SPI {
- error!("GIC peripheral interrupt type {:#x} in interrupt-map is different from expected value \
- {:#x}", gic_peripheral_interrupt_type, GIC_SPI);
- return Err(RebootReason::InvalidFdt);
- }
- if gic_irq_number != irq_nr {
- error!(
- "GIC irq number {:#x} in interrupt-map is unexpected. Expected {:#x}",
- gic_irq_number, irq_nr
- );
- return Err(RebootReason::InvalidFdt);
- }
- irq_nr += 1; // move to next irq
- if gic_irq_type != IRQ_TYPE_LEVEL_HIGH {
- error!(
- "IRQ type in {:#x} is invalid. Must be LEVEL_HIGH {:#x}",
- gic_irq_type, IRQ_TYPE_LEVEL_HIGH
- );
- return Err(RebootReason::InvalidFdt);
+ if gic_addr != (0, 0) {
+ error!(
+ "GIC address {:#x} {:#x} in interrupt-map is different from expected address \
+ {:#x} {:#x}",
+ gic_addr.0, gic_addr.1, 0, 0
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if gic_peripheral_interrupt_type != GIC_SPI {
+ error!("GIC peripheral interrupt type {:#x} in interrupt-map is different from expected value \
+ {:#x}", gic_peripheral_interrupt_type, GIC_SPI);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let irq_nr: u32 = AARCH64_IRQ_BASE + (idx as u32);
+ if gic_irq_number != irq_nr {
+ error!(
+ "GIC irq number {:#x} in interrupt-map is unexpected. Expected {:#x}",
+ gic_irq_number, irq_nr
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if gic_irq_type != IRQ_TYPE_LEVEL_HIGH {
+ error!(
+ "IRQ type in {:#x} is invalid. Must be LEVEL_HIGH {:#x}",
+ gic_irq_type, IRQ_TYPE_LEVEL_HIGH
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+fn patch_pci_info(fdt: &mut Fdt, pci_info: &PciInfo) -> libfdt::Result<()> {
+ let mut node = fdt
+ .root_mut()?
+ .next_compatible(cstr!("pci-host-cam-generic"))?
+ .ok_or(FdtError::NotFound)?;
+
+ let irq_masks_size = pci_info.irq_masks.len() * size_of::<PciIrqMask>();
+ node.trimprop(cstr!("interrupt-map-mask"), irq_masks_size)?;
+
+ let irq_maps_size = pci_info.irq_maps.len() * size_of::<PciIrqMap>();
+ node.trimprop(cstr!("interrupt-map"), irq_maps_size)?;
+
+ node.setprop_inplace(
+ cstr!("ranges"),
+ flatten(&[pci_info.ranges[0].to_cells(), pci_info.ranges[1].to_cells()]),
+ )
+}
+
+#[derive(Default, Debug)]
+struct SerialInfo {
+ addrs: ArrayVec<[u64; Self::MAX_SERIALS]>,
+}
+
+impl SerialInfo {
+ const MAX_SERIALS: usize = 4;
+}
+
+fn read_serial_info_from(fdt: &Fdt) -> libfdt::Result<SerialInfo> {
+ let mut addrs: ArrayVec<[u64; SerialInfo::MAX_SERIALS]> = Default::default();
+ for node in fdt.compatible_nodes(cstr!("ns16550a"))?.take(SerialInfo::MAX_SERIALS) {
+ let reg = node.reg()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)?;
+ addrs.push(reg.addr);
+ }
+ Ok(SerialInfo { addrs })
+}
+
+/// Patch the DT by deleting the ns16550a-compatible nodes whose addresses are unknown
+fn patch_serial_info(fdt: &mut Fdt, serial_info: &SerialInfo) -> libfdt::Result<()> {
+ let name = cstr!("ns16550a");
+ let mut next = fdt.root_mut()?.next_compatible(name);
+ while let Some(current) = next? {
+ let reg = FdtNode::from_mut(&current)
+ .reg()?
+ .ok_or(FdtError::NotFound)?
+ .next()
+ .ok_or(FdtError::NotFound)?;
+ next = if !serial_info.addrs.contains(&reg.addr) {
+ current.delete_and_next_compatible(name)
+ } else {
+ current.next_compatible(name)
}
}
Ok(())
}
-#[derive(Default, Debug)]
-#[allow(dead_code)] // TODO: remove this
-pub struct SerialInfo {
- addrs: ArrayVec<[u64; Self::SERIAL_MAX_COUNT]>,
-}
-
-impl SerialInfo {
- const SERIAL_MAX_COUNT: usize = 4;
-}
-
-fn parse_serial_nodes(fdt: &libfdt::Fdt) -> Result<SerialInfo, RebootReason> {
- let mut ret: SerialInfo = Default::default();
- for (i, node) in fdt
- .compatible_nodes(CStr::from_bytes_with_nul(b"ns16550a\0").unwrap())
- .map_err(|e| {
- error!("Failed to read compatible nodes \"ns16550a\" from DT: {e}");
- RebootReason::InvalidFdt
- })?
- .enumerate()
- {
- if i >= ret.addrs.capacity() {
- error!("Too many serials: {i}");
- return Err(RebootReason::InvalidFdt);
- }
- let reg = node
- .reg()
- .map_err(|e| {
- error!("Failed to read reg property from \"ns16550a\" node: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("No reg property in \"ns16550a\" node");
- RebootReason::InvalidFdt
- })?
- .next()
- .ok_or_else(|| {
- error!("No value in reg property of \"ns16550a\" node");
- RebootReason::InvalidFdt
- })?;
- ret.addrs.push(reg.addr);
- }
- Ok(ret)
-}
-
#[derive(Debug)]
-#[allow(dead_code)] // TODO: remove this
-pub struct SwiotlbInfo {
+struct SwiotlbInfo {
size: u64,
align: u64,
}
-fn parse_swiotlb_nodes(fdt: &libfdt::Fdt) -> Result<SwiotlbInfo, RebootReason> {
- let node = fdt
- .compatible_nodes(CStr::from_bytes_with_nul(b"restricted-dma-pool\0").unwrap())
- .map_err(|e| {
- error!("Failed to read compatible nodes \"restricted-dma-pool\" from DT: {e}");
- RebootReason::InvalidFdt
- })?
- .next()
- .ok_or_else(|| {
- error!("No compatible node \"restricted-dma-pool\" in DT");
- RebootReason::InvalidFdt
- })?;
- let size = node
- .getprop_u64(CStr::from_bytes_with_nul(b"size\0").unwrap())
- .map_err(|e| {
- error!("Failed to read \"size\" property of \"restricted-dma-pool\": {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("No \"size\" property in \"restricted-dma-pool\"");
- RebootReason::InvalidFdt
- })?;
+fn read_swiotlb_info_from(fdt: &Fdt) -> libfdt::Result<SwiotlbInfo> {
+ let node =
+ fdt.compatible_nodes(cstr!("restricted-dma-pool"))?.next().ok_or(FdtError::NotFound)?;
+ let size = node.getprop_u64(cstr!("size"))?.ok_or(FdtError::NotFound)?;
+ let align = node.getprop_u64(cstr!("alignment"))?.ok_or(FdtError::NotFound)?;
+ Ok(SwiotlbInfo { size, align })
+}
- let align = node
- .getprop_u64(CStr::from_bytes_with_nul(b"alignment\0").unwrap())
- .map_err(|e| {
- error!("Failed to read \"alignment\" property of \"restricted-dma-pool\": {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("No \"alignment\" property in \"restricted-dma-pool\"");
- RebootReason::InvalidFdt
- })?;
+fn validate_swiotlb_info(swiotlb_info: &SwiotlbInfo) -> Result<(), RebootReason> {
+ let size = swiotlb_info.size;
+ let align = swiotlb_info.align;
if size == 0 || (size % GUEST_PAGE_SIZE as u64) != 0 {
error!("Invalid swiotlb size {:#x}", size);
@@ -431,34 +455,217 @@
error!("Invalid swiotlb alignment {:#x}", align);
return Err(RebootReason::InvalidFdt);
}
+ Ok(())
+}
- Ok(SwiotlbInfo { size, align })
+fn patch_swiotlb_info(fdt: &mut Fdt, swiotlb_info: &SwiotlbInfo) -> libfdt::Result<()> {
+ let mut node =
+ fdt.root_mut()?.next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
+ node.setprop_inplace(cstr!("size"), &swiotlb_info.size.to_be_bytes())?;
+ node.setprop_inplace(cstr!("alignment"), &swiotlb_info.align.to_be_bytes())?;
+ Ok(())
+}
+
+fn patch_gic(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
+ let node = fdt.compatible_nodes(cstr!("arm,gic-v3"))?.next().ok_or(FdtError::NotFound)?;
+ let mut ranges = node.reg()?.ok_or(FdtError::NotFound)?;
+ let range0 = ranges.next().ok_or(FdtError::NotFound)?;
+ let mut range1 = ranges.next().ok_or(FdtError::NotFound)?;
+
+ let addr = range0.addr;
+ // Cannot overflow: the multiplication was already checked in validate_num_cpus().
+ let size: u64 =
+ DeviceTreeInfo::GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus.try_into().unwrap()).unwrap();
+
+ // Place the redistributor region (range1) immediately below the distributor (range0).
+ range1.addr = addr - size;
+ range1.size = Some(size);
+
+ let range0 = range0.to_cells();
+ let range1 = range1.to_cells();
+ let value = [
+ range0.0, // addr
+ range0.1.unwrap(), // size
+ range1.0, // addr
+ range1.1.unwrap(), // size
+ ];
+
+ let mut node =
+ fdt.root_mut()?.next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
+ node.setprop_inplace(cstr!("reg"), flatten(&value))
+}
+
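For reference, a worked example (not from the patch; the distributor base address is invented) of how the redistributor block is sized from the 128 KiB-per-vCPU constant on `DeviceTreeInfo` and placed immediately below the distributor:

```rust
const SIZE_4KB: u64 = 4 << 10; // 4 KiB, mirroring the pvmfw helpers constant
const GIC_REDIST_SIZE_PER_CPU: u64 = 32 * SIZE_4KB; // 128 KiB of GICR frames per vCPU

fn main() {
    let num_cpus: u64 = 4;
    let gicd_addr: u64 = 0x3fff_0000; // hypothetical distributor (range0) base
    let gicr_size = GIC_REDIST_SIZE_PER_CPU * num_cpus; // 512 KiB for 4 vCPUs
    let gicr_addr = gicd_addr - gicr_size; // redistributors sit just below the distributor
    assert_eq!(gicr_size, 0x8_0000);
    assert_eq!(gicr_addr, 0x3ff7_0000);
}
```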
+fn patch_timer(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
+ const NUM_INTERRUPTS: usize = 4;
+ const CELLS_PER_INTERRUPT: usize = 3;
+ let node = fdt.compatible_nodes(cstr!("arm,armv8-timer"))?.next().ok_or(FdtError::NotFound)?;
+ let interrupts = node.getprop_cells(cstr!("interrupts"))?.ok_or(FdtError::NotFound)?;
+ let mut value: ArrayVec<[u32; NUM_INTERRUPTS * CELLS_PER_INTERRUPT]> =
+ interrupts.take(NUM_INTERRUPTS * CELLS_PER_INTERRUPT).collect();
+
+ let num_cpus: u32 = num_cpus.try_into().unwrap();
+ let cpu_mask: u32 = (((0x1 << num_cpus) - 1) & 0xff) << 8;
+ for v in value.iter_mut().skip(2).step_by(CELLS_PER_INTERRUPT) {
+ *v |= cpu_mask;
+ }
+ for v in value.iter_mut() {
+ *v = v.to_be();
+ }
+
+ // SAFETY - the u8 array has exactly the same size as the u32 array, and any byte pattern is valid for u8.
+ let value = unsafe {
+ core::mem::transmute::<
+ [u32; NUM_INTERRUPTS * CELLS_PER_INTERRUPT],
+ [u8; NUM_INTERRUPTS * CELLS_PER_INTERRUPT * size_of::<u32>()],
+ >(value.into_inner())
+ };
+
+ let mut node =
+ fdt.root_mut()?.next_compatible(cstr!("arm,armv8-timer"))?.ok_or(FdtError::NotFound)?;
+ node.setprop_inplace(cstr!("interrupts"), value.as_slice())
}
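The `cpu_mask` expression above builds the per-CPU interrupt mask (bits [15:8] of the third interrupt cell, the PPI CPU mask in the GIC binding) with one bit per vCPU. A small sketch of the same arithmetic, outside the patch:

```rust
// One bit per vCPU in bits [15:8] of the timer interrupt specifier's third cell,
// matching the cpu_mask computed by patch_timer.
fn timer_cpu_mask(num_cpus: u32) -> u32 {
    (((0x1u32 << num_cpus) - 1) & 0xff) << 8
}

fn main() {
    assert_eq!(timer_cpu_mask(1), 0x0100); // CPU 0 only
    assert_eq!(timer_cpu_mask(4), 0x0f00); // CPUs 0-3
    assert_eq!(timer_cpu_mask(8), 0xff00); // all eight mask bits set
}
```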
#[derive(Debug)]
-#[allow(dead_code)] // TODO: remove this
pub struct DeviceTreeInfo {
- memory_size: NonZeroUsize,
- num_cpu: NonZeroUsize,
+ pub kernel_range: Option<Range<usize>>,
+ pub initrd_range: Option<Range<usize>>,
+ pub memory_range: Range<usize>,
+ bootargs: Option<CString>,
+ num_cpus: usize,
pci_info: PciInfo,
serial_info: SerialInfo,
swiotlb_info: SwiotlbInfo,
}
impl DeviceTreeInfo {
- const RAM_BASE_ADDR: u64 = 0x8000_0000;
+ const GIC_REDIST_SIZE_PER_CPU: u64 = (32 * SIZE_4KB) as u64;
}
-pub fn parse_device_tree(fdt: &libfdt::Fdt) -> Result<DeviceTreeInfo, RebootReason> {
+pub fn sanitize_device_tree(fdt: &mut Fdt) -> Result<DeviceTreeInfo, RebootReason> {
+ let info = parse_device_tree(fdt)?;
+ debug!("Device tree info: {:?}", info);
+
+ fdt.copy_from_slice(pvmfw_fdt_template::RAW).map_err(|e| {
+ error!("Failed to instantiate FDT from the template DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ patch_device_tree(fdt, &info)?;
+ Ok(info)
+}
+
+fn parse_device_tree(fdt: &libfdt::Fdt) -> Result<DeviceTreeInfo, RebootReason> {
+ let kernel_range = read_kernel_range_from(fdt).map_err(|e| {
+ error!("Failed to read kernel range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let initrd_range = read_initrd_range_from(fdt).map_err(|e| {
+ error!("Failed to read initrd range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let memory_range = read_memory_range_from(fdt).map_err(|e| {
+ error!("Failed to read memory range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_memory_range(&memory_range)?;
+
+ let bootargs = read_bootargs_from(fdt).map_err(|e| {
+ error!("Failed to read bootargs from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let num_cpus = read_num_cpus_from(fdt).map_err(|e| {
+ error!("Failed to read num cpus from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_num_cpus(num_cpus)?;
+
+ let pci_info = read_pci_info_from(fdt).map_err(|e| {
+ error!("Failed to read pci info from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_pci_info(&pci_info, &memory_range)?;
+
+ let serial_info = read_serial_info_from(fdt).map_err(|e| {
+ error!("Failed to read serial info from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let swiotlb_info = read_swiotlb_info_from(fdt).map_err(|e| {
+ error!("Failed to read swiotlb info from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_swiotlb_info(&swiotlb_info)?;
+
Ok(DeviceTreeInfo {
- memory_size: parse_memory_node(fdt)?,
- num_cpu: parse_cpu_nodes(fdt)?,
- pci_info: parse_pci_nodes(fdt)?,
- serial_info: parse_serial_nodes(fdt)?,
- swiotlb_info: parse_swiotlb_nodes(fdt)?,
+ kernel_range,
+ initrd_range,
+ memory_range,
+ bootargs,
+ num_cpus,
+ pci_info,
+ serial_info,
+ swiotlb_info,
})
}
+fn patch_device_tree(fdt: &mut Fdt, info: &DeviceTreeInfo) -> Result<(), RebootReason> {
+ fdt.unpack().map_err(|e| {
+ error!("Failed to unpack DT for patching: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ if let Some(initrd_range) = &info.initrd_range {
+ patch_initrd_range(fdt, initrd_range).map_err(|e| {
+ error!("Failed to patch initrd range to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ }
+ patch_memory_range(fdt, &info.memory_range).map_err(|e| {
+ error!("Failed to patch memory range to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ if let Some(bootargs) = &info.bootargs {
+ patch_bootargs(fdt, bootargs.as_c_str()).map_err(|e| {
+ error!("Failed to patch bootargs to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ }
+ patch_num_cpus(fdt, info.num_cpus).map_err(|e| {
+ error!("Failed to patch cpus to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_pci_info(fdt, &info.pci_info).map_err(|e| {
+ error!("Failed to patch pci info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_serial_info(fdt, &info.serial_info).map_err(|e| {
+ error!("Failed to patch serial info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_swiotlb_info(fdt, &info.swiotlb_info).map_err(|e| {
+ error!("Failed to patch swiotlb info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_gic(fdt, info.num_cpus).map_err(|e| {
+ error!("Failed to patch gic info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_timer(fdt, info.num_cpus).map_err(|e| {
+ error!("Failed to patch timer info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ fdt.pack().map_err(|e| {
+ error!("Failed to pack DT after patching: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ Ok(())
+}
+
/// Modifies the input DT according to the fields of the configuration.
pub fn modify_for_next_stage(
fdt: &mut Fdt,
@@ -468,46 +675,27 @@
) -> libfdt::Result<()> {
fdt.unpack()?;
- add_dice_node(fdt, bcc.as_ptr() as usize, bcc.len())?;
+ patch_dice_node(fdt, bcc.as_ptr() as usize, bcc.len())?;
- set_or_clear_chosen_flag(
- fdt,
- CStr::from_bytes_with_nul(b"avf,strict-boot\0").unwrap(),
- strict_boot,
- )?;
- set_or_clear_chosen_flag(
- fdt,
- CStr::from_bytes_with_nul(b"avf,new-instance\0").unwrap(),
- new_instance,
- )?;
+ set_or_clear_chosen_flag(fdt, cstr!("avf,strict-boot"), strict_boot)?;
+ set_or_clear_chosen_flag(fdt, cstr!("avf,new-instance"), new_instance)?;
fdt.pack()?;
Ok(())
}
-/// Add a "google,open-dice"-compatible reserved-memory node to the tree.
-fn add_dice_node(fdt: &mut Fdt, addr: usize, size: usize) -> libfdt::Result<()> {
- let reserved_memory = CStr::from_bytes_with_nul(b"/reserved-memory\0").unwrap();
+/// Patch the "google,open-dice"-compatible reserved-memory node to point to the bcc range
+fn patch_dice_node(fdt: &mut Fdt, addr: usize, size: usize) -> libfdt::Result<()> {
// We reject DTs with missing reserved-memory node as validation should have checked that the
// "swiotlb" subnode (compatible = "restricted-dma-pool") was present.
- let mut reserved_memory = fdt.node_mut(reserved_memory)?.ok_or(libfdt::FdtError::NotFound)?;
+ let node = fdt.node_mut(cstr!("/reserved-memory"))?.ok_or(libfdt::FdtError::NotFound)?;
- let dice = CStr::from_bytes_with_nul(b"dice\0").unwrap();
- let mut dice = reserved_memory.add_subnode(dice)?;
+ let mut node = node.next_compatible(cstr!("google,open-dice"))?.ok_or(FdtError::NotFound)?;
- let compatible = CStr::from_bytes_with_nul(b"compatible\0").unwrap();
- dice.appendprop(compatible, b"google,open-dice\0")?;
-
- let no_map = CStr::from_bytes_with_nul(b"no-map\0").unwrap();
- dice.appendprop(no_map, &[])?;
-
- let addr = addr.try_into().unwrap();
- let size = size.try_into().unwrap();
- let reg = CStr::from_bytes_with_nul(b"reg\0").unwrap();
- dice.appendprop_addrrange(reg, addr, size)?;
-
- Ok(())
+ let addr: u64 = addr.try_into().unwrap();
+ let size: u64 = size.try_into().unwrap();
+ node.setprop_inplace(cstr!("reg"), flatten(&[addr.to_be_bytes(), size.to_be_bytes()]))
}
fn set_or_clear_chosen_flag(fdt: &mut Fdt, flag: &CStr, value: bool) -> libfdt::Result<()> {
diff --git a/pvmfw/src/helpers.rs b/pvmfw/src/helpers.rs
index e6e3406..4df9386 100644
--- a/pvmfw/src/helpers.rs
+++ b/pvmfw/src/helpers.rs
@@ -113,3 +113,20 @@
reg.zeroize();
flush(reg)
}
+
+/// Flatten [[T; N]] into &[T]
+/// TODO: use slice::flatten when it graduates from experimental
+pub fn flatten<T, const N: usize>(original: &[[T; N]]) -> &[T] {
+ // SAFETY: no overflow because original (whose size is len()*N) is already in memory
+ let len = original.len() * N;
+ // SAFETY: [T] has the same layout as [T;N]
+ unsafe { core::slice::from_raw_parts(original.as_ptr().cast(), len) }
+}
+
+/// Create a `&CStr` out of a `&str` literal
+#[macro_export]
+macro_rules! cstr {
+ ($str:literal) => {{
+ CStr::from_bytes_with_nul(concat!($str, "\0").as_bytes()).unwrap()
+ }};
+}
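Together these two helpers enable the `setprop_inplace(cstr!(...), flatten(&[...]))` pattern used throughout the fdt.rs changes above. A minimal standalone sketch of that pattern (assuming only the standard library, with the `flatten` body copied from above so it compiles on its own):

```rust
use std::ffi::CStr;

// Copy of the helper above so the sketch is self-contained.
fn flatten<T, const N: usize>(original: &[[T; N]]) -> &[T] {
    let len = original.len() * N;
    // SAFETY: the arrays are stored contiguously, so `len` elements starting at
    // the same pointer cover exactly the original storage.
    unsafe { core::slice::from_raw_parts(original.as_ptr().cast(), len) }
}

fn main() {
    // A "reg" property value: <base size> as two big-endian u64 cells, as in the
    // patched reg properties in fdt.rs above.
    let base: u64 = 0x8000_0000;
    let size: u64 = 0x1000_0000;
    let reg = flatten(&[base.to_be_bytes(), size.to_be_bytes()]);
    assert_eq!(reg.len(), 16);
    assert_eq!(&reg[..8], &base.to_be_bytes());

    // What cstr!("reg") expands to.
    let name = CStr::from_bytes_with_nul(b"reg\0").unwrap();
    assert_eq!(name.to_bytes(), b"reg");
}
```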
diff --git a/pvmfw/src/instance.rs b/pvmfw/src/instance.rs
index fbf2040..a974543 100644
--- a/pvmfw/src/instance.rs
+++ b/pvmfw/src/instance.rs
@@ -258,11 +258,11 @@
impl EntryHeader {
fn new(uuid: Uuid, payload_size: usize) -> Self {
- Self { uuid: uuid.as_u128(), payload_size: u64::try_from(payload_size).unwrap().to_le() }
+ Self { uuid: uuid.to_u128_le(), payload_size: u64::try_from(payload_size).unwrap().to_le() }
}
fn uuid(&self) -> Uuid {
- Uuid::from_u128(self.uuid)
+ Uuid::from_u128_le(self.uuid)
}
fn payload_size(&self) -> usize {
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index e1ecac4..577ad6e 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -45,7 +45,6 @@
use crate::dice::PartialInputs;
use crate::entry::RebootReason;
use crate::fdt::modify_for_next_stage;
-use crate::fdt::parse_device_tree;
use crate::helpers::flush;
use crate::helpers::GUEST_PAGE_SIZE;
use crate::instance::get_or_generate_instance_salt;
@@ -84,11 +83,6 @@
})?;
trace!("BCC: {bcc_handover:x?}");
- // This parsing step includes validation. So this effectively ensures that the DT can't be
- // abused by the host to attack pvmfw in pci::initialize below.
- let device_tree_info = parse_device_tree(fdt)?;
- debug!("Device tree info: {:?}", device_tree_info);
-
// Set up PCI bus for VirtIO devices.
let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
debug!("PCI: {:#x?}", pci_info);
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 17dd36b..b223f82 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -35,6 +35,11 @@
use log::error;
use tinyvec::ArrayVec;
+/// Base of the system's contiguous "main" memory.
+pub const BASE_ADDR: usize = 0x8000_0000;
+/// First address that can't be translated by a level 1 TTBR0_EL1.
+pub const MAX_ADDR: usize = 1 << 40;
+
pub type MemoryRange = Range<usize>;
#[derive(Clone, Copy, Debug, Default)]
@@ -129,15 +134,11 @@
impl MemoryTracker {
const CAPACITY: usize = 5;
const MMIO_CAPACITY: usize = 5;
- /// Base of the system's contiguous "main" memory.
- const BASE: usize = 0x8000_0000;
- /// First address that can't be translated by a level 1 TTBR0_EL1.
- const MAX_ADDR: usize = 1 << 39;
/// Create a new instance from an active page table, covering the maximum RAM size.
pub fn new(page_table: mmu::PageTable) -> Self {
Self {
- total: Self::BASE..Self::MAX_ADDR,
+ total: BASE_ADDR..MAX_ADDR,
page_table,
regions: ArrayVec::new(),
mmio_regions: ArrayVec::new(),
diff --git a/tests/benchmark/AndroidTest.xml b/tests/benchmark/AndroidTest.xml
index 0214cd9..29bc95a 100644
--- a/tests/benchmark/AndroidTest.xml
+++ b/tests/benchmark/AndroidTest.xml
@@ -25,6 +25,11 @@
<target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
<option name="force-root" value="true" />
</target_preparer>
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="push" value="perf-setup.sh->/data/local/tmp/perf-setup.sh" />
+ <option name="post-push" value="chmod 755 /data/local/tmp/perf-setup.sh;/data/local/tmp/perf-setup.sh" />
+ <option name="cleanup" value="true" />
+ </target_preparer>
<test class="com.android.tradefed.testtype.AndroidJUnitTest" >
<option name="package" value="com.android.microdroid.benchmark" />
<option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index 4b11d77..9851a17 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -36,6 +36,7 @@
import android.system.virtualmachine.VirtualMachine;
import android.system.virtualmachine.VirtualMachineConfig;
import android.system.virtualmachine.VirtualMachineException;
+import android.system.Os;
import android.util.Log;
import com.android.microdroid.test.common.MetricsProcessor;
@@ -347,16 +348,7 @@
CrosvmStats(Function<String, String> shellExecutor) {
try {
- List<Integer> crosvmPids =
- ProcessUtil.getProcessMap(shellExecutor).entrySet().stream()
- .filter(e -> e.getValue().contains("crosvm"))
- .map(e -> e.getKey())
- .collect(java.util.stream.Collectors.toList());
- if (crosvmPids.size() != 1) {
- throw new IllegalStateException(
- "expected to find exactly one crosvm processes, found "
- + crosvmPids.size());
- }
+ int crosvmPid = ProcessUtil.getCrosvmPid(Os.getpid(), shellExecutor);
long hostRss = 0;
long hostPss = 0;
@@ -364,7 +356,7 @@
long guestPss = 0;
boolean hasGuestMaps = false;
for (ProcessUtil.SMapEntry entry :
- ProcessUtil.getProcessSmaps(crosvmPids.get(0), shellExecutor)) {
+ ProcessUtil.getProcessSmaps(crosvmPid, shellExecutor)) {
long rss = entry.metrics.get("Rss");
long pss = entry.metrics.get("Pss");
if (entry.name.contains("crosvm_guest")) {
diff --git a/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java b/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
index 940ec9c..c72d91e 100644
--- a/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
+++ b/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
@@ -22,9 +22,12 @@
import java.util.List;
import java.util.Map;
import java.util.function.Function;
+import java.util.stream.IntStream;
/** This class provides process utility for both device tests and host tests. */
public final class ProcessUtil {
+ private static final String CROSVM_BIN = "/apex/com.android.virt/bin/crosvm";
+ private static final String VIRTMGR_BIN = "/apex/com.android.virt/bin/virtmgr";
/** A memory map entry from /proc/{pid}/smaps */
public static class SMapEntry {
@@ -89,6 +92,35 @@
return processMap;
}
+ private static IntStream getChildProcesses(
+ int pid, String cmdlineFilter, Function<String, String> shellExecutor) {
+ String cmd = "pgrep -P " + pid;
+ if (cmdlineFilter != null) {
+ cmd += " -f " + cmdlineFilter;
+ }
+ return shellExecutor.apply(cmd).trim().lines().mapToInt(Integer::parseInt);
+ }
+
+ private static int getSingleChildProcess(
+ int parentPid, String cmdlineFilter, Function<String, String> shellExecutor) {
+ int[] pids = getChildProcesses(parentPid, cmdlineFilter, shellExecutor).toArray();
+ if (pids.length == 0) {
+ throw new IllegalStateException("No process found for " + cmdlineFilter);
+ } else if (pids.length > 1) {
+ throw new IllegalStateException("More than one process found for " + cmdlineFilter);
+ }
+ return pids[0];
+ }
+
+ public static int getVirtmgrPid(int parentPid, Function<String, String> shellExecutor) {
+ return getSingleChildProcess(parentPid, VIRTMGR_BIN, shellExecutor);
+ }
+
+ public static int getCrosvmPid(int parentPid, Function<String, String> shellExecutor) {
+ int virtmgrPid = getVirtmgrPid(parentPid, shellExecutor);
+ return getSingleChildProcess(virtmgrPid, CROSVM_BIN, shellExecutor);
+ }
+
// To ensure that only one object is created at a time.
private ProcessUtil() {}
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index 78500af..4b5cbda 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -9,31 +9,17 @@
}
genrule {
- name: "test_avf_debug_policy_with_ramdump",
+ name: "test_avf_debug_policy_with_log.dtbo",
defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_with_ramdump.dts"],
- out: ["avf_debug_policy_with_ramdump.dtbo"],
+ srcs: ["assets/avf_debug_policy_with_log.dts"],
+ out: ["avf_debug_policy_with_log.dtbo"],
}
genrule {
- name: "test_avf_debug_policy_without_ramdump",
+ name: "test_avf_debug_policy_without_log.dtbo",
defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_without_ramdump.dts"],
- out: ["avf_debug_policy_without_ramdump.dtbo"],
-}
-
-genrule {
- name: "test_avf_debug_policy_with_console_output",
- defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_with_console_output.dts"],
- out: ["avf_debug_policy_with_console_output.dtbo"],
-}
-
-genrule {
- name: "test_avf_debug_policy_without_console_output",
- defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_without_console_output.dts"],
- out: ["avf_debug_policy_without_console_output.dtbo"],
+ srcs: ["assets/avf_debug_policy_without_log.dts"],
+ out: ["avf_debug_policy_without_log.dtbo"],
}
genrule {
@@ -74,10 +60,8 @@
":test.com.android.virt.pem",
":test2.com.android.virt.pem",
":pvmfw_test",
- ":test_avf_debug_policy_with_ramdump",
- ":test_avf_debug_policy_without_ramdump",
- ":test_avf_debug_policy_with_console_output",
- ":test_avf_debug_policy_without_console_output",
+ ":test_avf_debug_policy_with_log.dtbo",
+ ":test_avf_debug_policy_without_log.dtbo",
":test_avf_debug_policy_with_adb",
":test_avf_debug_policy_without_adb",
"assets/bcc.dat",
diff --git a/tests/hostside/assets/avf_debug_policy_with_console_output.dts b/tests/hostside/assets/avf_debug_policy_with_log.dts
similarity index 100%
rename from tests/hostside/assets/avf_debug_policy_with_console_output.dts
rename to tests/hostside/assets/avf_debug_policy_with_log.dts
diff --git a/tests/hostside/assets/avf_debug_policy_with_ramdump.dts b/tests/hostside/assets/avf_debug_policy_with_ramdump.dts
deleted file mode 100644
index 26db7be..0000000
--- a/tests/hostside/assets/avf_debug_policy_with_ramdump.dts
+++ /dev/null
@@ -1,21 +0,0 @@
-/dts-v1/;
-/plugin/;
-
-/ {
- fragment@avf {
- target-path = "/";
-
- __overlay__ {
- avf {
- guest {
- common {
- ramdump = <1>;
- };
- microdroid {
- adb = <1>; // adb is required to check VM's bootargs.
- };
- };
- };
- };
- };
-};
\ No newline at end of file
diff --git a/tests/hostside/assets/avf_debug_policy_without_console_output.dts b/tests/hostside/assets/avf_debug_policy_without_log.dts
similarity index 100%
rename from tests/hostside/assets/avf_debug_policy_without_console_output.dts
rename to tests/hostside/assets/avf_debug_policy_without_log.dts
diff --git a/tests/hostside/assets/avf_debug_policy_without_ramdump.dts b/tests/hostside/assets/avf_debug_policy_without_ramdump.dts
deleted file mode 100644
index 194e314..0000000
--- a/tests/hostside/assets/avf_debug_policy_without_ramdump.dts
+++ /dev/null
@@ -1,21 +0,0 @@
-/dts-v1/;
-/plugin/;
-
-/ {
- fragment@avf {
- target-path = "/";
-
- __overlay__ {
- avf {
- guest {
- common {
- ramdump = <0>;
- };
- microdroid {
- adb = <1>; // adb is required to check VM's bootargs.
- };
- };
- };
- };
- };
-};
\ No newline at end of file
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
index 20a6045..a7f7906 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
@@ -67,6 +67,8 @@
// remove any leftover files under test root
android.tryRun("rm", "-rf", TEST_ROOT + "*");
+
+ android.tryRun("mkdir " + TEST_ROOT);
}
public static void cleanUpVirtualizationTestSetup(ITestDevice androidDevice)
diff --git a/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java b/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java
index 10f7003..18aa273 100644
--- a/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java
@@ -23,6 +23,7 @@
import static org.junit.Assume.assumeTrue;
import static org.junit.Assume.assumeFalse;
+import static org.junit.Assert.assertThrows;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
@@ -31,8 +32,9 @@
import com.android.microdroid.test.host.MicrodroidHostTestCaseBase;
import com.android.microdroid.test.host.Pvmfw;
import com.android.tradefed.device.DeviceNotAvailableException;
-import com.android.tradefed.device.TestDevice;
+import com.android.tradefed.device.DeviceRuntimeException;
import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.device.TestDevice;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
import com.android.tradefed.util.CommandStatus;
import com.android.tradefed.util.CommandResult;
@@ -68,6 +70,9 @@
@NonNull private static final String CUSTOM_PVMFW_IMG_PATH = TEST_ROOT + PVMFW_FILE_NAME;
@NonNull private static final String CUSTOM_PVMFW_IMG_PATH_PROP = "hypervisor.pvmfw.path";
+ @NonNull
+ private static final String AVF_DEBUG_POLICY_ADB_DT_PROP_PATH = "/avf/guest/microdroid/adb";
+
@NonNull private static final String MICRODROID_CMDLINE_PATH = "/proc/cmdline";
@NonNull private static final String MICRODROID_DT_ROOT_PATH = "/proc/device-tree";
@@ -146,35 +151,8 @@
}
@Test
- public void testRamdump() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_ramdump.dtbo");
- pvmfw.serialize(mCustomPvmfwBinFileOnHost);
- mMicrodroidDevice = launchProtectedVmAndWaitForBootCompleted(MICRODROID_DEBUG_FULL);
-
- assertThat(readMicrodroidFileAsString(MICRODROID_CMDLINE_PATH)).contains("crashkernel=");
- assertThat(readMicrodroidFileAsString(MICRODROID_DT_BOOTARGS_PATH))
- .contains("crashkernel=");
- assertThat(readMicrodroidFileAsHexString(MICRODROID_DT_RAMDUMP_PATH))
- .isEqualTo(HEX_STRING_ONE);
- }
-
- @Test
- public void testNoRamdump() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_ramdump.dtbo");
- pvmfw.serialize(mCustomPvmfwBinFileOnHost);
- mMicrodroidDevice = launchProtectedVmAndWaitForBootCompleted(MICRODROID_DEBUG_FULL);
-
- assertThat(readMicrodroidFileAsString(MICRODROID_CMDLINE_PATH))
- .doesNotContain("crashkernel=");
- assertThat(readMicrodroidFileAsString(MICRODROID_DT_BOOTARGS_PATH))
- .doesNotContain("crashkernel=");
- assertThat(readMicrodroidFileAsHexString(MICRODROID_DT_RAMDUMP_PATH))
- .isEqualTo(HEX_STRING_ZERO);
- }
-
- @Test
- public void testConsoleOutput() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_console_output.dtbo");
+ public void testLog_consoleOutput() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_log.dtbo");
pvmfw.serialize(mCustomPvmfwBinFileOnHost);
CommandResult result = tryLaunchProtectedNonDebuggableVm();
@@ -185,8 +163,20 @@
}
@Test
- public void testNoConsoleOutput() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_console_output.dtbo");
+ public void testLog_logcat() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_log.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ tryLaunchProtectedNonDebuggableVm();
+
+ assertWithMessage("Microdroid's logcat should have been enabled")
+ .that(hasMicrodroidLogcatOutput())
+ .isTrue();
+ }
+
+ @Test
+ public void testNoLog_noConsoleOutput() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_log.dtbo");
pvmfw.serialize(mCustomPvmfwBinFileOnHost);
CommandResult result = tryLaunchProtectedNonDebuggableVm();
@@ -197,6 +187,32 @@
}
@Test
+ public void testNoLog_noLogcat() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_log.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ assertThrows(
+ "Microdroid shouldn't be recognized because of missing adb connection",
+ DeviceRuntimeException.class,
+ () ->
+ launchProtectedVmAndWaitForBootCompleted(
+ MICRODROID_DEBUG_NONE, BOOT_FAILURE_WAIT_TIME_MS));
+ assertThat(hasMicrodroidLogcatOutput()).isFalse();
+ }
+
+ @Test
+ public void testAdb_boots() throws Exception {
+ assumeTrue(
+ "Skip if host wouldn't install adbd",
+ isDebugPolicyEnabled(AVF_DEBUG_POLICY_ADB_DT_PROP_PATH));
+
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_adb.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ launchProtectedVmAndWaitForBootCompleted(MICRODROID_DEBUG_NONE);
+ }
+
+ @Test
public void testNoAdb_boots() throws Exception {
Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_adb.dtbo");
pvmfw.serialize(mCustomPvmfwBinFileOnHost);
@@ -214,13 +230,23 @@
Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_adb.dtbo");
pvmfw.serialize(mCustomPvmfwBinFileOnHost);
- try {
- launchProtectedVmAndWaitForBootCompleted(
- MICRODROID_DEBUG_NONE, BOOT_FAILURE_WAIT_TIME_MS);
- assertWithMessage("adb shouldn't be available").fail();
- } catch (Exception e) {
- // expected exception. passthrough.
+ assertThrows(
+ "Microdroid shouldn't be recognized because of missing adb connection",
+ DeviceRuntimeException.class,
+ () ->
+ launchProtectedVmAndWaitForBootCompleted(
+ MICRODROID_DEBUG_NONE, BOOT_FAILURE_WAIT_TIME_MS));
+ }
+
+ private boolean isDebugPolicyEnabled(@NonNull String dtPropertyPath)
+ throws DeviceNotAvailableException {
+ CommandRunner runner = new CommandRunner(mAndroidDevice);
+ CommandResult result =
+ runner.runForResult("xxd", "-p", "/proc/device-tree" + dtPropertyPath);
+ if (result.getStatus() == CommandStatus.SUCCESS) {
+ return HEX_STRING_ONE.equals(result.getStdout().trim());
}
+ return false;
}
@NonNull
@@ -245,11 +271,17 @@
.build();
}
- @NonNull
- private boolean hasConsoleOutput(CommandResult result) throws DeviceNotAvailableException {
+ private boolean hasConsoleOutput(@NonNull CommandResult result)
+ throws DeviceNotAvailableException {
return result.getStdout().contains("Run /init as init process");
}
+ private boolean hasMicrodroidLogcatOutput() throws DeviceNotAvailableException {
+ CommandResult result =
+ new CommandRunner(mAndroidDevice).runForResult("test", "-s", MICRODROID_LOG_PATH);
+ return result.getExitCode() == 0;
+ }
+
private ITestDevice launchProtectedVmAndWaitForBootCompleted(String debugLevel)
throws DeviceNotAvailableException {
return launchProtectedVmAndWaitForBootCompleted(debugLevel, BOOT_COMPLETE_TIMEOUT_MS);
@@ -271,10 +303,10 @@
}
// Try to launch protected non-debuggable VM for a while and quit.
- // Non-debuggable VM doesn't enable adb, so there's no ITestDevice instance of it.
+ // Non-debuggable VM might not enable adb, so there may be no ITestDevice instance for it.
private CommandResult tryLaunchProtectedNonDebuggableVm() throws DeviceNotAvailableException {
// Can't use MicrodroidBuilder because it expects adb connection
- // but non-debuggable VM doesn't enable adb.
+ // but non-debuggable VM may not enable adb.
CommandRunner runner = new CommandRunner(mAndroidDevice);
runner.run("mkdir", "-p", TEST_ROOT);
mAndroidDevice.pushFile(mCustomPvmfwBinFileOnHost, TEST_ROOT + PVMFW_FILE_NAME);
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 542f595..7044ae7 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -30,6 +30,7 @@
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static org.junit.Assume.assumeFalse;
import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
import com.google.common.base.Strings;
import com.google.common.truth.BooleanSubject;
@@ -56,6 +57,7 @@
import android.util.Log;
import com.android.compatibility.common.util.CddTest;
+import com.android.compatibility.common.util.VsrTest;
import com.android.microdroid.test.device.MicrodroidDeviceTestBase;
import com.android.microdroid.test.vmshare.IVmShareTestService;
import com.android.microdroid.testservice.IAppCallback;
@@ -64,7 +66,6 @@
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.function.ThrowingRunnable;
@@ -635,6 +636,14 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
+ public void testAvfRequiresUpdatableApex() throws Exception {
+ assertWithMessage("Devices that support AVF must also support updatable APEX")
+ .that(SystemProperties.getBoolean("ro.apex.updatable", false))
+ .isTrue();
+ }
+
+ @Test
+ @CddTest(requirements = {"9.17/C-1-1"})
public void vmmGetAndCreate() throws Exception {
assumeSupportedDevice();
@@ -1173,7 +1182,6 @@
}
@Test
- @Ignore("b/249723852")
@CddTest(requirements = {
"9.17/C-1-1",
"9.17/C-2-7"
@@ -1951,6 +1959,25 @@
.isEqualTo(MS_NOEXEC);
}
+ @Test
+ @VsrTest(requirements = {"VSR-7.1-001.003"})
+ public void kernelVersionRequirement() throws Exception {
+ int firstApiLevel = SystemProperties.getInt("ro.product.first_api_level", 0);
+ assume().withMessage("Skip on devices launched before Android 14 (API level 34)")
+ .that(firstApiLevel)
+ .isAtLeast(34);
+
+ String[] tokens = KERNEL_VERSION.split("\\.");
+ int major = Integer.parseInt(tokens[0]);
+ int minor = Integer.parseInt(tokens[1]);
+
+ // Check kernel version >= 5.15
+ assertTrue(major >= 5);
+ if (major == 5) {
+ assertTrue(minor >= 15);
+ }
+ }
+
private static class VmShareServiceConnection implements ServiceConnection {
private final CountDownLatch mLatch = new CountDownLatch(1);
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index e015d9d..749d75f 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -725,10 +725,11 @@
/// user devices (W^X).
fn check_label_is_allowed(context: &SeContext) -> Result<()> {
match context.selinux_type()? {
- | "system_file" // immutable dm-verity protected partition
| "apk_data_file" // APKs of an installed app
- | "staging_data_file" // updated/staged APEX images
| "shell_data_file" // test files created via adb shell
+ | "staging_data_file" // updated/staged APEX images
+ | "system_file" // immutable dm-verity protected partition
+ | "virtualizationservice_data_file" // files created by VS / VirtMgr
=> Ok(()),
_ => bail!("Label {} is not allowed", context),
}
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index 9db0971..7201670 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -53,6 +53,7 @@
use rpcbinder::RpcServer;
/// external/crosvm
+use base::AsRawDescriptor;
use base::UnixSeqpacketListener;
use vm_control::{BalloonControlCommand, VmRequest, VmResponse};
@@ -491,6 +492,10 @@
// first, as monitor_vm_exit() takes it as well.
monitor_vm_exit_thread.map(JoinHandle::join);
+ // Now that the VM has been killed, shut down the VirtualMachineService
+ // server to eagerly free up the server threads.
+ self.vm_context.vm_server.shutdown()?;
+
Ok(())
}
@@ -722,8 +727,7 @@
command.arg("--unmap-guest-memory-on-fork");
if config.ramdump.is_some() {
- // Protected VM needs to reserve memory for ramdump here. pvmfw will drop This
- // if ramdump should be disabled (via debug policy). Note that we reserve more
+ // Protected VM needs to reserve memory for ramdump here. Note that we reserve more
// memory for the restricted dma pool.
let ramdump_reserve = RAMDUMP_RESERVED_MIB + swiotlb_size_mib;
command.arg("--params").arg(format!("crashkernel={ramdump_reserve}M"));
@@ -823,7 +827,9 @@
let control_server_socket = UnixSeqpacketListener::bind(crosvm_control_socket_path)
.context("failed to create control server")?;
- command.arg("--socket").arg(add_preserved_fd(&mut preserved_fds, &control_server_socket));
+ command
+ .arg("--socket")
+ .arg(add_preserved_fd(&mut preserved_fds, &control_server_socket.as_raw_descriptor()));
debug!("Preserving FDs {:?}", preserved_fds);
command.preserved_fds(preserved_fds);
diff --git a/virtualizationmanager/src/debug_config.rs b/virtualizationmanager/src/debug_config.rs
index 666c98d..ec3d591 100644
--- a/virtualizationmanager/src/debug_config.rs
+++ b/virtualizationmanager/src/debug_config.rs
@@ -48,10 +48,7 @@
let enabled_in_dp =
get_debug_policy_bool("/proc/device-tree/avf/guest/common/ramdump").unwrap_or_default();
let debuggable = match config {
- VirtualMachineConfig::RawConfig(_) => {
- // custom VMs are considered debuggable for flexibility
- true
- }
+ VirtualMachineConfig::RawConfig(_) => false,
VirtualMachineConfig::AppConfig(config) => config.debugLevel == DebugLevel::FULL,
};
diff --git a/vmbase/README.md b/vmbase/README.md
index 3554ae6..552ac31 100644
--- a/vmbase/README.md
+++ b/vmbase/README.md
@@ -25,28 +25,18 @@
```soong
rust_ffi_static {
name: "libvmbase_example",
+ defaults: ["vmbase_ffi_defaults"],
crate_name: "vmbase_example",
srcs: ["src/main.rs"],
- edition: "2021",
- no_stdlibs: true,
- stdlibs: [
- "libcompiler_builtins.rust_sysroot",
- "libcore.rust_sysroot",
- ],
rustlibs: [
"libvmbase",
],
- enabled: false,
- target: {
- android_arm64: {
- enabled: true,
- },
- },
}
```
-Note that stdlibs must be explicitly specified, as we don't want the normal set of libraries used
-for a C++ binary intended to run in Android userspace.
+`vmbase_ffi_defaults`, among other things, specifies the stdlibs, including the `compiler_builtins`
+and `core` crates. These must be specified explicitly, as we don't want the normal set of libraries
+used for a C++ binary intended to run in Android userspace.
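For orientation, a hedged sketch of what such a defaults module could contain, reconstructed from the properties this change removes from the example; the authoritative definition lives in the vmbase `Android.bp` and may differ:

```soong
rust_defaults {
    name: "vmbase_ffi_defaults",
    edition: "2021",
    no_stdlibs: true,
    stdlibs: [
        "libcompiler_builtins.rust_sysroot",
        "libcore.rust_sysroot",
    ],
    enabled: false,
    target: {
        android_arm64: {
            enabled: true,
        },
    },
}
```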
### Entry point
@@ -139,30 +129,18 @@
```soong
cc_binary {
- name: "vmbase_example_elf",
- stem: "vmbase_example",
+ name: "vmbase_example",
+ defaults: ["vmbase_elf_defaults"],
srcs: [
"idmap.S",
],
static_libs: [
- "libvmbase_entry",
"libvmbase_example",
],
- static_executable: true,
- nocrt: true,
- system_shared_libs: ["libc"],
- stl: "none",
linker_scripts: [
"image.ld",
":vmbase_sections",
],
- installable: false,
- enabled: false,
- target: {
- android_arm64: {
- enabled: true,
- },
- },
}
```
@@ -174,9 +152,9 @@
```soong
raw_binary {
- name: "vmbase_example",
- src: ":vmbase_example_elf",
+ name: "vmbase_example_bin",
stem: "vmbase_example.bin",
+ src: ":vmbase_example",
enabled: false,
target: {
android_arm64: {