Merge Android U (ab/10368041)
Bug: 291102124
Merged-In: I67a535a58d1fe77acb06e51700c2f80d11e298bb
Change-Id: I916694b2050c8296d7f4645c454acdee5fd07f38
diff --git a/OWNERS b/OWNERS
index 310add7..e560cec 100644
--- a/OWNERS
+++ b/OWNERS
@@ -12,16 +12,19 @@
# Other owners
alanstokes@google.com
aliceywang@google.com
-ardb@google.com
-ascull@google.com
inseob@google.com
+jaewan@google.com
+jakobvukalovic@google.com
jeffv@google.com
jooyung@google.com
-mzyngier@google.com
+keirf@google.com
ptosi@google.com
qperret@google.com
qwandor@google.com
-serbanc@google.com
+sebastianene@google.com
+seungjaeyoo@google.com
shikhapanwar@google.com
+smostafa@google.com
tabba@google.com
+vdonnefort@google.com
victorhsieh@google.com
diff --git a/README.md b/README.md
index eb28e94..eaa2579 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,29 @@
-# Virtualization
+# Android Virtualization Framework (AVF)
-This repository contains userspace services related to running virtual machines on Android,
-especially protected virtual machines. See the
-[getting started documentation](docs/getting_started/index.md) and
-[Microdroid README](microdroid/README.md) for more information.
+Android Virtualization Framework (AVF) provides secure and private execution environments for
+executing code. AVF is ideal for security-oriented use cases that require
+stronger isolation assurances than those offered by Android’s app sandbox.
+
+Visit [our public doc site](https://source.android.com/docs/core/virtualization) to learn more about
+what AVF is, what it is for, and how it is structured. This repository contains source code for
+userspace components of AVF.
+
+If you want a quick start, see the [getting started guide](docs/getting_started.md)
+and follow the steps there.
+
+For in-depth explanations about individual topics and components, visit the following links.
+
+AVF components:
+
+* [pVM firmware](pvmfw/README.md)
+* [Microdroid](microdroid/README.md)
+* [Microdroid kernel](microdroid/kernel/README.md)
+* [Microdroid payload](microdroid/payload/README.md)
+* [vmbase](vmbase/README.md)
+* [VM Payload API](vm_payload/README.md)
+
+How-Tos:
+* [Building and running a demo app in Java](demo/README.md)
+* [Building and running a demo app in C++](demo_native/README.md)
+* [Debugging](docs/debug)
+* [Using a custom VM](docs/custom_vm.md)
diff --git a/TEST_MAPPING b/TEST_MAPPING
index e2cf9a5..323b827 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -6,6 +6,9 @@
"name": "MicrodroidHostTestCases"
},
{
+ "name": "ComposHostTestCases"
+ },
+ {
"name": "MicrodroidTestApp"
},
{
@@ -35,9 +38,6 @@
"name": "ComposBenchmarkApp"
},
{
- "name": "ComposHostTestCases"
- },
- {
"name": "AVFHostTestCases"
}
],
diff --git a/apex/Android.bp b/apex/Android.bp
index fedcfcd..ccbdb3b 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -76,6 +76,7 @@
arm64: {
binaries: [
"crosvm",
+ "vfio_handler",
"virtmgr",
"virtualizationservice",
],
@@ -84,6 +85,7 @@
x86_64: {
binaries: [
"crosvm",
+ "vfio_handler",
"virtmgr",
"virtualizationservice",
],
@@ -101,11 +103,11 @@
"microdroid_initrd_normal",
"microdroid.json",
"microdroid_kernel",
- // rialto_bin is a prebuilt target wrapping the signed bare-metal service VM.
"rialto_bin",
],
host_required: [
"vm_shell",
+ "prepare_device_vfio",
],
apps: [
"EmptyPayloadApp",
@@ -136,6 +138,12 @@
installable: false,
}
+sh_binary_host {
+ name: "prepare_device_vfio",
+ src: "prepare_device_vfio.sh",
+ filename: "prepare_device_vfio.sh",
+}
+
// Virt apex needs a custom signer for its payload
python_binary_host {
name: "sign_virt_apex",
diff --git a/apex/prepare_device_vfio.sh b/apex/prepare_device_vfio.sh
new file mode 100755
index 0000000..de2d502
--- /dev/null
+++ b/apex/prepare_device_vfio.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+
+# Copyright 2023 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# prepare_device_vfio.sh: prepares a device for VFIO assignment by binding a VFIO driver to it
+
+adb="${ADB:="adb"}" # ADB command to use
+vfio_dir="/dev/vfio"
+platform_bus="/sys/bus/platform"
+vfio_reset_required="/sys/module/vfio_platform/parameters/reset_required"
+vfio_noiommu_param="/sys/module/vfio/parameters/enable_unsafe_noiommu_mode"
+vfio_unsafe_interrupts_param="/sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts"
+
+function print_help() {
+ echo "prepare_device_vfio.sh prepares a device for VFIO assignment"
+ echo ""
+ echo " Usage:"
+ echo " $0 DEVICE_NAME"
+ echo " Prepare device DEVICE_NAME for VFIO assignment."
+ echo ""
+ echo " help - prints this help message"
+}
+
+function cmd() {
+ $adb shell $@
+}
+
+function tcmd() {
+ trap "echo \"Error: adb shell command '$@' failed\" ; exit 1" ERR
+ $adb shell $@
+}
+
+function ensure_root() {
+ # Check user id
+ if [ $(cmd "id -u") != 0 ]; then
+ read -p "Must run as root; restart ADBD? [y/n] " answer
+ case $answer in
+ [Yy]* )
+ $adb root && $adb wait-for-device && sleep 3 || exit 1
+ ;;
+ * )
+ exit 1
+ esac
+ fi
+}
+
+function check_vfio() {
+ cmd "[ -c $vfio_dir/vfio ]"
+ if [ $? -ne 0 ]; then
+ echo "cannot find $vfio_dir/vfio"
+ exit 1
+ fi
+
+ cmd "[ -d $platform_bus/drivers/vfio-platform ]"
+ if [ $? -ne 0 ]; then
+ echo "VFIO-platform is not supported"
+ exit 1
+ fi
+}
+
+function check_device() {
+ cmd "[ -d $device_sys ]"
+ if [ $? -ne 0 ]; then
+ echo "no device $device ($device_sys)"
+ exit 1
+ fi
+}
+
+function get_device_iommu_group() {
+ local group=$(cmd "basename \$(readlink \"$device_sys/iommu_group\")")
+ if [ $? -eq 0 ]; then
+ echo $group
+ else
+ echo ""
+ fi
+}
+
+function misc_setup() {
+ # VFIO NOIOMMU check
+ if [ -z "$group" ]; then
+ echo "$device_sys does not have an IOMMU group - setting $vfio_noiommu_param"
+ tcmd "echo y > \"$vfio_noiommu_param\""
+ fi
+
+ # Disable SELinux to allow virtualizationmanager and crosvm to access sysfs
+ echo "[*WARN*] setenforce=0: SELinux is disabled"
+ tcmd "setenforce 0"
+
+ # Samsung IOMMU does not report interrupt remapping support, so enable unsafe interrupts
+ if [ -n "$group" ]; then
+ local iommu_drv=$(cmd "basename \$(readlink \"$device_sys/iommu/device/driver\")")
+ if [ "$iommu_drv" = "samsung-sysmmu-v9" ]; then
+ tcmd "echo y > \"$vfio_unsafe_interrupts_param\""
+ fi
+ fi
+}
+
+function bind_vfio_driver() {
+ # Check if a non-VFIO driver is currently bound, i.e. unbinding is needed
+ cmd "[ -e \"$device_driver\" ] && \
+ [ ! \$(basename \$(readlink \"$device_driver\")) = \"vfio-platform\" ]"
+ if [ $? -eq 0 ]; then
+ # Unbind current driver
+ tcmd "echo \"$device\" > \"$device_driver/unbind\""
+ fi
+
+ # Bind to VFIO driver
+ cmd "[ ! -e \"$device_driver\" ]"
+ if [ $? -eq 0 ]; then
+ # Bind vfio-platform driver
+ tcmd "echo \"vfio-platform\" > \"$device_sys/driver_override\""
+ tcmd "echo \"$device\" > \"$platform_bus/drivers_probe\""
+ sleep 2
+ fi
+}
+
+function verify_vfio_driver() {
+ # Verify new VFIO file structure
+ group=$(get_device_iommu_group)
+ if [ -z "$group" ]; then
+ echo "cannot setup VFIO-NOIOMMU for $device_sys"
+ exit 1
+ fi
+
+ cmd "[ ! -c \"$vfio_dir/$group\" ] || \
+ [ ! -e \"$device_driver\" ] || \
+ [ ! \$(basename \$(readlink \"$device_driver\")) = \"vfio-platform\" ]"
+ if [ $? -eq 0 ]; then
+ echo "could not bind $device to VFIO platform driver"
+
+ if [ $(cmd "cat $vfio_reset_required") = Y ]; then
+ echo "VFIO device reset handler must be registered. Either unset $vfio_reset_required, \
+or register a reset handler for $device_sys"
+ fi
+ exit 1
+ fi
+}
+
+function prepare_device() {
+ device="$1"
+ device_sys="/sys/bus/platform/devices/$device"
+ device_driver="$device_sys/driver"
+
+ ensure_root
+ check_vfio
+ check_device
+ group=$(get_device_iommu_group)
+ misc_setup
+
+ bind_vfio_driver
+ verify_vfio_driver
+
+ echo "Device: $device_sys"
+ echo "IOMMU group: $group"
+ echo "VFIO group file: $vfio_dir/$group"
+ echo "Ready!"
+}
+
+cmd=$1
+
+case $cmd in
+ ""|help) print_help ;;
+ *) prepare_device "$cmd" $@ ;;
+esac
diff --git a/apex/virtualizationservice.rc b/apex/virtualizationservice.rc
index 02b2081..8283594 100644
--- a/apex/virtualizationservice.rc
+++ b/apex/virtualizationservice.rc
@@ -19,3 +19,10 @@
interface aidl android.system.virtualizationservice
disabled
oneshot
+
+service vfio_handler /apex/com.android.virt/bin/vfio_handler
+ user root
+ group system
+ interface aidl android.system.virtualizationservice_internal.IVfioHandler
+ disabled
+ oneshot
diff --git a/apkdmverity/Android.bp b/apkdmverity/Android.bp
index fae7e99..8429263 100644
--- a/apkdmverity/Android.bp
+++ b/apkdmverity/Android.bp
@@ -40,7 +40,7 @@
name: "apkdmverity.test",
defaults: [
"apkdmverity.defaults",
- "ignorabletest.defaults",
+ "rdroidtest.defaults",
],
test_suites: ["general-tests"],
compile_multilib: "first",
diff --git a/apkdmverity/src/main.rs b/apkdmverity/src/main.rs
index 55953a9..d9e9e2b 100644
--- a/apkdmverity/src/main.rs
+++ b/apkdmverity/src/main.rs
@@ -153,12 +153,12 @@
}
#[cfg(test)]
-ignorabletest::test_main!();
+rdroidtest::test_main!();
#[cfg(test)]
mod tests {
use crate::*;
- use ignorabletest::test;
+ use rdroidtest::test;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::ops::Deref;
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 2532026..154a1d6 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -23,7 +23,7 @@
"liblog_rust",
"libnix",
"libopenssl",
- "libprotobuf_deprecated",
+ "libprotobuf",
"librpcbinder_rs",
"libthiserror",
],
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 87bdffc..64b340a 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -386,7 +386,8 @@
}
cfg_if::cfg_if! {
- if #[cfg(all(target_arch = "aarch64", target_pointer_width = "64"))] {
+ if #[cfg(all(any(target_arch = "aarch64", target_arch = "riscv64"),
+ target_pointer_width = "64"))] {
fn blk_size() -> libc::c_int { CHUNK_SIZE as libc::c_int }
} else {
fn blk_size() -> libc::c_long { CHUNK_SIZE as libc::c_long }
@@ -992,8 +993,8 @@
fn statfs(&self, _ctx: Context, _inode: Self::Inode) -> io::Result<libc::statvfs64> {
let remote_stat = self.remote_fs_stats_reader.statfs()?;
- // Safe because we are zero-initializing a struct with only POD fields. Not all fields
- // matter to FUSE. See also:
+ // SAFETY: We are zero-initializing a struct with only POD fields. Not all fields matter to
+ // FUSE. See also:
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/fuse/inode.c?h=v5.15#n460
let mut st: libc::statvfs64 = unsafe { zeroed() };
diff --git a/authfs/tests/benchmarks/Android.bp b/authfs/tests/benchmarks/Android.bp
index 110d000..cea5a81 100644
--- a/authfs/tests/benchmarks/Android.bp
+++ b/authfs/tests/benchmarks/Android.bp
@@ -17,12 +17,10 @@
test_suites: ["general-tests"],
data_device_bins_first: [
"open_then_run",
- "fsverity",
],
per_testcase_directory: true,
data: [
":authfs_test_files",
- ":CtsApkVerityTestPrebuiltFiles",
":MicrodroidTestApp",
],
required: ["MicrodroidTestPreparer"],
diff --git a/authfs/tests/benchmarks/AndroidTest.xml b/authfs/tests/benchmarks/AndroidTest.xml
index 9216006..715f352 100644
--- a/authfs/tests/benchmarks/AndroidTest.xml
+++ b/authfs/tests/benchmarks/AndroidTest.xml
@@ -34,32 +34,16 @@
<!-- Test executable -->
<option name="push-file" key="open_then_run" value="/data/local/tmp/open_then_run" />
- <option name="push-file" key="fsverity" value="/data/local/tmp/fsverity" />
<!-- Test data files -->
<option name="push-file" key="cert.der" value="/data/local/tmp/authfs/cert.der" />
<option name="push-file" key="input.4m" value="/data/local/tmp/authfs/input.4m" />
<option name="push-file" key="input.4m.fsv_meta"
value="/data/local/tmp/authfs/input.4m.fsv_meta" />
-
- <!-- Just pick a file with signature that can be trused on the device. -->
- <option name="push-file" key="CtsApkVerityTestAppPrebuilt.apk"
- value="/data/local/tmp/authfs/input.apk" />
- <option name="push-file" key="CtsApkVerityTestAppPrebuilt.apk.fsv_sig"
- value="/data/local/tmp/authfs/input.apk.fsv_sig" />
</target_preparer>
<target_preparer class="com.android.microdroid.test.preparer.DisableMicrodroidDebugPolicyPreparer" />
- <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
- <option name="throw-if-cmd-fail" value="true" />
- <!-- Now that the files are pushed to the device, enable fs-verity for the targeting file.
- It works because the signature is trusted on all CTS compatible devices. -->
- <option name="run-command"
- value="cd /data/local/tmp/authfs;
- ../fsverity enable input.apk --signature=input.apk.fsv_sig" />
- </target_preparer>
-
<test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
<option name="jar" value="AuthFsBenchmarks.jar" />
</test>
diff --git a/authfs/tests/hosttests/Android.bp b/authfs/tests/hosttests/Android.bp
index 4b8151d..83ef853 100644
--- a/authfs/tests/hosttests/Android.bp
+++ b/authfs/tests/hosttests/Android.bp
@@ -20,7 +20,6 @@
per_testcase_directory: true,
data: [
":authfs_test_files",
- ":CtsApkVerityTestPrebuiltFiles",
":MicrodroidTestApp",
],
}
diff --git a/authfs/tests/hosttests/AndroidTest.xml b/authfs/tests/hosttests/AndroidTest.xml
index 2ccc45f..5920630 100644
--- a/authfs/tests/hosttests/AndroidTest.xml
+++ b/authfs/tests/hosttests/AndroidTest.xml
@@ -50,18 +50,13 @@
<option name="push-file" key="input.4m.fsv_meta.bad_merkle"
value="/data/local/tmp/authfs/input.4m.fsv_meta.bad_merkle" />
- <!-- Just pick a file with signature that can be trused on the device. -->
- <option name="push-file" key="CtsApkVerityTestAppPrebuilt.apk"
- value="/data/local/tmp/authfs/input.apk" />
- <option name="push-file" key="CtsApkVerityTestAppPrebuilt.apk.fsv_sig"
- value="/data/local/tmp/authfs/input.apk.fsv_sig" />
+ <option name="push-file" key="input.4m" value="/data/local/tmp/authfs/input.file" />
</target_preparer>
<target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
<option name="throw-if-cmd-fail" value="true" />
- <!-- Now that the files are pushed to the device, enable fs-verity for the targeting file.
- It works because the signature is trusted on all CTS compatible devices. -->
- <option name="run-command" value="cd /data/local/tmp/authfs; ../fsverity enable input.apk --signature=input.apk.fsv_sig" />
+ <!-- Now that the files are pushed to the device, enable fs-verity for the targeting file. -->
+ <option name="run-command" value="cd /data/local/tmp/authfs; ../fsverity enable input.file" />
</target_preparer>
<test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
diff --git a/authfs/tests/hosttests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/hosttests/java/src/com/android/fs/AuthFsHostTest.java
index 440f5ca..d0a7c66 100644
--- a/authfs/tests/hosttests/java/src/com/android/fs/AuthFsHostTest.java
+++ b/authfs/tests/hosttests/java/src/com/android/fs/AuthFsHostTest.java
@@ -145,17 +145,17 @@
@Test
public void testReadWithFsverityVerification_FdServerUsesRealFsverityData() throws Exception {
- // Setup (fs-verity is enabled for input.apk in AndroidTest.xml)
- runFdServerOnAndroid("--open-ro 3:input.apk", "--ro-fds 3");
- String expectedDigest = sAndroid.run(
- FSVERITY_BIN + " digest --compact " + TEST_DIR + "/input.apk");
+ // Setup (fs-verity is enabled for input.file in AndroidTest.xml)
+ runFdServerOnAndroid("--open-ro 3:input.file", "--ro-fds 3");
+ String expectedDigest =
+ sAndroid.run(FSVERITY_BIN + " digest --compact " + TEST_DIR + "/input.file");
runAuthFsOnMicrodroid("--remote-ro-file 3:sha256-" + expectedDigest);
// Action
String actualHash = computeFileHash(sMicrodroid, MOUNT_DIR + "/3");
// Verify
- String expectedHash = computeFileHash(sAndroid, TEST_DIR + "/input.apk");
+ String expectedHash = computeFileHash(sAndroid, TEST_DIR + "/input.file");
assertEquals("Inconsistent hash from /authfs/3: ", expectedHash, actualHash);
}
diff --git a/compos/Android.bp b/compos/Android.bp
index c120b0f..2f6be98 100644
--- a/compos/Android.bp
+++ b/compos/Android.bp
@@ -18,7 +18,7 @@
"libminijail_rust",
"libnix",
"libodsign_proto_rust",
- "libprotobuf_deprecated",
+ "libprotobuf",
"libregex",
"librpcbinder_rs",
"librustutils",
diff --git a/compos/common/binder.rs b/compos/common/binder.rs
index d3550f7..aea0072 100644
--- a/compos/common/binder.rs
+++ b/compos/common/binder.rs
@@ -16,7 +16,7 @@
//! Helper for converting Error types to what Binder expects
-use binder::{Result as BinderResult, Status};
+use binder::{IntoBinderResult, Result as BinderResult};
use log::warn;
use std::fmt::Debug;
@@ -24,9 +24,9 @@
/// preserving the content as far as possible.
/// Also log the error if there is one.
pub fn to_binder_result<T, E: Debug>(result: Result<T, E>) -> BinderResult<T> {
- result.map_err(|e| {
+ result.or_service_specific_exception_with(-1, |e| {
let message = format!("{:?}", e);
- warn!("Returning binder error: {}", &message);
- Status::new_service_specific_error_str(-1, Some(message))
+ warn!("Returning binder error: {message}");
+ message
})
}
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index b03addf..a8a176a 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -24,7 +24,10 @@
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
CpuTopology::CpuTopology,
IVirtualizationService::IVirtualizationService,
- VirtualMachineAppConfig::{DebugLevel::DebugLevel, Payload::Payload, VirtualMachineAppConfig},
+ VirtualMachineAppConfig::{
+ CustomConfig::CustomConfig, DebugLevel::DebugLevel, Payload::Payload,
+ VirtualMachineAppConfig,
+ },
VirtualMachineConfig::VirtualMachineConfig,
};
use anyhow::{anyhow, bail, Context, Result};
@@ -128,16 +131,24 @@
protectedVm: protected_vm,
memoryMib: parameters.memory_mib.unwrap_or(0), // 0 means use the default
cpuTopology: cpu_topology,
- taskProfiles: parameters.task_profiles.clone(),
- gdbPort: 0, // Don't start gdb-server
- customKernelImage: None,
+ customConfig: Some(CustomConfig {
+ taskProfiles: parameters.task_profiles.clone(),
+ ..Default::default()
+ }),
});
// Let logs go to logcat.
let (console_fd, log_fd) = (None, None);
let callback = Box::new(Callback {});
- let instance = VmInstance::create(service, &config, console_fd, log_fd, Some(callback))
- .context("Failed to create VM")?;
+ let instance = VmInstance::create(
+ service,
+ &config,
+ console_fd,
+ /*console_in_fd */ None,
+ log_fd,
+ Some(callback),
+ )
+ .context("Failed to create VM")?;
instance.start()?;
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index f66de32..b0294dd 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -22,7 +22,7 @@
"liblibc",
"liblog_rust",
"libodsign_proto_rust",
- "libprotobuf_deprecated",
+ "libprotobuf",
"librustutils",
"libshared_child",
"libvmclient",
diff --git a/compos/composd/src/fd_server_helper.rs b/compos/composd/src/fd_server_helper.rs
index 777ec27..24371b5 100644
--- a/compos/composd/src/fd_server_helper.rs
+++ b/compos/composd/src/fd_server_helper.rs
@@ -107,8 +107,9 @@
fn create_pipe() -> Result<(File, File)> {
let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
- // SAFETY: We are the sole owners of these fds as they were just created.
+ // SAFETY: We are the sole owner of raw_read and it is valid as it was just created.
let read_fd = unsafe { File::from_raw_fd(raw_read) };
+ // SAFETY: We are the sole owner of raw_write and it is valid as it was just created.
let write_fd = unsafe { File::from_raw_fd(raw_write) };
Ok((read_fd, write_fd))
}
diff --git a/compos/src/artifact_signer.rs b/compos/src/artifact_signer.rs
index d3843fc..76da00a 100644
--- a/compos/src/artifact_signer.rs
+++ b/compos/src/artifact_signer.rs
@@ -63,7 +63,7 @@
/// with accompanying sigature file.
pub fn write_info_and_signature(self, info_path: &Path) -> Result<()> {
let mut info = OdsignInfo::new();
- info.mut_file_hashes().extend(self.file_digests.into_iter());
+ info.file_hashes.extend(self.file_digests);
let bytes = info.write_to_bytes()?;
let signature = compos_key::sign(&bytes)?;
diff --git a/compos/src/compsvc.rs b/compos/src/compsvc.rs
index 8febd52..fe83ba2 100644
--- a/compos/src/compsvc.rs
+++ b/compos/src/compsvc.rs
@@ -33,7 +33,9 @@
use authfs_aidl_interface::aidl::com::android::virt::fs::IAuthFsService::{
IAuthFsService, AUTHFS_SERVICE_SOCKET_NAME,
};
-use binder::{BinderFeatures, ExceptionCode, Interface, Result as BinderResult, Status, Strong};
+use binder::{
+ BinderFeatures, ExceptionCode, Interface, IntoBinderResult, Result as BinderResult, Strong,
+};
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::{
BnCompOsService, ICompOsService, OdrefreshArgs::OdrefreshArgs,
};
@@ -66,29 +68,23 @@
fn initializeSystemProperties(&self, names: &[String], values: &[String]) -> BinderResult<()> {
let mut initialized = self.initialized.write().unwrap();
if initialized.is_some() {
- return Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_STATE,
- Some(format!("Already initialized: {:?}", initialized)),
- ));
+ return Err(format!("Already initialized: {initialized:?}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE);
}
*initialized = Some(false);
if names.len() != values.len() {
- return Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_ARGUMENT,
- Some(format!(
- "Received inconsistent number of keys ({}) and values ({})",
- names.len(),
- values.len()
- )),
- ));
+ return Err(format!(
+ "Received inconsistent number of keys ({}) and values ({})",
+ names.len(),
+ values.len()
+ ))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
}
for (name, value) in zip(names, values) {
if !is_system_property_interesting(name) {
- return Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_ARGUMENT,
- Some(format!("Received invalid system property {}", &name)),
- ));
+ return Err(format!("Received invalid system property {name}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
}
let result = system_properties::write(name, value);
if result.is_err() {
@@ -103,10 +99,8 @@
fn odrefresh(&self, args: &OdrefreshArgs) -> BinderResult<i8> {
let initialized = *self.initialized.read().unwrap();
if !initialized.unwrap_or(false) {
- return Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_STATE,
- Some("Service has not been initialized"),
- ));
+ return Err("Service has not been initialized")
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE);
}
to_binder_result(self.do_odrefresh(args))
diff --git a/compos/src/compsvc_main.rs b/compos/src/compsvc_main.rs
index 77e2daa..b0fc323 100644
--- a/compos/src/compsvc_main.rs
+++ b/compos/src/compsvc_main.rs
@@ -50,11 +50,11 @@
debug!("compsvc is starting as a rpc service.");
let param = ptr::null_mut();
let mut service = compsvc::new_binder()?.as_binder();
+ let service = service.as_native_mut() as *mut AIBinder;
+ // SAFETY: We hold a strong pointer, so the raw pointer remains valid. The bindgen AIBinder
+ // is the same type as sys::AIBinder. It is safe for on_ready to be invoked at any time, with
+ // any parameter.
unsafe {
- // SAFETY: We hold a strong pointer, so the raw pointer remains valid. The bindgen AIBinder
- // is the same type as sys::AIBinder.
- let service = service.as_native_mut() as *mut AIBinder;
- // SAFETY: It is safe for on_ready to be invoked at any time, with any parameter.
AVmPayload_runVsockRpcServer(service, COMPOS_VSOCK_PORT, Some(on_ready), param);
}
Ok(())
diff --git a/compos/tests/AndroidTest.xml b/compos/tests/AndroidTest.xml
index 4b414f1..e35b874 100644
--- a/compos/tests/AndroidTest.xml
+++ b/compos/tests/AndroidTest.xml
@@ -15,6 +15,7 @@
-->
<configuration description="Tests for CompOS">
<option name="config-descriptor:metadata" key="mainline-param" value="com.google.android.art.apex" />
+ <option name="config-descriptor:metadata" key="mainline-param" value="com.android.art.apex" />
<!-- Only run tests if the device under test is SDK version 33 (Android 13) or above. -->
<object type="module_controller"
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index 1cebd1a..4851321 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -23,6 +23,8 @@
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
+import static org.junit.Assume.assumeFalse;
+
import android.platform.test.annotations.RootPermissionTest;
import com.android.microdroid.test.host.CommandRunner;
@@ -81,6 +83,8 @@
@Before
public void setUp() throws Exception {
assumeDeviceIsCapable(getDevice());
+ // Test takes too long to run on Cuttlefish (b/292824951).
+ assumeFalse("Skipping test on Cuttlefish", isCuttlefish());
String value = getDevice().getProperty(SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME);
if (value == null) {
@@ -188,7 +192,7 @@
.runTimedCmd(
10000,
validator.getAbsolutePath(),
- "verify-dice-chain",
+ "dice-chain",
bcc_file.getAbsolutePath());
assertWithMessage("hwtrust failed").about(command_results()).that(result).isSuccess();
}
diff --git a/demo_native/main.cpp b/demo_native/main.cpp
index fa87549..bc42036 100644
--- a/demo_native/main.cpp
+++ b/demo_native/main.cpp
@@ -223,10 +223,11 @@
std::shared_ptr<IVirtualMachine> vm;
VirtualMachineConfig config = std::move(app_config);
- ScopedFileDescriptor console_fd(fcntl(fileno(stdout), F_DUPFD_CLOEXEC));
+ ScopedFileDescriptor console_out_fd(fcntl(fileno(stdout), F_DUPFD_CLOEXEC));
+ ScopedFileDescriptor console_in_fd(fcntl(fileno(stdin), F_DUPFD_CLOEXEC));
ScopedFileDescriptor log_fd(fcntl(fileno(stdout), F_DUPFD_CLOEXEC));
- ScopedAStatus ret = service.createVm(config, console_fd, log_fd, &vm);
+ ScopedAStatus ret = service.createVm(config, console_out_fd, console_in_fd, log_fd, &vm);
if (!ret.isOk()) {
return Error() << "Failed to create VM";
}
diff --git a/docs/custom_vm.md b/docs/custom_vm.md
new file mode 100644
index 0000000..270ea36
--- /dev/null
+++ b/docs/custom_vm.md
@@ -0,0 +1,23 @@
+# Custom VM
+
+You can spawn your own custom VMs by passing a JSON config file to the
+VirtualizationService via the `vm` tool on a rooted AVF-enabled device. If your
+device is attached over ADB, you can run:
+
+```shell
+cat > vm_config.json <<EOF
+{
+ "kernel": "/data/local/tmp/kernel",
+ "initrd": "/data/local/tmp/ramdisk",
+ "params": "rdinit=/bin/init"
+}
+EOF
+adb root
+adb push <kernel> /data/local/tmp/kernel
+adb push <ramdisk> /data/local/tmp/ramdisk
+adb push vm_config.json /data/local/tmp/vm_config.json
+adb shell "/apex/com.android.virt/bin/vm run /data/local/tmp/vm_config.json"
+```
+
+The `vm` command also has other subcommands for debugging; run
+`/apex/com.android.virt/bin/vm help` for details.
diff --git a/docs/debug/README.md b/docs/debug/README.md
new file mode 100644
index 0000000..1e5f096
--- /dev/null
+++ b/docs/debug/README.md
@@ -0,0 +1,121 @@
+# Debugging protected VMs
+
+AVF is largely about protected VMs. This in turn means that anything that is
+happening inside the VM cannot be observed from outside of the VM. But as a
+developer, you need to be able to look into it when there’s an error in your
+VM. To satisfy such contradictory needs, AVF allows you to start a protected VM
+in a debuggable mode and provides a bunch of debugging mechanisms you can use
+to better understand the behavior of the VM and diagnose issues.
+
+Note: running a protected VM in debuggable mode introduces many loopholes
+which can be used to nullify the protection provided by the hypervisor.
+Therefore, debuggable mode should never be used in production.
+
+## Enable debugging
+
+The following sections describe the two ways debugging can be enabled.
+
+### Debug level
+
+Debug level is a per-VM property which indicates how debuggable the VM is.
+There are currently two levels defined: NONE and FULL. NONE means that the VM
+is not debuggable at all, and FULL means that [all the debugging
+features](#debugging-features) are supported.
+
+Debug level is by default NONE. You can set it to FULL either via a Java API
+call in your app or via a command line argument `--debug` as follows:
+
+```java
+VirtualMachineConfig.Builder.setDebugLevel(DEBUG_LEVEL_FULL);
+```
+
+or
+
+```shell
+adb shell /apex/com.android.virt/bin/vm run-microdroid --debug full
+```
+
+or
+
+```shell
+m vm_shell
+vm_shell start-microdroid --auto-connect -- --protected --debug full
+```
+
+Note: `--debug full` is the default option when omitted. You need to explicitly
+use `--debug none` to set the debug level to NONE.
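+
+For context, a minimal sketch of building a debuggable VM config from app code
+(assuming `context` is your app's `Context`; the payload library name is just a
+placeholder) might look like this:
+
+```java
+VirtualMachineConfig config =
+        new VirtualMachineConfig.Builder(context)
+                .setProtectedVm(true)
+                .setPayloadBinaryName("MyPayload.so")
+                .setDebugLevel(VirtualMachineConfig.DEBUG_LEVEL_FULL)
+                .build();
+```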
+
+### Debug policy
+
+Debug policy is a per-device property which forcibly enables selected debugging
+features, even for VMs with debug level NONE.
+
+The main purpose of debug policy is in-field debugging by platform developers
+(device makers, SoC vendors, etc.). To understand it, imagine that you have an
+application that uses a pVM. It’s configured as debug level NONE because you
+have finished development and team-level testing. However, you then get a bug
+report from your QA team or from beta testers. To fix the bug, you need to be
+able to look into the pVM, but you do not want to change the source code to
+make the VM debuggable and rebuild the entire software, because that may hurt
+the reproducibility of the bug.
+
+Note: Not every device is guaranteed to support debug policy. It is up to the
+device manufacturer to implement this in their bootloader. Google Pixel
+devices, for example, support this starting with the Pixel 7 and 7 Pro; Pixel 6
+and 6 Pro don't support debug policy.
+
+On Pixel phones that support debug policy, it is provisioned by installing a
+device tree overlay like the one below to the Pixel-specific partition `dpm`.
+
+```
+/ {
+    fragment@avf {
+        target-path = "/";
+
+        __overlay__ {
+            avf {
+                guest {
+                    common {
+                        log = <1>; // Enable kernel log and logcat
+                        ramdump = <1>; // Enable ramdump
+                    };
+                    microdroid {
+                        adb = <1>; // Enable ADB connection
+                    };
+                };
+            };
+        };
+    };
+}; /* end of avf */
+```
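+
+One possible way to compile and install such an overlay (a sketch; the overlay
+source filename is a placeholder and the exact flashing procedure is
+device-specific) is:
+
+```shell
+dtc -I dts -O dtb -o dpm.dtbo debug_policy.dts
+adb reboot bootloader
+fastboot flash dpm dpm.dtbo
+fastboot reboot
+```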
+
+To leave a specific debugging feature disabled, set the corresponding property
+to a value other than `<1>`, or delete the property.
+
+For reference, on Pixel phones the debug policy is loaded as follows:
+
+1. Bootloader loads it from the `dpm` partition and verifies it.
+1. Bootloader appends the loaded debug policy as the [configuration
+ data](../../pvmfw/README.md#configuration-data) of the pvmfw.
+1. When a pVM is started, pvmfw [overlays][apply_debug_policy] the debug policy to the baseline
+ device tree from crosvm.
+1. The OS payload (e.g. Microdroid) [reads][read_debug_policy] the device tree and enables the
+   specific debugging features accordingly.
+
+**Note**: Bootloader MUST NOT load debug policy when the bootloader is in LOCKED state.
+
+[apply_debug_policy]: https://cs.android.com/android/platform/superproject/main/+/main:packages/modules/Virtualization/pvmfw/src/fdt.rs;drc=0d52747770baa14d44c0779b5505095b4251f2e9;l=790
+[read_debug_policy]: https://cs.android.com/android/platform/superproject/main/+/main:packages/modules/Virtualization/microdroid_manager/src/main.rs;drc=65c9f1f0eee4375535f2025584646a0dbb0ea25c;l=834
+
+## Debugging features
+
+AVF currently supports the following debugging features:
+
+* ADB connection (only for Microdroid)
+* Capturing console output
+* Capturing logcat output (only for Microdroid)
+* [Capturing kernel ramdump](ramdump.md) (only for Microdroid)
+* Capturing userspace crash dump (only for Microdroid)
+* [Attaching GDB to the kernel](gdb_kernel.md)
+* [Attaching GDB to the userspace process](gdb_userspace.md) (only for Microdroid)
+* [Tracing hypervisor events](tracing.md)
diff --git a/docs/debug/gdb.md b/docs/debug/gdb_kernel.md
similarity index 100%
rename from docs/debug/gdb.md
rename to docs/debug/gdb_kernel.md
diff --git a/docs/debug/gdb_userspace.md b/docs/debug/gdb_userspace.md
new file mode 100644
index 0000000..c8af702
--- /dev/null
+++ b/docs/debug/gdb_userspace.md
@@ -0,0 +1,18 @@
+# Debugging the payload on microdroid
+
+Like on a normal adb device, you can debug native processes running in a
+Microdroid-based VM using the [`lldbclient.py`][lldbclient] script, either by
+running a new process or by attaching to an existing one. Start and connect to
+the VM with the `vm_shell` tool, then run `lldbclient.py`.
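+
+If the VM is not already running, one way to start a debuggable Microdroid VM
+with an adb connection (the same command used in the
+[getting started guide](../getting_started.md); `--auto-connect` is assumed to
+forward the VM's adbd to `localhost:8000`) is:
+
+```sh
+packages/modules/Virtualization/vm/vm_shell.sh start-microdroid --auto-connect -- --protected --debug full
+```
+
+Once connected, remount `/data` as executable and run `lldbclient.py`: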
+
+```sh
+adb -s localhost:8000 shell 'mount -o remount,exec /data'
+development/scripts/lldbclient.py -s localhost:8000 --chroot . --user '' \
+ (-p PID | -n NAME | -r ...)
+```
+
+**Note:** We need to pass `--chroot .` to skip verifying the device, because
+Microdroid doesn't match the host's lunch target. We also need to pass
+`--user ''` as there is no `su` binary in Microdroid.
+
+[lldbclient]: https://android.googlesource.com/platform/development/+/refs/heads/main/scripts/lldbclient.py
diff --git a/docs/getting_started.md b/docs/getting_started.md
new file mode 100644
index 0000000..74f2012
--- /dev/null
+++ b/docs/getting_started.md
@@ -0,0 +1,156 @@
+# Getting started with Android Virtualization Framework
+
+## Step 1: Prepare a device
+
+We support the following devices:
+
+* aosp\_panther (Pixel 7)
+* aosp\_cheetah (Pixel 7 Pro)
+* aosp\_oriole (Pixel 6)
+* aosp\_raven (Pixel 6 Pro)
+* aosp\_felix (Pixel Fold)
+* aosp\_tangopro (Pixel Tablet)
+* aosp\_cf\_x86\_64\_phone (Cuttlefish a.k.a. Cloud Android). Follow [these
+  instructions](https://source.android.com/docs/setup/create/cuttlefish-use)
+  to use it.
+
+### Note on Pixel 6 and 6 Pro
+AVF is shipped in Pixel 6 and 6 Pro, but isn't enabled by default. To enable
+it, follow the instructions below:
+
+1. If the device is running Android 13 or earlier, upgrade to Android 14.
+
+1. Once upgraded to Android 14, execute the following command to enable pKVM.
+ ```shell
+ adb reboot bootloader
+ fastboot flashing unlock
+ fastboot oem pkvm enable
+ fastboot reboot
+ ```
+### Note on Cuttlefish
+Cuttlefish does not support protected VMs. Only non-protected VMs are
+supported.
+
+## Step 2: Build Android image
+
+This step is optional unless you want to build AVF by yourself or try the
+in-development version of AVF.
+
+AVF is implemented as an APEX named `com.android.virt`. However, to install it
+on your device (be it Pixel or Cuttlefish), you first need to rebuild the
+entire Android platform from AOSP. This is because the official Android build
+on your device is release-key signed, so you can't install your custom-built,
+test-key-signed AVF APEX onto it.
+
+### Pixel
+
+1. [Download](https://source.android.com/docs/setup/download/downloading)
+ source code from AOSP. Use the `main` branch.
+
+1. [Download](https://developers.google.com/android/blobs-preview) the preview
+ vendor blob that matches your device.
+
+1. [Build](https://source.android.com/docs/setup/build/building) the `aosp_`
+ variant of your device. For example, if your device is Pixel 7 (`panther`),
+ build `aosp_panther`.
+
+1. [Flash](https://source.android.com/docs/setup/build/running) the built
+ images to the device.
+
+
+### Cuttlefish
+
+1. [Download](https://source.android.com/docs/setup/download/downloading)
+ source code from AOSP. Use the `main` branch.
+
+1. Build Cuttlefish:
+ ```shell
+ source build/envsetup.sh
+ lunch aosp_cf_x86_64_phone-userdebug
+ m
+ ```
+
+1. Run Cuttlefish:
+ ```shell
+ cvd start
+ ```
+
+## Step 3: Build AVF
+
+You can then build and install AVF on the device as follows (repeat as needed while iterating):
+
+1. Build the AVF APEX.
+ ```sh
+ banchan com.android.virt aosp_arm64
+ UNBUNDLED_BUILD_SDKS_FROM_SOURCE=true m apps_only dist
+ ```
+ Replace `aosp_arm64` with `aosp_x86_64` if you are building for Cuttlefish.
+
+1. Install the AVF APEX to the device.
+ ```sh
+ adb install out/dist/com.android.virt.apex
+ adb reboot; adb wait-for-device
+ ```
+
+## Step 4: Run a Microdroid VM
+
+[Microdroid](../microdroid/README.md) is a lightweight version of Android that
+is intended to run on a pVM. You can run a Microdroid-based VM with an empty
+payload using the following command:
+
+```shell
+packages/modules/Virtualization/vm/vm_shell.sh start-microdroid --auto-connect -- --protected
+```
+
+You will see log messages like the following:
+
+```
+found path /apex/com.android.virt/app/EmptyPayloadAppGoogle@MASTER/EmptyPayloadAppGoogle.apk
+creating work dir /data/local/tmp/microdroid/7CI6QtktSluD3OZgv
+apk.idsig path: /data/local/tmp/microdroid/7CI6QtktSluD3OZgv/apk.idsig
+instance.img path: /data/local/tmp/microdroid/7CI6QtktSluD3OZgv/instance.img
+Created VM from "/apex/com.android.virt/app/EmptyPayloadAppGoogle@MASTER/EmptyPayloadAppGoogle.apk"!PayloadConfig(VirtualMachinePayloadConfig { payloadBinaryName: "MicrodroidEmptyPayloadJniLib.so" }) with CID 2052, state is STARTING.
+[2023-07-07T14:50:43.420766770+09:00 INFO crosvm] crosvm started.
+[2023-07-07T14:50:43.422545090+09:00 INFO crosvm] CLI arguments parsed.
+[2023-07-07T14:50:43.440984015+09:00 INFO crosvm::crosvm::sys::unix::device_helpers] Trying to attach block device: /proc/self/fd/49
+[2023-07-07T14:50:43.441730922+09:00 INFO crosvm::crosvm::sys::unix::device_helpers] Trying to attach block device: /proc/self/fd/54
+[2023-07-07T14:50:43.462055141+09:00 INFO crosvm::crosvm::sys::unix::device_helpers] Trying to attach block device: /proc/self/fd/63
+[WARN] Config entry DebugPolicy uses non-zero offset with zero size
+[WARN] Config entry DebugPolicy uses non-zero offset with zero size
+[INFO] pVM firmware
+avb_slot_verify.c:443: ERROR: initrd_normal: Hash of data does not match digest in descriptor.
+[INFO] device features: SEG_MAX | RO | BLK_SIZE | RING_EVENT_IDX | VERSION_1 | ACCESS_PLATFORM
+[INFO] config: 0x201a000
+[INFO] found a block device of size 50816KB
+[INFO] device features: SEG_MAX | BLK_SIZE | FLUSH | DISCARD | WRITE_ZEROES | RING_EVENT_IDX | VERSION_1 | ACCESS_PLATFORM
+[INFO] config: 0x2022000
+[INFO] found a block device of size 10304KB
+[INFO] No debug policy found.
+[INFO] Starting payload...
+<omitted>
+07-07 05:52:01.322 69 69 I vm_payload: vm_payload: Notified host payload ready successfully
+07-07 05:52:01.364 70 70 I adbd : persist.adb.watchdog set to ''
+07-07 05:52:01.364 70 70 I adbd : persist.sys.test_harness set to ''
+07-07 05:52:01.365 70 70 I adbd : adb watchdog timeout set to 600 seconds
+07-07 05:52:01.366 70 70 I adbd : Setup mdns on port= 5555
+07-07 05:52:01.366 70 70 I adbd : adbd listening on vsock:5555
+07-07 05:52:01.366 70 70 I adbd : adbd started
+#
+```
+
+The `--auto-connect` option gives you an adb shell connection to the VM. The
+shell prompt (`#`) at the end of the log comes from that connection.
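+
+If you later need another shell into the same VM, you can reconnect manually.
+With the defaults used by `vm_shell.sh`, the VM's adbd is assumed to be
+forwarded to `localhost:8000`:
+
+```shell
+adb connect localhost:8000
+adb -s localhost:8000 shell
+```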
+
+## Step 5: Run tests
+
+There are various tests that spawn guest VMs and check different aspects of the
+architecture. They can all be run via `atest`.
+
+```shell
+atest MicrodroidHostTestCases
+atest MicrodroidTestApp
+```
+
+If you run into problems, inspect the logs produced by `atest`. Their location
+is printed at the end. The `host_log_*.zip` file should contain the output of
+individual commands as well as VM logs.
diff --git a/docs/getting_started/index.md b/docs/getting_started/index.md
deleted file mode 100644
index 0e4f2be..0000000
--- a/docs/getting_started/index.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# Getting started with Protected Virtual Machines
-
-## Prepare a device
-
-First you will need a device that is capable of running virtual machines. On arm64, this means a
-device which boots the kernel in EL2 and the kernel was built with KVM enabled. Unfortunately at the
-moment, we don't have an arm64 device in AOSP which does that. Instead, use cuttlefish which
-provides the same functionalities except that the virtual machines are not protected from the host
-(i.e. Android). This however should be enough for functional testing.
-
-We support the following device:
-
-* aosp_cf_x86_64_phone (Cuttlefish a.k.a. Cloud Android)
-* oriole/raven (Pixel 6, and 6 Pro)
-* panther/cheetah (Pixel 7, and 7 Pro)
-
-### Cuttlefish
-
-Building Cuttlefish
-
-```shell
-source build/envsetup.sh
-lunch aosp_cf_x86_64_phone-userdebug
-m
-```
-
-Run Cuttlefish locally by
-
-```shell
-acloud create --local-instance --local-image
-```
-
-### Google Pixel phones
-
-If the device is running Android 13 or earlier, join the [Android Beta
-Program](https://developer.android.com/about/versions/14/get#on_pixel) to upgrade to Android 14
-Beta.
-
-Once upgraded to Android 14, and if you are using Pixel 6 or 6 Pro, execute the following command to
-enable pKVM. You don't need to do this for Pixel 7 and 7 Pro.
-
-```shell
-adb reboot bootloader
-fastboot flashing unlock
-fastboot oem pkvm enable
-fastboot reboot
-```
-
-## Running demo app
-
-The instruction is [here](../../demo/README.md).
-
-## Running tests
-
-There are various tests that spawn guest VMs and check different aspects of the architecture. They
-all can run via `atest`.
-
-```shell
-atest MicrodroidHostTestCases
-atest MicrodroidTestApp
-```
-
-If you run into problems, inspect the logs produced by `atest`. Their location is printed at the
-end. The `host_log_*.zip` file should contain the output of individual commands as well as VM logs.
-
-## Spawning your own VMs with custom kernel
-
-You can spawn your own VMs by passing a JSON config file to the VirtualizationService via the `vm`
-tool on a rooted KVM-enabled device. If your device is attached over ADB, you can run:
-
-```shell
-cat > vm_config.json
-{
- "kernel": "/data/local/tmp/kernel",
- "initrd": "/data/local/tmp/ramdisk",
- "params": "rdinit=/bin/init"
-}
-adb root
-adb push <kernel> /data/local/tmp/kernel
-adb push <ramdisk> /data/local/tmp/ramdisk
-adb push vm_config.json /data/local/tmp/vm_config.json
-adb shell "start virtualizationservice"
-adb shell "/apex/com.android.virt/bin/vm run /data/local/tmp/vm_config.json"
-```
-
-The `vm` command also has other subcommands for debugging; run `/apex/com.android.virt/bin/vm help`
-for details.
-
-## Spawning your own VMs with custom pvmfw
-
-Set system property `hypervisor.pvmfw.path` to custom `pvmfw` on the device before using `vm` tool.
-`virtualizationservice` will pass the specified `pvmfw` to `crosvm` for protected VMs.
-
-```shell
-adb push pvmfw.img /data/local/tmp/pvmfw.img
-adb root # required for setprop
-adb shell setprop hypervisor.pvmfw.path /data/local/tmp/pvmfw.img
-```
-
-## Spawning your own VMs with Microdroid
-
-[Microdroid](../../microdroid/README.md) is a lightweight version of Android that is intended to run
-on pVM. You can run a Microdroid with empty payload using the following command:
-
-```shell
-adb shell /apex/com.android.virt/bin/vm run-microdroid --debug full
-```
-
-## Building and updating CrosVM and VirtualizationService {#building-and-updating}
-
-You can update CrosVM and the VirtualizationService by updating the `com.android.virt` APEX instead
-of rebuilding the entire image.
-
-```shell
-banchan com.android.virt aosp_arm64 // or aosp_x86_64 if the device is cuttlefish
-UNBUNDLED_BUILD_SDKS_FROM_SOURCE=true m apps_only dist
-adb install out/dist/com.android.virt.apex
-adb reboot
-```
-
-## Building and updating kernel inside Microdroid
-
-The instruction is [here](../../microdroid/kernel/README.md).
diff --git a/encryptedstore/Android.bp b/encryptedstore/Android.bp
index 94ebcfc..8ba5016 100644
--- a/encryptedstore/Android.bp
+++ b/encryptedstore/Android.bp
@@ -14,6 +14,7 @@
"libclap",
"libhex",
"liblog_rust",
+ "libmicrodroid_uids",
"libnix",
"libdm_rust",
],
diff --git a/encryptedstore/src/main.rs b/encryptedstore/src/main.rs
index 86fa6da..2a698ea 100644
--- a/encryptedstore/src/main.rs
+++ b/encryptedstore/src/main.rs
@@ -21,24 +21,32 @@
use anyhow::{ensure, Context, Result};
use clap::arg;
use dm::{crypt::CipherType, util};
-use log::info;
+use log::{error, info};
use std::ffi::CString;
use std::fs::{create_dir_all, OpenOptions};
use std::io::{Error, Read, Write};
use std::os::unix::ffi::OsStrExt;
-use std::os::unix::fs::FileTypeExt;
+use std::os::unix::fs::{FileTypeExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::process::Command;
const MK2FS_BIN: &str = "/system/bin/mke2fs";
const UNFORMATTED_STORAGE_MAGIC: &str = "UNFORMATTED-STORAGE";
-fn main() -> Result<()> {
+fn main() {
android_logger::init_once(
android_logger::Config::default()
.with_tag("encryptedstore")
.with_min_level(log::Level::Info),
);
+
+ if let Err(e) = try_main() {
+ error!("{:?}", e);
+ std::process::exit(1)
+ }
+}
+
+fn try_main() -> Result<()> {
info!("Starting encryptedstore binary");
let matches = clap_command().get_matches();
@@ -47,10 +55,12 @@
let key = matches.get_one::<String>("key").unwrap();
let mountpoint = Path::new(matches.get_one::<String>("mountpoint").unwrap());
// Note this error context is used in MicrodroidTests.
- encryptedstore_init(blkdevice, key, mountpoint).context(format!(
- "Unable to initialize encryptedstore on {:?} & mount at {:?}",
- blkdevice, mountpoint
- ))?;
+ encryptedstore_init(blkdevice, key, mountpoint).with_context(|| {
+ format!(
+ "Unable to initialize encryptedstore on {:?} & mount at {:?}",
+ blkdevice, mountpoint
+ )
+ })?;
Ok(())
}
@@ -65,7 +75,7 @@
fn encryptedstore_init(blkdevice: &Path, key: &str, mountpoint: &Path) -> Result<()> {
ensure!(
std::fs::metadata(blkdevice)
- .context(format!("Failed to get metadata of {:?}", blkdevice))?
+ .with_context(|| format!("Failed to get metadata of {:?}", blkdevice))?
.file_type()
.is_block_device(),
"The path:{:?} is not of a block device",
@@ -82,7 +92,12 @@
info!("Freshly formatting the crypt device");
format_ext4(&crypt_device)?;
}
- mount(&crypt_device, mountpoint).context(format!("Unable to mount {:?}", crypt_device))?;
+ mount(&crypt_device, mountpoint)
+ .with_context(|| format!("Unable to mount {:?}", crypt_device))?;
+ if needs_formatting {
+ std::fs::set_permissions(mountpoint, PermissionsExt::from_mode(0o770))
+ .context("Failed to chmod root directory")?;
+ }
Ok(())
}
@@ -124,6 +139,11 @@
}
fn format_ext4(device: &Path) -> Result<()> {
+ let root_dir_uid_gid = format!(
+ "root_owner={}:{}",
+ microdroid_uids::ROOT_UID,
+ microdroid_uids::MICRODROID_PAYLOAD_GID
+ );
let mkfs_options = [
"-j", // Create appropriate sized journal
/* metadata_csum: enabled for filesystem integrity
@@ -131,20 +151,22 @@
* 64bit: larger fields afforded by this feature enable full-strength checksumming.
*/
"-O metadata_csum, extents, 64bit",
- "-b 4096", // block size in the filesystem
+ "-b 4096", // block size in the filesystem,
+ "-E",
+ &root_dir_uid_gid,
];
let mut cmd = Command::new(MK2FS_BIN);
let status = cmd
.args(mkfs_options)
.arg(device)
.status()
- .context(format!("failed to execute {}", MK2FS_BIN))?;
+ .with_context(|| format!("failed to execute {}", MK2FS_BIN))?;
ensure!(status.success(), "mkfs failed with {:?}", status);
Ok(())
}
fn mount(source: &Path, mountpoint: &Path) -> Result<()> {
- create_dir_all(mountpoint).context(format!("Failed to create {:?}", &mountpoint))?;
+ create_dir_all(mountpoint).with_context(|| format!("Failed to create {:?}", &mountpoint))?;
let mount_options = CString::new(
"fscontext=u:object_r:encryptedstore_fs:s0,context=u:object_r:encryptedstore_file:s0",
)
@@ -153,6 +175,9 @@
let mountpoint = CString::new(mountpoint.as_os_str().as_bytes())?;
let fstype = CString::new("ext4").unwrap();
+ // SAFETY: The source, target and filesystemtype are valid C strings. For ext4, data is expected
+ // to be a C string as well, which it is. None of these pointers are retained after mount
+ // returns.
let ret = unsafe {
libc::mount(
source.as_ptr(),
diff --git a/javalib/api/test-current.txt b/javalib/api/test-current.txt
index 8b7ec11..cf95770 100644
--- a/javalib/api/test-current.txt
+++ b/javalib/api/test-current.txt
@@ -2,15 +2,19 @@
package android.system.virtualmachine {
public class VirtualMachine implements java.lang.AutoCloseable {
+ method @NonNull @WorkerThread public java.io.OutputStream getConsoleInput() throws android.system.virtualmachine.VirtualMachineException;
method @NonNull public java.io.File getRootDir();
}
public final class VirtualMachineConfig {
method @Nullable public String getPayloadConfigPath();
+ method public boolean isVmConsoleInputSupported();
}
public static final class VirtualMachineConfig.Builder {
method @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setPayloadConfigPath(@NonNull String);
+ method @NonNull @RequiresPermission(android.system.virtualmachine.VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION) public android.system.virtualmachine.VirtualMachineConfig.Builder setVendorDiskImage(@NonNull java.io.File);
+ method @NonNull public android.system.virtualmachine.VirtualMachineConfig.Builder setVmConsoleInputSupported(boolean);
}
}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index f96effa..675a046 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -76,11 +76,13 @@
import java.io.File;
import java.io.FileInputStream;
+import java.io.FileOutputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.io.OutputStream;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.nio.channels.FileChannel;
@@ -294,6 +296,8 @@
private final boolean mVmOutputCaptured;
+ private final boolean mVmConsoleInputSupported;
+
/** The configuration that is currently associated with this VM. */
@GuardedBy("mLock")
@NonNull
@@ -306,11 +310,19 @@
@GuardedBy("mLock")
@Nullable
- private ParcelFileDescriptor mConsoleReader;
+ private ParcelFileDescriptor mConsoleOutReader;
@GuardedBy("mLock")
@Nullable
- private ParcelFileDescriptor mConsoleWriter;
+ private ParcelFileDescriptor mConsoleOutWriter;
+
+ @GuardedBy("mLock")
+ @Nullable
+ private ParcelFileDescriptor mConsoleInReader;
+
+ @GuardedBy("mLock")
+ @Nullable
+ private ParcelFileDescriptor mConsoleInWriter;
@GuardedBy("mLock")
@Nullable
@@ -372,6 +384,7 @@
: null;
mVmOutputCaptured = config.isVmOutputCaptured();
+ mVmConsoleInputSupported = config.isVmConsoleInputSupported();
}
/**
@@ -787,7 +800,11 @@
try {
if (mVmOutputCaptured) {
- createVmPipes();
+ createVmOutputPipes();
+ }
+
+ if (mVmConsoleInputSupported) {
+ createVmInputPipes();
}
VirtualMachineAppConfig appConfig =
@@ -804,7 +821,9 @@
android.system.virtualizationservice.VirtualMachineConfig.appConfig(
appConfig);
- mVirtualMachine = service.createVm(vmConfigParcel, mConsoleWriter, mLogWriter);
+ mVirtualMachine =
+ service.createVm(
+ vmConfigParcel, mConsoleOutWriter, mConsoleInReader, mLogWriter);
mVirtualMachine.registerCallback(new CallbackTranslator(service));
mContext.registerComponentCallbacks(mMemoryManagementCallbacks);
mVirtualMachine.start();
@@ -843,12 +862,12 @@
}
@GuardedBy("mLock")
- private void createVmPipes() throws VirtualMachineException {
+ private void createVmOutputPipes() throws VirtualMachineException {
try {
- if (mConsoleReader == null || mConsoleWriter == null) {
+ if (mConsoleOutReader == null || mConsoleOutWriter == null) {
ParcelFileDescriptor[] pipe = ParcelFileDescriptor.createPipe();
- mConsoleReader = pipe[0];
- mConsoleWriter = pipe[1];
+ mConsoleOutReader = pipe[0];
+ mConsoleOutWriter = pipe[1];
}
if (mLogReader == null || mLogWriter == null) {
@@ -857,7 +876,20 @@
mLogWriter = pipe[1];
}
} catch (IOException e) {
- throw new VirtualMachineException("Failed to create stream for VM", e);
+ throw new VirtualMachineException("Failed to create output stream for VM", e);
+ }
+ }
+
+ @GuardedBy("mLock")
+ private void createVmInputPipes() throws VirtualMachineException {
+ try {
+ if (mConsoleInReader == null || mConsoleInWriter == null) {
+ ParcelFileDescriptor[] pipe = ParcelFileDescriptor.createPipe();
+ mConsoleInReader = pipe[0];
+ mConsoleInWriter = pipe[1];
+ }
+ } catch (IOException e) {
+ throw new VirtualMachineException("Failed to create input stream for VM", e);
}
}
@@ -883,12 +915,37 @@
throw new VirtualMachineException("Capturing vm outputs is turned off");
}
synchronized (mLock) {
- createVmPipes();
- return new FileInputStream(mConsoleReader.getFileDescriptor());
+ createVmOutputPipes();
+ return new FileInputStream(mConsoleOutReader.getFileDescriptor());
}
}
/**
+ * Returns the stream object representing the console input to the virtual machine. The console
+ * input is only available if the {@link VirtualMachineConfig} specifies that it should be
+ * {@linkplain VirtualMachineConfig#isVmConsoleInputSupported supported}.
+ *
+ * <p>NOTE: This method may block and should not be called on the main thread.
+ *
+ * @throws VirtualMachineException if the stream could not be created, or console input is not
+ * supported.
+ * @hide
+ */
+ @TestApi
+ @WorkerThread
+ @NonNull
+ public OutputStream getConsoleInput() throws VirtualMachineException {
+ if (!mVmConsoleInputSupported) {
+ throw new VirtualMachineException("VM console input is not supported");
+ }
+ synchronized (mLock) {
+ createVmInputPipes();
+ return new FileOutputStream(mConsoleInWriter.getFileDescriptor());
+ }
+ }
+
+
+ /**
* Returns the stream object representing the log output from the virtual machine. The log
* output is only available if the VirtualMachineConfig specifies that it should be {@linkplain
* VirtualMachineConfig#isVmOutputCaptured captured}.
@@ -910,7 +967,7 @@
throw new VirtualMachineException("Capturing vm outputs is turned off");
}
synchronized (mLock) {
- createVmPipes();
+ createVmOutputPipes();
return new FileInputStream(mLogReader.getFileDescriptor());
}
}
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
index 93e65db..b307854 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -61,7 +61,8 @@
@SystemApi
public final class VirtualMachineConfig {
private static final String TAG = "VirtualMachineConfig";
- private static final String[] EMPTY_STRING_ARRAY = {};
+
+ private static String[] EMPTY_STRING_ARRAY = {};
// These define the schema of the config file persisted on disk.
private static final int VERSION = 6;
@@ -76,6 +77,8 @@
private static final String KEY_CPU_TOPOLOGY = "cpuTopology";
private static final String KEY_ENCRYPTED_STORAGE_BYTES = "encryptedStorageBytes";
private static final String KEY_VM_OUTPUT_CAPTURED = "vmOutputCaptured";
+ private static final String KEY_VM_CONSOLE_INPUT_SUPPORTED = "vmConsoleInputSupported";
+ private static final String KEY_VENDOR_DISK_IMAGE_PATH = "vendorDiskImagePath";
/** @hide */
@Retention(RetentionPolicy.SOURCE)
@@ -164,6 +167,11 @@
/** Whether the app can read console and log output. */
private final boolean mVmOutputCaptured;
+ /** Whether the app can write console input to the VM */
+ private final boolean mVmConsoleInputSupported;
+
+ @Nullable private final File mVendorDiskImage;
+
private VirtualMachineConfig(
@Nullable String packageName,
@Nullable String apkPath,
@@ -174,7 +182,9 @@
long memoryBytes,
@CpuTopology int cpuTopology,
long encryptedStorageBytes,
- boolean vmOutputCaptured) {
+ boolean vmOutputCaptured,
+ boolean vmConsoleInputSupported,
+ @Nullable File vendorDiskImage) {
// This is only called from Builder.build(); the builder handles parameter validation.
mPackageName = packageName;
mApkPath = apkPath;
@@ -186,6 +196,8 @@
mCpuTopology = cpuTopology;
mEncryptedStorageBytes = encryptedStorageBytes;
mVmOutputCaptured = vmOutputCaptured;
+ mVmConsoleInputSupported = vmConsoleInputSupported;
+ mVendorDiskImage = vendorDiskImage;
}
/** Loads a config from a file. */
@@ -260,6 +272,12 @@
builder.setEncryptedStorageBytes(encryptedStorageBytes);
}
builder.setVmOutputCaptured(b.getBoolean(KEY_VM_OUTPUT_CAPTURED));
+ builder.setVmConsoleInputSupported(b.getBoolean(KEY_VM_CONSOLE_INPUT_SUPPORTED));
+
+ String vendorDiskImagePath = b.getString(KEY_VENDOR_DISK_IMAGE_PATH);
+ if (vendorDiskImagePath != null) {
+ builder.setVendorDiskImage(new File(vendorDiskImagePath));
+ }
return builder.build();
}
@@ -295,6 +313,10 @@
b.putLong(KEY_ENCRYPTED_STORAGE_BYTES, mEncryptedStorageBytes);
}
b.putBoolean(KEY_VM_OUTPUT_CAPTURED, mVmOutputCaptured);
+ b.putBoolean(KEY_VM_CONSOLE_INPUT_SUPPORTED, mVmConsoleInputSupported);
+ if (mVendorDiskImage != null) {
+ b.putString(KEY_VENDOR_DISK_IMAGE_PATH, mVendorDiskImage.getAbsolutePath());
+ }
b.writeToStream(output);
}
@@ -413,6 +435,17 @@
}
/**
+ * Returns whether the app can write to the VM console.
+ *
+ * @see Builder#setVmConsoleInputSupported
+ * @hide
+ */
+ @TestApi
+ public boolean isVmConsoleInputSupported() {
+ return mVmConsoleInputSupported;
+ }
+
+ /**
* Tests if this config is compatible with other config. Being compatible means that the configs
* can be interchangeably used for the same virtual machine; they do not change the VM identity
* or secrets. Such changes include varying the number of CPUs or the size of the RAM. Changes
@@ -431,6 +464,7 @@
&& this.mProtectedVm == other.mProtectedVm
&& this.mEncryptedStorageBytes == other.mEncryptedStorageBytes
&& this.mVmOutputCaptured == other.mVmOutputCaptured
+ && this.mVmConsoleInputSupported == other.mVmConsoleInputSupported
&& Objects.equals(this.mPayloadConfigPath, other.mPayloadConfigPath)
&& Objects.equals(this.mPayloadBinaryName, other.mPayloadBinaryName)
&& Objects.equals(this.mPackageName, other.mPackageName)
@@ -482,8 +516,21 @@
vsConfig.cpuTopology = android.system.virtualizationservice.CpuTopology.ONE_CPU;
break;
}
- // Don't allow apps to set task profiles ... at least for now.
- vsConfig.taskProfiles = EMPTY_STRING_ARRAY;
+ if (mVendorDiskImage != null) {
+ VirtualMachineAppConfig.CustomConfig customConfig =
+ new VirtualMachineAppConfig.CustomConfig();
+ customConfig.taskProfiles = EMPTY_STRING_ARRAY;
+ customConfig.devices = EMPTY_STRING_ARRAY;
+ try {
+ customConfig.vendorImage =
+ ParcelFileDescriptor.open(mVendorDiskImage, MODE_READ_ONLY);
+ } catch (FileNotFoundException e) {
+ throw new VirtualMachineException(
+ "Failed to open vendor disk image " + mVendorDiskImage.getAbsolutePath(),
+ e);
+ }
+ vsConfig.customConfig = customConfig;
+ }
return vsConfig;
}
@@ -554,6 +601,8 @@
@CpuTopology private int mCpuTopology = CPU_TOPOLOGY_ONE_CPU;
private long mEncryptedStorageBytes;
private boolean mVmOutputCaptured = false;
+ private boolean mVmConsoleInputSupported = false;
+ @Nullable private File mVendorDiskImage;
/**
* Creates a builder for the given context.
@@ -612,6 +661,10 @@
throw new IllegalStateException("debug level must be FULL to capture output");
}
+ if (mVmConsoleInputSupported && mDebugLevel != DEBUG_LEVEL_FULL) {
+ throw new IllegalStateException("debug level must be FULL to use console input");
+ }
+
return new VirtualMachineConfig(
packageName,
apkPath,
@@ -622,7 +675,9 @@
mMemoryBytes,
mCpuTopology,
mEncryptedStorageBytes,
- mVmOutputCaptured);
+ mVmOutputCaptured,
+ mVmConsoleInputSupported,
+ mVendorDiskImage);
}
/**
@@ -822,5 +877,36 @@
mVmOutputCaptured = captured;
return this;
}
+
+ /**
+ * Sets whether to allow the app to write to the VM console. Default is {@code false}.
+ *
+ * <p>Setting this to {@code true} allows the app to write directly to the {@linkplain
+ * VirtualMachine#getConsoleInput console input}.
+ *
+ * <p>The {@linkplain #setDebugLevel debug level} must be {@link #DEBUG_LEVEL_FULL} to set
+ * this to {@code true}.
+ *
+ * @hide
+ */
+ @TestApi
+ @NonNull
+ public Builder setVmConsoleInputSupported(boolean supported) {
+ mVmConsoleInputSupported = supported;
+ return this;
+ }
+
+ /**
+ * Sets the path to the disk image with vendor-specific modules.
+ *
+ * @hide
+ */
+ @TestApi
+ @RequiresPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION)
+ @NonNull
+ public Builder setVendorDiskImage(@NonNull File vendorDiskImage) {
+ mVendorDiskImage = vendorDiskImage;
+ return this;
+ }
}
}
diff --git a/launcher/main.cpp b/launcher/main.cpp
index c3f9988..5ae9956 100644
--- a/launcher/main.cpp
+++ b/launcher/main.cpp
@@ -61,7 +61,7 @@
return EXIT_FAILURE;
}
- android::base::InitLogging(argv, &android::base::KernelLogger);
+ android::base::InitLogging(argv);
const char* libname = argv[1];
auto handle = load(libname);
diff --git a/libs/avb/Android.bp b/libs/avb/Android.bp
deleted file mode 100644
index 3a671e2..0000000
--- a/libs/avb/Android.bp
+++ /dev/null
@@ -1,56 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-rust_defaults {
- name: "libavb_bindgen.defaults",
- wrapper_src: "bindgen/avb.h",
- crate_name: "avb_bindgen",
- edition: "2021",
- visibility: ["//packages/modules/Virtualization:__subpackages__"],
- source_stem: "bindings",
- bindgen_flags: [
- "--size_t-is-usize",
- "--constified-enum-module AvbDescriptorTag",
- "--default-enum-style rust",
- "--allowlist-type=AvbDescriptorTag",
- "--allowlist-function=.*",
- "--allowlist-var=AVB.*",
- "--use-core",
- "--raw-line=#![no_std]",
- "--ctypes-prefix=core::ffi",
- ],
- cflags: ["-DBORINGSSL_NO_CXX"],
-}
-
-rust_bindgen {
- name: "libavb_bindgen",
- defaults: ["libavb_bindgen.defaults"],
- host_supported: true,
- static_libs: [
- "libavb",
- ],
- shared_libs: [
- "libcrypto",
- ],
-}
-
-rust_bindgen {
- name: "libavb_bindgen_nostd",
- defaults: ["libavb_bindgen.defaults"],
- static_libs: [
- "libavb_baremetal",
- "libcrypto_baremetal",
- ],
-}
-
-rust_test {
- name: "libavb_bindgen_test",
- srcs: [":libavb_bindgen"],
- crate_name: "avb_bindgen_test",
- edition: "2021",
- test_suites: ["general-tests"],
- auto_gen_config: true,
- clippy_lints: "none",
- lints: "none",
-}
diff --git a/libs/avb/bindgen/avb.h b/libs/avb/bindgen/avb.h
deleted file mode 100644
index b3d5385..0000000
--- a/libs/avb/bindgen/avb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <libavb/libavb.h>
diff --git a/libs/avflog/Android.bp b/libs/avflog/Android.bp
new file mode 100644
index 0000000..1ddfc7a
--- /dev/null
+++ b/libs/avflog/Android.bp
@@ -0,0 +1,30 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libavflog.defaults",
+ crate_name: "avflog",
+ host_supported: true,
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ rustlibs: [
+ "liblog_rust",
+ ],
+}
+
+rust_library {
+ name: "libavflog",
+ defaults: ["libavflog.defaults"],
+ apex_available: [
+ "//apex_available:platform",
+ "//apex_available:anyapex",
+ ],
+}
+
+rust_test {
+ name: "libavflog.test",
+ defaults: ["libavflog.defaults"],
+ prefer_rlib: true,
+ test_suites: ["general-tests"],
+}
diff --git a/libs/avb/TEST_MAPPING b/libs/avflog/TEST_MAPPING
similarity index 83%
rename from libs/avb/TEST_MAPPING
rename to libs/avflog/TEST_MAPPING
index 57de6b3..921c4d8 100644
--- a/libs/avb/TEST_MAPPING
+++ b/libs/avflog/TEST_MAPPING
@@ -3,7 +3,7 @@
{
"avf-presubmit" : [
{
- "name" : "libavb_bindgen_test"
+ "name" : "libavflog.test"
}
]
}
diff --git a/libs/avflog/src/lib.rs b/libs/avflog/src/lib.rs
new file mode 100644
index 0000000..27c8628
--- /dev/null
+++ b/libs/avflog/src/lib.rs
@@ -0,0 +1,71 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides random utilities for components in AVF
+
+use log::error;
+use std::fmt::Debug;
+
+/// Convenient trait for logging an error while returning it
+pub trait LogResult<T, E> {
+ /// If this is `Err`, the error is debug-formatted and is logged via `error!`.
+ fn with_log(self) -> Result<T, E>;
+}
+
+impl<T, E: Debug> LogResult<T, E> for Result<T, E> {
+ fn with_log(self) -> Result<T, E> {
+ self.map_err(|e| {
+ error!("{e:?}");
+ e
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use log::{LevelFilter, Log, Metadata, Record};
+ use std::cell::RefCell;
+ use std::io::{Error, ErrorKind};
+
+ struct TestLogger {
+ last_log: RefCell<String>,
+ }
+ static TEST_LOGGER: TestLogger = TestLogger { last_log: RefCell::new(String::new()) };
+
+ // SAFETY: TestLogger is used only inside the test, which is single-threaded.
+ unsafe impl Sync for TestLogger {}
+
+ impl Log for TestLogger {
+ fn enabled(&self, _metadata: &Metadata) -> bool {
+ true
+ }
+ fn log(&self, record: &Record) {
+ *self.last_log.borrow_mut() = format!("{}", record.args());
+ }
+ fn flush(&self) {}
+ }
+
+ #[test]
+ fn test_logresult_emits_error_log() {
+ log::set_logger(&TEST_LOGGER).unwrap();
+ log::set_max_level(LevelFilter::Info);
+
+ let e = Error::from(ErrorKind::NotFound);
+ let res: Result<(), Error> = Err(e).with_log();
+
+ assert!(res.is_err());
+ assert_eq!(TEST_LOGGER.last_log.borrow().as_str(), "Kind(NotFound)");
+ }
+}
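
The `with_log()` helper above is meant to be chained onto any fallible call whose error should be logged and still propagated. A minimal caller-side sketch (the file path is illustrative only):

```rust
use avflog::LogResult;
use std::fs::File;

/// Opens a config file; on failure the io::Error is debug-formatted and logged
/// via `error!`, then returned unchanged so the caller keeps its normal `?` flow.
fn open_config() -> std::io::Result<File> {
    File::open("/data/local/tmp/config.json").with_log()
}
```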
diff --git a/libs/capabilities/src/caps.rs b/libs/capabilities/src/caps.rs
index 1f44a34..bc17fa8 100644
--- a/libs/capabilities/src/caps.rs
+++ b/libs/capabilities/src/caps.rs
@@ -26,8 +26,8 @@
/// Removes inheritable capabilities set for this process.
/// See: https://man7.org/linux/man-pages/man7/capabilities.7.html
pub fn drop_inheritable_caps() -> Result<()> {
+ // SAFETY: we do not manipulate memory handled by libcap.
unsafe {
- // SAFETY: we do not manipulate memory handled by libcap.
let caps = cap_get_proc();
scopeguard::defer! {
cap_free(caps as *mut std::os::raw::c_void);
@@ -49,8 +49,8 @@
pub fn drop_bounding_set() -> Result<()> {
let mut cap_id: cap_value_t = 0;
while cap_id <= CAP_LAST_CAP.try_into().unwrap() {
+ // SAFETY: we do not manipulate memory handled by libcap.
unsafe {
- // SAFETY: we do not manipulate memory handled by libcap.
if cap_drop_bound(cap_id) == -1 {
let e = Errno::last();
bail!("cap_drop_bound failed for {}: {:?}", cap_id, e);
diff --git a/libs/devicemapper/Android.bp b/libs/devicemapper/Android.bp
index b7cdedc..29f2f5f 100644
--- a/libs/devicemapper/Android.bp
+++ b/libs/devicemapper/Android.bp
@@ -33,7 +33,7 @@
name: "libdm_rust.test",
defaults: [
"libdm_rust.defaults",
- "ignorabletest.defaults",
+ "rdroidtest.defaults",
],
test_suites: ["general-tests"],
rustlibs: [
diff --git a/libs/devicemapper/src/lib.rs b/libs/devicemapper/src/lib.rs
index 0170795..868ac5a 100644
--- a/libs/devicemapper/src/lib.rs
+++ b/libs/devicemapper/src/lib.rs
@@ -233,13 +233,13 @@
}
#[cfg(test)]
-ignorabletest::test_main!();
+rdroidtest::test_main!();
#[cfg(test)]
mod tests {
use super::*;
use crypt::{CipherType, DmCryptTargetBuilder};
- use ignorabletest::test;
+ use rdroidtest::test;
use rustutils::system_properties;
use std::fs::{read, File, OpenOptions};
use std::io::Write;
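
The devicemapper test hunks above only switch harnesses from `ignorabletest` to `rdroidtest`; the call sites are unchanged, which suggests the macro surface stays the same. A hedged sketch of a runtime-ignorable test under the new name, assuming the `ignore_if:` form carries over (the device-check predicate is hypothetical):

```rust
#[cfg(test)]
rdroidtest::test_main!();

#[cfg(test)]
mod tests {
    use rdroidtest::test;

    // Hypothetical device check used to skip the test at runtime.
    fn dm_crypt_available() -> bool {
        true
    }

    test!(dm_crypt_smoke, ignore_if: !dm_crypt_available());
    fn dm_crypt_smoke() {
        assert_eq!(1 + 1, 2);
    }
}
```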
diff --git a/libs/fdtpci/src/lib.rs b/libs/fdtpci/src/lib.rs
index 96d98d6..602f736 100644
--- a/libs/fdtpci/src/lib.rs
+++ b/libs/fdtpci/src/lib.rs
@@ -119,7 +119,9 @@
/// method must only be called once, and there must be no other `PciRoot` constructed using the
/// same CAM.
pub unsafe fn make_pci_root(&self) -> PciRoot {
- PciRoot::new(self.cam_range.start as *mut u8, Cam::MmioCam)
+ // SAFETY: We trust that the FDT gave us a valid MMIO base address for the CAM. The caller
+ // guarantees to only call us once, so there are no other references to it.
+ unsafe { PciRoot::new(self.cam_range.start as *mut u8, Cam::MmioCam) }
}
}
diff --git a/libs/hyp/Android.bp b/libs/hyp/Android.bp
index 1bb8722..8baf9dd 100644
--- a/libs/hyp/Android.bp
+++ b/libs/hyp/Android.bp
@@ -8,7 +8,6 @@
srcs: ["src/lib.rs"],
prefer_rlib: true,
rustlibs: [
- "libbitflags",
"libonce_cell_nostd",
"libsmccc",
"libuuid_nostd",
diff --git a/libs/hyp/src/error.rs b/libs/hyp/src/error.rs
index 408150e..3fdad70 100644
--- a/libs/hyp/src/error.rs
+++ b/libs/hyp/src/error.rs
@@ -14,6 +14,7 @@
//! Error and Result types for hypervisor.
+use crate::GeniezoneError;
use crate::KvmError;
use core::{fmt, result};
use uuid::Uuid;
@@ -25,10 +26,12 @@
#[derive(Debug, Clone)]
pub enum Error {
/// MMIO guard is not supported.
- MmioGuardNotsupported,
+ MmioGuardNotSupported,
/// Failed to invoke a certain KVM HVC function.
KvmError(KvmError, u32),
- /// Unsupported Hypervisor.
+ /// Failed to invoke a GenieZone HVC function.
+ GeniezoneError(GeniezoneError, u32),
+ /// Unsupported Hypervisor
UnsupportedHypervisorUuid(Uuid),
/// The MMIO_GUARD granule used by the hypervisor is not supported.
UnsupportedMmioGuardGranule(usize),
@@ -37,10 +40,16 @@
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
- Self::MmioGuardNotsupported => write!(f, "MMIO guard is not supported"),
+ Self::MmioGuardNotSupported => write!(f, "MMIO guard is not supported"),
Self::KvmError(e, function_id) => {
write!(f, "Failed to invoke the HVC function with function ID {function_id}: {e}")
}
+ Self::GeniezoneError(e, function_id) => {
+ write!(
+ f,
+ "Failed to invoke GenieZone HVC function with function ID {function_id}: {e}"
+ )
+ }
Self::UnsupportedHypervisorUuid(u) => {
write!(f, "Unsupported Hypervisor UUID {u}")
}
diff --git a/libs/hyp/src/hypervisor/common.rs b/libs/hyp/src/hypervisor/common.rs
index ec7d168..70fdd0a 100644
--- a/libs/hyp/src/hypervisor/common.rs
+++ b/libs/hyp/src/hypervisor/common.rs
@@ -14,49 +14,62 @@
//! This module regroups some common traits shared by all the hypervisors.
-use crate::error::Result;
+use crate::error::{Error, Result};
use crate::util::SIZE_4KB;
-use bitflags::bitflags;
/// Expected MMIO guard granule size, validated during MMIO guard initialization.
pub const MMIO_GUARD_GRANULE_SIZE: usize = SIZE_4KB;
-bitflags! {
- /// Capabilities that Hypervisor backends can declare support for.
- pub struct HypervisorCap: u32 {
- /// Capability for guest to share its memory with host at runtime.
- const DYNAMIC_MEM_SHARE = 0b1;
+/// Trait for the hypervisor.
+pub trait Hypervisor {
+ /// Returns the hypervisor's MMIO_GUARD implementation, if any.
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ None
+ }
+
+ /// Returns the hypervisor's dynamic memory sharing implementation, if any.
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ None
}
}
-/// Trait for the hypervisor.
-pub trait Hypervisor {
- /// Initializes the hypervisor by enrolling a MMIO guard and checking the memory granule size.
- /// By enrolling, all MMIO will be blocked unless allow-listed with `mmio_guard_map`.
- /// Protected VMs are auto-enrolled.
- fn mmio_guard_init(&self) -> Result<()>;
+pub trait MmioGuardedHypervisor {
+ /// Enrolls with the MMIO guard so that all MMIO will be blocked unless allow-listed with
+ /// `MmioGuardedHypervisor::map`.
+ fn enroll(&self) -> Result<()>;
/// Maps a page containing the given memory address to the hypervisor MMIO guard.
/// The page size corresponds to the MMIO guard granule size.
- fn mmio_guard_map(&self, addr: usize) -> Result<()>;
+ fn map(&self, addr: usize) -> Result<()>;
/// Unmaps a page containing the given memory address from the hypervisor MMIO guard.
/// The page size corresponds to the MMIO guard granule size.
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()>;
+ fn unmap(&self, addr: usize) -> Result<()>;
+ /// Returns the MMIO guard granule size in bytes.
+ fn granule(&self) -> Result<usize>;
+
+ // TODO(ptosi): Fully move granule validation to client code.
+ /// Validates the MMIO guard granule size.
+ fn validate_granule(&self) -> Result<()> {
+ match self.granule()? {
+ MMIO_GUARD_GRANULE_SIZE => Ok(()),
+ granule => Err(Error::UnsupportedMmioGuardGranule(granule)),
+ }
+ }
+}
+
+pub trait MemSharingHypervisor {
/// Shares a region of memory with host, granting it read, write and execute permissions.
/// The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
- fn mem_share(&self, base_ipa: u64) -> Result<()>;
+ fn share(&self, base_ipa: u64) -> Result<()>;
/// Revokes access permission from host to a memory region previously shared with
/// [`mem_share`]. The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
- fn mem_unshare(&self, base_ipa: u64) -> Result<()>;
+ fn unshare(&self, base_ipa: u64) -> Result<()>;
/// Returns the memory protection granule size in bytes.
- fn memory_protection_granule(&self) -> Result<usize>;
-
- /// Check if required capabilities are supported.
- fn has_cap(&self, cap: HypervisorCap) -> bool;
+ fn granule(&self) -> Result<usize>;
}
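
The refactoring above replaces the `HypervisorCap` bitflags with optional capability objects: a backend advertises a capability by overriding `as_mmio_guard()` or `as_mem_sharer()` to return itself, and simply omits the override otherwise. A hypothetical sibling backend module (sketch only; `ToyHypervisor` is not part of this change) that offers MMIO guard but not memory sharing:

```rust
use super::common::{Hypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE};
use crate::error::Result;

struct ToyHypervisor;

impl Hypervisor for ToyHypervisor {
    fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
        Some(self)
    }
    // as_mem_sharer() is not overridden, so it keeps the default `None`:
    // the memory sharing capability is simply absent.
}

impl MmioGuardedHypervisor for ToyHypervisor {
    fn enroll(&self) -> Result<()> {
        Ok(())
    }
    fn map(&self, _addr: usize) -> Result<()> {
        Ok(())
    }
    fn unmap(&self, _addr: usize) -> Result<()> {
        Ok(())
    }
    fn granule(&self) -> Result<usize> {
        Ok(MMIO_GUARD_GRANULE_SIZE)
    }
}
```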
diff --git a/libs/hyp/src/hypervisor/geniezone.rs b/libs/hyp/src/hypervisor/geniezone.rs
new file mode 100644
index 0000000..ad18e17
--- /dev/null
+++ b/libs/hyp/src/hypervisor/geniezone.rs
@@ -0,0 +1,157 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrappers around calls to the GenieZone hypervisor.
+
+use super::common::{Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor};
+use crate::error::{Error, Result};
+use crate::util::page_address;
+use core::fmt::{self, Display, Formatter};
+use smccc::{
+ error::{positive_or_error_64, success_or_error_64},
+ hvc64,
+};
+use uuid::{uuid, Uuid};
+
+pub(super) struct GeniezoneHypervisor;
+
+const ARM_SMCCC_GZVM_FUNC_HYP_MEMINFO: u32 = 0xc6000002;
+const ARM_SMCCC_GZVM_FUNC_MEM_SHARE: u32 = 0xc6000003;
+const ARM_SMCCC_GZVM_FUNC_MEM_UNSHARE: u32 = 0xc6000004;
+
+const VENDOR_HYP_GZVM_MMIO_GUARD_INFO_FUNC_ID: u32 = 0xc6000005;
+const VENDOR_HYP_GZVM_MMIO_GUARD_ENROLL_FUNC_ID: u32 = 0xc6000006;
+const VENDOR_HYP_GZVM_MMIO_GUARD_MAP_FUNC_ID: u32 = 0xc6000007;
+const VENDOR_HYP_GZVM_MMIO_GUARD_UNMAP_FUNC_ID: u32 = 0xc6000008;
+
+impl GeniezoneHypervisor {
+ // We generate this uuid by ourselves to identify GenieZone hypervisor
+ // and share the same identification along with guest VMs.
+ // The previous uuid was removed due to duplication elsewhere.
+ pub const UUID: Uuid = uuid!("7e134ed0-3b82-488d-8cee-69c19211dbe7");
+}
+
+/// Error from a GenieZone HVC call.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum GeniezoneError {
+ /// The call is not supported by the implementation.
+ NotSupported,
+ /// The call is not required to implement.
+ NotRequired,
+ /// One of the call parameters has an invalid value.
+ InvalidParameter,
+ /// There was an unexpected return value.
+ Unknown(i64),
+}
+
+impl From<i64> for GeniezoneError {
+ fn from(value: i64) -> Self {
+ match value {
+ -1 => GeniezoneError::NotSupported,
+ -2 => GeniezoneError::NotRequired,
+ -3 => GeniezoneError::InvalidParameter,
+ _ => GeniezoneError::Unknown(value),
+ }
+ }
+}
+
+impl From<i32> for GeniezoneError {
+ fn from(value: i32) -> Self {
+ i64::from(value).into()
+ }
+}
+
+impl Display for GeniezoneError {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ match self {
+ Self::NotSupported => write!(f, "GenieZone call not supported"),
+ Self::NotRequired => write!(f, "GenieZone call not required"),
+ Self::InvalidParameter => write!(f, "GenieZone call received invalid value"),
+ Self::Unknown(e) => write!(f, "Unknown return value from GenieZone {} ({0:#x})", e),
+ }
+ }
+}
+
+impl Hypervisor for GeniezoneHypervisor {
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ Some(self)
+ }
+
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ Some(self)
+ }
+}
+
+impl MmioGuardedHypervisor for GeniezoneHypervisor {
+ fn enroll(&self) -> Result<()> {
+ let args = [0u64; 17];
+ match success_or_error_64(hvc64(VENDOR_HYP_GZVM_MMIO_GUARD_ENROLL_FUNC_ID, args)[0]) {
+ Ok(()) => Ok(()),
+ Err(GeniezoneError::NotSupported) | Err(GeniezoneError::NotRequired) => {
+ Err(Error::MmioGuardNotSupported)
+ }
+ Err(e) => Err(Error::GeniezoneError(e, VENDOR_HYP_GZVM_MMIO_GUARD_ENROLL_FUNC_ID)),
+ }
+ }
+
+ fn map(&self, addr: usize) -> Result<()> {
+ let mut args = [0u64; 17];
+ args[0] = page_address(addr);
+
+ checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_MAP_FUNC_ID, args)
+ }
+
+ fn unmap(&self, addr: usize) -> Result<()> {
+ let mut args = [0u64; 17];
+ args[0] = page_address(addr);
+
+ checked_hvc64_expect_zero(VENDOR_HYP_GZVM_MMIO_GUARD_UNMAP_FUNC_ID, args)
+ }
+
+ fn granule(&self) -> Result<usize> {
+ let args = [0u64; 17];
+ let granule = checked_hvc64(VENDOR_HYP_GZVM_MMIO_GUARD_INFO_FUNC_ID, args)?;
+ Ok(granule.try_into().unwrap())
+ }
+}
+
+impl MemSharingHypervisor for GeniezoneHypervisor {
+ fn share(&self, base_ipa: u64) -> Result<()> {
+ let mut args = [0u64; 17];
+ args[0] = base_ipa;
+
+ checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_SHARE, args)
+ }
+
+ fn unshare(&self, base_ipa: u64) -> Result<()> {
+ let mut args = [0u64; 17];
+ args[0] = base_ipa;
+
+ checked_hvc64_expect_zero(ARM_SMCCC_GZVM_FUNC_MEM_UNSHARE, args)
+ }
+
+ fn granule(&self) -> Result<usize> {
+ let args = [0u64; 17];
+ let granule = checked_hvc64(ARM_SMCCC_GZVM_FUNC_HYP_MEMINFO, args)?;
+ Ok(granule.try_into().unwrap())
+ }
+}
+
+fn checked_hvc64_expect_zero(function: u32, args: [u64; 17]) -> Result<()> {
+ success_or_error_64(hvc64(function, args)[0]).map_err(|e| Error::GeniezoneError(e, function))
+}
+
+fn checked_hvc64(function: u32, args: [u64; 17]) -> Result<u64> {
+ positive_or_error_64(hvc64(function, args)[0]).map_err(|e| Error::GeniezoneError(e, function))
+}
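
For reference, the `From<i64>` impl above decodes the raw negative HVC return values as follows. This is only an illustrative in-crate check, not an actual test target in this no_std-oriented crate:

```rust
fn demo_geniezone_error_decoding() {
    use crate::hypervisor::GeniezoneError;

    assert_eq!(GeniezoneError::from(-1i64), GeniezoneError::NotSupported);
    assert_eq!(GeniezoneError::from(-2i64), GeniezoneError::NotRequired);
    assert_eq!(GeniezoneError::from(-3i64), GeniezoneError::InvalidParameter);
    assert_eq!(GeniezoneError::from(-42i64), GeniezoneError::Unknown(-42));
}
```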
diff --git a/libs/hyp/src/hypervisor/gunyah.rs b/libs/hyp/src/hypervisor/gunyah.rs
index 252430f..45c01bf 100644
--- a/libs/hyp/src/hypervisor/gunyah.rs
+++ b/libs/hyp/src/hypervisor/gunyah.rs
@@ -1,5 +1,4 @@
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
-use crate::error::Result;
+use super::common::Hypervisor;
use uuid::{uuid, Uuid};
pub(super) struct GunyahHypervisor;
@@ -8,32 +7,4 @@
pub const UUID: Uuid = uuid!("c1d58fcd-a453-5fdb-9265-ce36673d5f14");
}
-impl Hypervisor for GunyahHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
- Ok(())
- }
-
- fn mmio_guard_map(&self, _addr: usize) -> Result<()> {
- Ok(())
- }
-
- fn mmio_guard_unmap(&self, _addr: usize) -> Result<()> {
- Ok(())
- }
-
- fn mem_share(&self, _base_ipa: u64) -> Result<()> {
- unimplemented!();
- }
-
- fn mem_unshare(&self, _base_ipa: u64) -> Result<()> {
- unimplemented!();
- }
-
- fn memory_protection_granule(&self) -> Result<usize> {
- Ok(MMIO_GUARD_GRANULE_SIZE)
- }
-
- fn has_cap(&self, _cap: HypervisorCap) -> bool {
- false
- }
-}
+impl Hypervisor for GunyahHypervisor {}
diff --git a/libs/hyp/src/hypervisor/kvm.rs b/libs/hyp/src/hypervisor/kvm.rs
index a89f9b8..5835346 100644
--- a/libs/hyp/src/hypervisor/kvm.rs
+++ b/libs/hyp/src/hypervisor/kvm.rs
@@ -14,7 +14,7 @@
//! Wrappers around calls to the KVM hypervisor.
-use super::common::{Hypervisor, HypervisorCap, MMIO_GUARD_GRANULE_SIZE};
+use super::common::{Hypervisor, MemSharingHypervisor, MmioGuardedHypervisor};
use crate::error::{Error, Result};
use crate::util::page_address;
use core::fmt::{self, Display, Formatter};
@@ -70,26 +70,39 @@
const VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID: u32 = 0xc6000007;
const VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID: u32 = 0xc6000008;
-pub(super) struct KvmHypervisor;
+pub(super) struct RegularKvmHypervisor;
-impl KvmHypervisor {
+impl RegularKvmHypervisor {
// Based on ARM_SMCCC_VENDOR_HYP_UID_KVM_REG values listed in Linux kernel source:
// https://github.com/torvalds/linux/blob/master/include/linux/arm-smccc.h
pub(super) const UUID: Uuid = uuid!("28b46fb6-2ec5-11e9-a9ca-4b564d003a74");
- const CAPABILITIES: HypervisorCap = HypervisorCap::DYNAMIC_MEM_SHARE;
}
-impl Hypervisor for KvmHypervisor {
- fn mmio_guard_init(&self) -> Result<()> {
- mmio_guard_enroll()?;
- let mmio_granule = mmio_guard_granule()?;
- if mmio_granule != MMIO_GUARD_GRANULE_SIZE {
- return Err(Error::UnsupportedMmioGuardGranule(mmio_granule));
- }
- Ok(())
+impl Hypervisor for RegularKvmHypervisor {}
+
+pub(super) struct ProtectedKvmHypervisor;
+
+impl Hypervisor for ProtectedKvmHypervisor {
+ fn as_mmio_guard(&self) -> Option<&dyn MmioGuardedHypervisor> {
+ Some(self)
}
- fn mmio_guard_map(&self, addr: usize) -> Result<()> {
+ fn as_mem_sharer(&self) -> Option<&dyn MemSharingHypervisor> {
+ Some(self)
+ }
+}
+
+impl MmioGuardedHypervisor for ProtectedKvmHypervisor {
+ fn enroll(&self) -> Result<()> {
+ let args = [0u64; 17];
+ match success_or_error_64(hvc64(VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID, args)[0]) {
+ Ok(()) => Ok(()),
+ Err(KvmError::NotSupported) => Err(Error::MmioGuardNotSupported),
+ Err(e) => Err(Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID)),
+ }
+ }
+
+ fn map(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
@@ -99,7 +112,7 @@
.map_err(|e| Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID))
}
- fn mmio_guard_unmap(&self, addr: usize) -> Result<()> {
+ fn unmap(&self, addr: usize) -> Result<()> {
let mut args = [0u64; 17];
args[0] = page_address(addr);
@@ -111,45 +124,33 @@
}
}
- fn mem_share(&self, base_ipa: u64) -> Result<()> {
+ fn granule(&self) -> Result<usize> {
+ let args = [0u64; 17];
+ let granule = checked_hvc64(VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID, args)?;
+ Ok(granule.try_into().unwrap())
+ }
+}
+
+impl MemSharingHypervisor for ProtectedKvmHypervisor {
+ fn share(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_SHARE, args)
}
- fn mem_unshare(&self, base_ipa: u64) -> Result<()> {
+ fn unshare(&self, base_ipa: u64) -> Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE, args)
}
- fn memory_protection_granule(&self) -> Result<usize> {
+ fn granule(&self) -> Result<usize> {
let args = [0u64; 17];
let granule = checked_hvc64(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO, args)?;
Ok(granule.try_into().unwrap())
}
-
- fn has_cap(&self, cap: HypervisorCap) -> bool {
- Self::CAPABILITIES.contains(cap)
- }
-}
-
-fn mmio_guard_granule() -> Result<usize> {
- let args = [0u64; 17];
-
- let granule = checked_hvc64(VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID, args)?;
- Ok(granule.try_into().unwrap())
-}
-
-fn mmio_guard_enroll() -> Result<()> {
- let args = [0u64; 17];
- match success_or_error_64(hvc64(VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID, args)[0]) {
- Ok(_) => Ok(()),
- Err(KvmError::NotSupported) => Err(Error::MmioGuardNotsupported),
- Err(e) => Err(Error::KvmError(e, VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID)),
- }
}
fn checked_hvc64_expect_zero(function: u32, args: [u64; 17]) -> Result<()> {
diff --git a/libs/hyp/src/hypervisor/mod.rs b/libs/hyp/src/hypervisor/mod.rs
index 923a21d..309f967 100644
--- a/libs/hyp/src/hypervisor/mod.rs
+++ b/libs/hyp/src/hypervisor/mod.rs
@@ -17,31 +17,37 @@
extern crate alloc;
mod common;
+mod geniezone;
mod gunyah;
mod kvm;
use crate::error::{Error, Result};
use alloc::boxed::Box;
-pub use common::Hypervisor;
-pub use common::HypervisorCap;
-pub use common::MMIO_GUARD_GRANULE_SIZE;
+use common::Hypervisor;
+pub use common::{MemSharingHypervisor, MmioGuardedHypervisor, MMIO_GUARD_GRANULE_SIZE};
+pub use geniezone::GeniezoneError;
+use geniezone::GeniezoneHypervisor;
use gunyah::GunyahHypervisor;
pub use kvm::KvmError;
-use kvm::KvmHypervisor;
+use kvm::{ProtectedKvmHypervisor, RegularKvmHypervisor};
use once_cell::race::OnceBox;
use smccc::hvc64;
use uuid::Uuid;
enum HypervisorBackend {
- Kvm,
+ RegularKvm,
Gunyah,
+ Geniezone,
+ ProtectedKvm,
}
impl HypervisorBackend {
fn get_hypervisor(&self) -> &'static dyn Hypervisor {
match self {
- Self::Kvm => &KvmHypervisor,
+ Self::RegularKvm => &RegularKvmHypervisor,
Self::Gunyah => &GunyahHypervisor,
+ Self::Geniezone => &GeniezoneHypervisor,
+ Self::ProtectedKvm => &ProtectedKvmHypervisor,
}
}
}
@@ -51,8 +57,20 @@
fn try_from(uuid: Uuid) -> Result<HypervisorBackend> {
match uuid {
+ GeniezoneHypervisor::UUID => Ok(HypervisorBackend::Geniezone),
GunyahHypervisor::UUID => Ok(HypervisorBackend::Gunyah),
- KvmHypervisor::UUID => Ok(HypervisorBackend::Kvm),
+ RegularKvmHypervisor::UUID => {
+ // Protected KVM has the same UUID as "regular" KVM so issue an HVC that is assumed
+ // to only be supported by pKVM: if it returns SUCCESS, deduce that this is pKVM
+ // and if it returns NOT_SUPPORTED assume that it is "regular" KVM.
+ match ProtectedKvmHypervisor.as_mmio_guard().unwrap().granule() {
+ Ok(_) => Ok(HypervisorBackend::ProtectedKvm),
+ Err(Error::KvmError(KvmError::NotSupported, _)) => {
+ Ok(HypervisorBackend::RegularKvm)
+ }
+ Err(e) => Err(e),
+ }
+ }
u => Err(Error::UnsupportedHypervisorUuid(u)),
}
}
@@ -85,12 +103,22 @@
}
fn detect_hypervisor() -> HypervisorBackend {
- query_vendor_hyp_call_uid().try_into().expect("Unknown hypervisor")
+ query_vendor_hyp_call_uid().try_into().expect("Failed to detect hypervisor")
}
/// Gets the hypervisor singleton.
-pub fn get_hypervisor() -> &'static dyn Hypervisor {
+fn get_hypervisor() -> &'static dyn Hypervisor {
static HYPERVISOR: OnceBox<HypervisorBackend> = OnceBox::new();
HYPERVISOR.get_or_init(|| Box::new(detect_hypervisor())).get_hypervisor()
}
+
+/// Gets the MMIO_GUARD hypervisor singleton, if any.
+pub fn get_mmio_guard() -> Option<&'static dyn MmioGuardedHypervisor> {
+ get_hypervisor().as_mmio_guard()
+}
+
+/// Gets the dynamic memory sharing hypervisor singleton, if any.
+pub fn get_mem_sharer() -> Option<&'static dyn MemSharingHypervisor> {
+ get_hypervisor().as_mem_sharer()
+}
diff --git a/libs/hyp/src/lib.rs b/libs/hyp/src/lib.rs
index 2c2d1d6..486a181 100644
--- a/libs/hyp/src/lib.rs
+++ b/libs/hyp/src/lib.rs
@@ -21,4 +21,6 @@
mod util;
pub use error::{Error, Result};
-pub use hypervisor::{get_hypervisor, Hypervisor, HypervisorCap, KvmError, MMIO_GUARD_GRANULE_SIZE};
+pub use hypervisor::{get_mem_sharer, get_mmio_guard, KvmError, MMIO_GUARD_GRANULE_SIZE};
+
+use hypervisor::GeniezoneError;
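
With `get_hypervisor()` now private, callers go through the two capability accessors exported above and simply skip the operation when the capability is absent. A minimal client-side sketch (the wrapper functions are illustrative):

```rust
use hyp::{get_mem_sharer, get_mmio_guard, Result};

/// Enrolls with MMIO_GUARD only if the detected hypervisor offers it; on
/// backends without the capability (e.g. regular KVM, Gunyah) this is a no-op.
fn init_mmio_protection() -> Result<()> {
    if let Some(mmio_guard) = get_mmio_guard() {
        mmio_guard.enroll()?;
        mmio_guard.validate_granule()?;
    }
    Ok(())
}

/// Shares one protection granule of guest memory with the host, when supported.
fn share_with_host(base_ipa: u64) -> Result<()> {
    if let Some(sharer) = get_mem_sharer() {
        sharer.share(base_ipa)?;
    }
    Ok(())
}
```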
diff --git a/libs/ignorabletest/Android.bp b/libs/ignorabletest/Android.bp
deleted file mode 100644
index 10aef8e..0000000
--- a/libs/ignorabletest/Android.bp
+++ /dev/null
@@ -1,34 +0,0 @@
-rust_library {
- name: "libignorabletest",
- host_supported: true,
- crate_name: "ignorabletest",
- cargo_env_compat: true,
- cargo_pkg_version: "0.1.0",
- srcs: ["src/lib.rs"],
- edition: "2021",
- rustlibs: [
- "liblibtest_mimic",
- "liblinkme",
- ],
- proc_macros: ["libpaste"],
- apex_available: [
- "//apex_available:platform",
- "//apex_available:anyapex",
- ],
-}
-
-rust_defaults {
- name: "ignorabletest.defaults",
- test_harness: false,
- cfgs: ["test"],
- rustlibs: [
- "libignorabletest",
- "liblinkme",
- ],
- // Without this flag we get linker errors saying to add it. See
- // https://github.com/dtolnay/linkme/issues/49 and related issues.
- ld_flags: [
- "-z",
- "nostart-stop-gc",
- ],
-}
diff --git a/libs/ignorabletest/README.md b/libs/ignorabletest/README.md
deleted file mode 100644
index 77140bd..0000000
--- a/libs/ignorabletest/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# ignorabletest
-
-This is a custom Rust test harness which allows tests to be ignored at runtime based on arbitrary
-criteria. The built-in Rust test harness only allows tests to be ignored at compile time, but this
-is often not enough on Android, where we want to ignore tests based on system properties or other
-characteristics of the device on which the test is being run, which are not known at build time.
-
-## Usage
-
-Unfortunately without the built-in support that rustc provides to the standard test harness, this
-one is slightly more cumbersome to use. Firstly, add it to the `rust_test` build rule in your
-`Android.bp` by adding the defaults provided:
-
-```soong
-rust_test {
- name: "mycrate.test",
- defaults: ["ignorabletest.defaults"],
- // ...
-}
-```
-
-If you are testing a binary that has a `main` function, you'll need to remove it from the test
-build:
-
-```rust
-#[cfg(not(test))]
-fn main() {
- // ...
-}
-```
-
-(If you're testing a library or anything else which doesn't have a `main` function, you don't need
-to worry about this.)
-
-Each test case should be marked with the `ignorabletest::test!` macro, rather than the standard
-`#[test]` attribute:
-
-```rust
-use ignorabletest::test;
-
-test!(one_plus_one);
-fn one_plus_one {
- assert_eq!(1 + 1, 2);
-}
-```
-
-To ignore a test, you can add an `ignore_if` clause with a boolean expression:
-
-```rust
-use ignorabletest::test;
-
-test!(clap_hands, ignore_if: !feeling_happy());
-fn clap_hands {
- assert!(HANDS.clap().is_ok());
-}
-```
-
-Somewhere in your main module, you need to use the `test_main` macro to generate an entry point for
-the test harness:
-
-```rust
-#[cfg(test)]
-ignorabletest::test_main!();
-```
-
-You can then run your tests as usual with `atest`.
diff --git a/libs/ignorabletest/src/lib.rs b/libs/ignorabletest/src/lib.rs
deleted file mode 100644
index c7243e6..0000000
--- a/libs/ignorabletest/src/lib.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-//! Test harness which supports ignoring tests at runtime.
-
-pub mod runner;
-
-#[doc(hidden)]
-pub use libtest_mimic as _libtest_mimic;
-#[doc(hidden)]
-pub use linkme as _linkme;
-#[doc(hidden)]
-pub use paste as _paste;
-
-/// Macro to generate the main function for the test harness.
-#[macro_export]
-macro_rules! test_main {
- () => {
- #[cfg(test)]
- fn main() {
- ignorabletest::runner::main()
- }
- };
-}
-
-/// Macro to generate a wrapper function for a single test.
-///
-/// # Usage
-///
-/// ```
-/// test!(test_string_equality);
-/// fn test_string_equality() {
-/// assert_eq!("", "");
-/// }
-/// ```
-#[macro_export]
-macro_rules! test {
- ($test_name:ident) => {
- $crate::_paste::paste!(
- #[$crate::_linkme::distributed_slice($crate::runner::IGNORABLETEST_TESTS)]
- fn [< __test_ $test_name >]() -> $crate::_libtest_mimic::Trial {
- $crate::_libtest_mimic::Trial::test(
- ::std::stringify!($test_name),
- move || ignorabletest::runner::run($test_name),
- )
- }
- );
- };
- ($test_name:ident, ignore_if: $ignore_expr:expr) => {
- $crate::_paste::paste!(
- #[$crate::_linkme::distributed_slice($crate::runner::IGNORABLETEST_TESTS)]
- fn [< __test_ $test_name >]() -> $crate::_libtest_mimic::Trial {
- $crate::_libtest_mimic::Trial::test(
- ::std::stringify!($test_name),
- move || ignorabletest::runner::run($test_name),
- ).with_ignored_flag($ignore_expr)
- }
- );
- };
-}
diff --git a/libs/ignorabletest/src/runner.rs b/libs/ignorabletest/src/runner.rs
deleted file mode 100644
index 4ec3d79..0000000
--- a/libs/ignorabletest/src/runner.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-//! Test runner.
-
-use core::ops::{Deref, FnOnce};
-use libtest_mimic::{Arguments, Failed, Trial};
-use linkme::distributed_slice;
-use std::env;
-
-/// Command-line arguments to ignore, because they are not supported by libtest-mimic.
-const IGNORED_ARGS: [&str; 2] = ["-Zunstable-options", "--report-time"];
-
-/// The collection of all tests to run.
-#[doc(hidden)]
-#[distributed_slice]
-pub static IGNORABLETEST_TESTS: [fn() -> Trial] = [..];
-
-/// Runs all tests.
-pub fn main() {
- let args = Arguments::from_iter(env::args().filter(|arg| !IGNORED_ARGS.contains(&arg.deref())));
- let tests = IGNORABLETEST_TESTS.iter().map(|test| test()).collect();
- libtest_mimic::run(&args, tests).exit();
-}
-
-/// Runs the given test.
-pub fn run(test: impl FnOnce()) -> Result<(), Failed> {
- test();
- Ok(())
-}
diff --git a/libs/libfdt/Android.bp b/libs/libfdt/Android.bp
index 2a6e75f..0540f26 100644
--- a/libs/libfdt/Android.bp
+++ b/libs/libfdt/Android.bp
@@ -8,7 +8,6 @@
wrapper_src: "bindgen/fdt.h",
source_stem: "bindings",
bindgen_flags: [
- "--size_t-is-usize",
"--allowlist-type=fdt_.*",
"--allowlist-function=fdt_.*",
"--allowlist-var=FDT_.*",
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 8e0bb65..a305e03 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -205,7 +205,7 @@
}
/// Find parent node.
pub fn parent(&self) -> Result<Self> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
Ok(Self { fdt: self.fdt, offset: fdt_err(ret)? })
@@ -311,7 +311,7 @@
name: &CStr,
) -> Result<Option<(*const c_void, usize)>> {
let mut len: i32 = 0;
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) and the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) and the
// function respects the passed number of characters.
let prop = unsafe {
libfdt_bindgen::fdt_getprop_namelen(
@@ -342,7 +342,7 @@
}
fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe {
libfdt_bindgen::fdt_node_offset_by_compatible(
self.fdt.as_ptr(),
@@ -355,14 +355,14 @@
}
fn address_cells(&self) -> Result<AddrCells> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
unsafe { libfdt_bindgen::fdt_address_cells(self.fdt.as_ptr(), self.offset) }
.try_into()
.map_err(|_| FdtError::Internal)
}
fn size_cells(&self) -> Result<SizeCells> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
unsafe { libfdt_bindgen::fdt_size_cells(self.fdt.as_ptr(), self.offset) }
.try_into()
.map_err(|_| FdtError::Internal)
@@ -378,7 +378,7 @@
impl<'a> FdtNodeMut<'a> {
/// Append a property name-value (possibly empty) pair to the given node.
pub fn appendprop<T: AsRef<[u8]>>(&mut self, name: &CStr, value: &T) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
let ret = unsafe {
libfdt_bindgen::fdt_appendprop(
self.fdt.as_mut_ptr(),
@@ -394,7 +394,7 @@
/// Append a (address, size) pair property to the given node.
pub fn appendprop_addrrange(&mut self, name: &CStr, addr: u64, size: u64) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
let ret = unsafe {
libfdt_bindgen::fdt_appendprop_addrrange(
self.fdt.as_mut_ptr(),
@@ -411,7 +411,7 @@
/// Create or change a property name-value pair to the given node.
pub fn setprop(&mut self, name: &CStr, value: &[u8]) -> Result<()> {
- // SAFETY - New value size is constrained to the DT totalsize
+ // SAFETY: New value size is constrained to the DT totalsize
// (validated by underlying libfdt).
let ret = unsafe {
libfdt_bindgen::fdt_setprop(
@@ -429,7 +429,7 @@
/// Replace the value of the given property with the given value, and ensure that the given
/// value has the same length as the current value length
pub fn setprop_inplace(&mut self, name: &CStr, value: &[u8]) -> Result<()> {
- // SAFETY - fdt size is not altered
+ // SAFETY: fdt size is not altered
let ret = unsafe {
libfdt_bindgen::fdt_setprop_inplace(
self.fdt.as_mut_ptr(),
@@ -457,7 +457,7 @@
/// Delete the given property.
pub fn delprop(&mut self, name: &CStr) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) when the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) when the
// library locates the node's property. Removing the property may shift the offsets of
// other nodes and properties but the borrow checker should prevent this function from
// being called when FdtNode instances are in use.
@@ -470,7 +470,7 @@
/// Overwrite the given property with FDT_NOP, effectively removing it from the DT.
pub fn nop_property(&mut self, name: &CStr) -> Result<()> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) when the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) when the
// library locates the node's property.
let ret = unsafe {
libfdt_bindgen::fdt_nop_property(self.fdt.as_mut_ptr(), self.offset, name.as_ptr())
@@ -490,7 +490,7 @@
return Err(FdtError::NoSpace);
}
- // SAFETY - new_size is smaller than the old size
+ // SAFETY: new_size is smaller than the old size
let ret = unsafe {
libfdt_bindgen::fdt_setprop(
self.fdt.as_mut_ptr(),
@@ -511,7 +511,7 @@
/// Add a new subnode to the given node and return it as a FdtNodeMut on success.
pub fn add_subnode(&'a mut self, name: &CStr) -> Result<Self> {
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor).
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor).
let ret = unsafe {
libfdt_bindgen::fdt_add_subnode(self.fdt.as_mut_ptr(), self.offset, name.as_ptr())
};
@@ -520,7 +520,7 @@
}
fn parent(&'a self) -> Result<FdtNode<'a>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe { libfdt_bindgen::fdt_parent_offset(self.fdt.as_ptr(), self.offset) };
Ok(FdtNode { fdt: &*self.fdt, offset: fdt_err(ret)? })
@@ -528,7 +528,7 @@
/// Return the compatible node of the given name that is next to this node
pub fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe {
libfdt_bindgen::fdt_node_offset_by_compatible(
self.fdt.as_ptr(),
@@ -553,7 +553,7 @@
// mutable reference to DT, so we can't use current node (which also has a mutable reference to
// DT).
pub fn delete_and_next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
- // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ // SAFETY: Accesses (read-only) are constrained to the DT totalsize.
let ret = unsafe {
libfdt_bindgen::fdt_node_offset_by_compatible(
self.fdt.as_ptr(),
@@ -563,7 +563,7 @@
};
let next_offset = fdt_err_or_option(ret)?;
- // SAFETY - fdt_nop_node alter only the bytes in the blob which contain the node and its
+ // SAFETY: fdt_nop_node alters only the bytes in the blob which contain the node and its
// properties and subnodes, and will not alter or move any other part of the tree.
let ret = unsafe { libfdt_bindgen::fdt_nop_node(self.fdt.as_mut_ptr(), self.offset) };
fdt_err_expect_zero(ret)?;
@@ -611,7 +611,7 @@
///
/// Fails if the FDT does not pass validation.
pub fn from_slice(fdt: &[u8]) -> Result<&Self> {
- // SAFETY - The FDT will be validated before it is returned.
+ // SAFETY: The FDT will be validated before it is returned.
let fdt = unsafe { Self::unchecked_from_slice(fdt) };
fdt.check_full()?;
Ok(fdt)
@@ -621,7 +621,7 @@
///
/// Fails if the FDT does not pass validation.
pub fn from_mut_slice(fdt: &mut [u8]) -> Result<&mut Self> {
- // SAFETY - The FDT will be validated before it is returned.
+ // SAFETY: The FDT will be validated before it is returned.
let fdt = unsafe { Self::unchecked_from_mut_slice(fdt) };
fdt.check_full()?;
Ok(fdt)
@@ -629,7 +629,7 @@
/// Creates an empty Flattened Device Tree with a mutable slice.
pub fn create_empty_tree(fdt: &mut [u8]) -> Result<&mut Self> {
- // SAFETY - fdt_create_empty_tree() only write within the specified length,
+ // SAFETY: fdt_create_empty_tree() only writes within the specified length,
// and returns error if buffer was insufficient.
// There will be no memory write outside of the given fdt.
let ret = unsafe {
@@ -640,7 +640,7 @@
};
fdt_err_expect_zero(ret)?;
- // SAFETY - The FDT will be validated before it is returned.
+ // SAFETY: The FDT will be validated before it is returned.
let fdt = unsafe { Self::unchecked_from_mut_slice(fdt) };
fdt.check_full()?;
@@ -653,7 +653,9 @@
///
/// The returned FDT might be invalid, only use on slices containing a valid DT.
pub unsafe fn unchecked_from_slice(fdt: &[u8]) -> &Self {
- mem::transmute::<&[u8], &Self>(fdt)
+ // SAFETY: Fdt is a wrapper around a [u8], so the transmute is valid. The caller is
+ // responsible for ensuring that it is actually a valid FDT.
+ unsafe { mem::transmute::<&[u8], &Self>(fdt) }
}
/// Wraps a mutable slice containing a Flattened Device Tree.
@@ -662,7 +664,9 @@
///
/// The returned FDT might be invalid, only use on slices containing a valid DT.
pub unsafe fn unchecked_from_mut_slice(fdt: &mut [u8]) -> &mut Self {
- mem::transmute::<&mut [u8], &mut Self>(fdt)
+ // SAFETY: Fdt is a wrapper around a [u8], so the transmute is valid. The caller is
+ // responsible for ensuring that it is actually a valid FDT.
+ unsafe { mem::transmute::<&mut [u8], &mut Self>(fdt) }
}
/// Update this FDT from a slice containing another FDT
@@ -682,7 +686,7 @@
/// Make the whole slice containing the DT available to libfdt.
pub fn unpack(&mut self) -> Result<()> {
- // SAFETY - "Opens" the DT in-place (supported use-case) by updating its header and
+ // SAFETY: "Opens" the DT in-place (supported use-case) by updating its header and
// internal structures to make use of the whole self.fdt slice but performs no accesses
// outside of it and leaves the DT in a state that will be detected by other functions.
let ret = unsafe {
@@ -699,7 +703,7 @@
///
/// Doesn't shrink the underlying memory slice.
pub fn pack(&mut self) -> Result<()> {
- // SAFETY - "Closes" the DT in-place by updating its header and relocating its structs.
+ // SAFETY: "Closes" the DT in-place by updating its header and relocating its structs.
let ret = unsafe { libfdt_bindgen::fdt_pack(self.as_mut_ptr()) };
fdt_err_expect_zero(ret)
}
@@ -710,10 +714,12 @@
///
/// On failure, the library corrupts the DT and overlay so both must be discarded.
pub unsafe fn apply_overlay<'a>(&'a mut self, overlay: &'a mut Fdt) -> Result<&'a mut Self> {
- fdt_err_expect_zero(libfdt_bindgen::fdt_overlay_apply(
- self.as_mut_ptr(),
- overlay.as_mut_ptr(),
- ))?;
+ let ret =
+ // SAFETY: Both pointers are valid because they come from references, and fdt_overlay_apply
+ // doesn't keep them after it returns. It may corrupt their contents if there is an error,
+ // but that's our caller's responsibility.
+ unsafe { libfdt_bindgen::fdt_overlay_apply(self.as_mut_ptr(), overlay.as_mut_ptr()) };
+ fdt_err_expect_zero(ret)?;
Ok(self)
}
@@ -779,7 +785,7 @@
fn path_offset(&self, path: &CStr) -> Result<Option<c_int>> {
let len = path.to_bytes().len().try_into().map_err(|_| FdtError::BadPath)?;
- // SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) and the
+ // SAFETY: Accesses are constrained to the DT totalsize (validated by ctor) and the
// function respects the passed number of characters.
let ret = unsafe {
// *_namelen functions don't include the trailing nul terminator in 'len'.
@@ -791,7 +797,7 @@
fn check_full(&self) -> Result<()> {
let len = self.buffer.len();
- // SAFETY - Only performs read accesses within the limits of the slice. If successful, this
+ // SAFETY: Only performs read accesses within the limits of the slice. If successful, this
// call guarantees to other unsafe calls that the header contains a valid totalsize (w.r.t.
// 'len' i.e. the self.fdt slice) that those C functions can use to perform bounds
// checking. The library doesn't maintain an internal state (such as pointers) between
@@ -815,7 +821,7 @@
fn header(&self) -> &libfdt_bindgen::fdt_header {
let p = self.as_ptr().cast::<_>();
- // SAFETY - A valid FDT (verified by constructor) must contain a valid fdt_header.
+ // SAFETY: A valid FDT (verified by constructor) must contain a valid fdt_header.
unsafe { &*p }
}
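
The hunks above standardize the `// SAFETY:` comment prefix and make the remaining `unsafe` blocks explicit inside `unsafe fn`s. Caller-side code follows the same discipline; a sketch, where only `from_slice`/`unchecked_from_slice` are taken from the code above and everything else is illustrative:

```rust
use libfdt::Fdt;

/// Preferred path: the checked constructor validates the blob before returning it.
fn parse(dt_blob: &[u8]) -> libfdt::Result<&Fdt> {
    Fdt::from_slice(dt_blob)
}

/// Only for blobs this component produced itself earlier in the same boot stage.
fn parse_trusted(dt_blob: &[u8]) -> &Fdt {
    // SAFETY: dt_blob was generated and validated by us, so skipping the
    // check_full() pass of the safe constructor is sound here.
    unsafe { Fdt::unchecked_from_slice(dt_blob) }
}
```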
diff --git a/libs/microdroid_uids/Android.bp b/libs/microdroid_uids/Android.bp
new file mode 100644
index 0000000..497948d
--- /dev/null
+++ b/libs/microdroid_uids/Android.bp
@@ -0,0 +1,15 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+ name: "libmicrodroid_uids",
+ crate_name: "microdroid_uids",
+ srcs: ["src/lib.rs"],
+ edition: "2021",
+ // TODO(b/296393106): Figure out how/when to enable this
+ // cfgs: ["payload_not_root"],
+ apex_available: [
+ "com.android.virt",
+ ],
+}
diff --git a/libs/microdroid_uids/src/lib.rs b/libs/microdroid_uids/src/lib.rs
new file mode 100644
index 0000000..1f09c65
--- /dev/null
+++ b/libs/microdroid_uids/src/lib.rs
@@ -0,0 +1,24 @@
+// Copyright 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! User and group IDs within Microdroid
+
+/// Always the user ID of Root.
+pub const ROOT_UID: u32 = 0;
+
+/// Group ID shared by all payload users.
+pub const MICRODROID_PAYLOAD_GID: u32 = if cfg!(payload_not_root) { 6000 } else { 0 };
+
+/// User ID for the initial payload user.
+pub const MICRODROID_PAYLOAD_UID: u32 = if cfg!(payload_not_root) { 6000 } else { 0 };
diff --git a/libs/service_vm_comm/Android.bp b/libs/service_vm_comm/Android.bp
new file mode 100644
index 0000000..18397c5
--- /dev/null
+++ b/libs/service_vm_comm/Android.bp
@@ -0,0 +1,36 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libservice_vm_comm_defaults",
+ crate_name: "service_vm_comm",
+ srcs: ["src/lib.rs"],
+ prefer_rlib: true,
+ apex_available: [
+ "com.android.virt",
+ ],
+}
+
+rust_library_rlib {
+ name: "libservice_vm_comm_nostd",
+ defaults: ["libservice_vm_comm_defaults"],
+ no_stdlibs: true,
+ stdlibs: [
+ "libcore.rust_sysroot",
+ ],
+ rustlibs: [
+ "libserde_nostd",
+ ],
+}
+
+rust_library {
+ name: "libservice_vm_comm",
+ defaults: ["libservice_vm_comm_defaults"],
+ rustlibs: [
+ "libserde",
+ ],
+ features: [
+ "std",
+ ],
+}
diff --git a/libs/service_vm_comm/src/lib.rs b/libs/service_vm_comm/src/lib.rs
new file mode 100644
index 0000000..c3d3ed5
--- /dev/null
+++ b/libs/service_vm_comm/src/lib.rs
@@ -0,0 +1,24 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This library contains the communication protocol used between the host
+//! and the service VM.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+mod message;
+
+pub use message::{Request, Response};
diff --git a/libs/service_vm_comm/src/message.rs b/libs/service_vm_comm/src/message.rs
new file mode 100644
index 0000000..ebbefcb
--- /dev/null
+++ b/libs/service_vm_comm/src/message.rs
@@ -0,0 +1,39 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module contains the request and response definitions exchanged
+//! between the host and the service VM.
+
+use alloc::vec::Vec;
+
+use serde::{Deserialize, Serialize};
+
+/// Represents a request to be sent to the service VM.
+///
+/// Each request has a corresponding response item.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum Request {
+ /// Reverse the order of the bytes in the provided byte array.
+ /// Currently this is only used for testing.
+ Reverse(Vec<u8>),
+}
+
+/// Represents a response to a request sent to the service VM.
+///
+/// Each response corresponds to a specific request.
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub enum Response {
+ /// Reverse the order of the bytes in the provided byte array.
+ Reverse(Vec<u8>),
+}
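
Each `Request` variant is expected to pair with the `Response` variant of the same name. A transport-agnostic sketch of the only exchange defined so far; the `send` closure stands in for whatever channel will eventually carry the serialized messages:

```rust
use service_vm_comm::{Request, Response};

fn check_reverse(send: impl Fn(Request) -> Response) {
    let payload = b"hello".to_vec();
    let expected: Vec<u8> = payload.iter().rev().copied().collect();

    // The service VM is expected to echo the bytes back in reverse order.
    match send(Request::Reverse(payload)) {
        Response::Reverse(reversed) => assert_eq!(reversed, expected),
    }
}
```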
diff --git a/libs/statslog_virtualization/statslog_wrapper.rs b/libs/statslog_virtualization/statslog_wrapper.rs
index 4d1a0fa..b069d7c 100644
--- a/libs/statslog_virtualization/statslog_wrapper.rs
+++ b/libs/statslog_virtualization/statslog_wrapper.rs
@@ -1,4 +1,5 @@
#![allow(clippy::too_many_arguments)]
+#![allow(clippy::undocumented_unsafe_blocks)]
#![allow(missing_docs)]
#![allow(unused)]
diff --git a/libs/vmconfig/src/lib.rs b/libs/vmconfig/src/lib.rs
index 7ca8272..50f3c8e 100644
--- a/libs/vmconfig/src/lib.rs
+++ b/libs/vmconfig/src/lib.rs
@@ -21,7 +21,7 @@
binder::ParcelFileDescriptor,
};
-use anyhow::{bail, Context, Error, Result};
+use anyhow::{anyhow, bail, Context, Error, Result};
use semver::VersionReq;
use serde::{Deserialize, Serialize};
use std::convert::TryInto;
@@ -57,6 +57,9 @@
/// Version or range of versions of the virtual platform that this config is compatible with.
/// The format follows SemVer (https://semver.org).
pub platform_version: VersionReq,
+ /// SysFS paths of devices assigned to the VM.
+ #[serde(default)]
+ pub devices: Vec<PathBuf>,
}
impl VmConfig {
@@ -103,6 +106,13 @@
protectedVm: self.protected,
memoryMib: memory_mib,
platformVersion: self.platform_version.to_string(),
+ devices: self
+ .devices
+ .iter()
+ .map(|x| {
+ x.to_str().map(String::from).ok_or(anyhow!("Failed to convert {x:?} to String"))
+ })
+ .collect::<Result<_>>()?,
..Default::default()
})
}
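
The `devices` conversion above relies on the collect-into-`Result` idiom: each `PathBuf` maps to a `Result<String>`, and collecting into `Result<Vec<String>>` aborts at the first path that is not valid UTF-8. The same pattern in isolation:

```rust
use anyhow::{anyhow, Result};
use std::path::PathBuf;

fn paths_to_strings(devices: &[PathBuf]) -> Result<Vec<String>> {
    devices
        .iter()
        .map(|x| x.to_str().map(String::from).ok_or(anyhow!("Failed to convert {x:?} to String")))
        .collect()
}
```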
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 37f68a2..1e594b7 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -23,6 +23,10 @@
"apex",
"linkerconfig",
"second_stage_resources",
+
+ // Ideally we should only create the /vendor for Microdroid VMs that will mount /vendor, but
+ // for the time being we will just create it unconditionally.
+ "vendor",
]
microdroid_symlinks = [
@@ -50,15 +54,15 @@
deps: [
"init_second_stage.microdroid",
"microdroid_build_prop",
+ "microdroid_etc_passwd",
+ "microdroid_etc_group",
"microdroid_init_debug_policy",
"microdroid_init_rc",
"microdroid_ueventd_rc",
"microdroid_launcher",
- "libbinder",
"libbinder_ndk",
"libstdc++",
- "secilc",
// "com.android.adbd" requires these,
"libadbd_auth",
@@ -154,6 +158,20 @@
installable: false, // avoid collision with system partition's ueventd.rc
}
+prebuilt_etc {
+ name: "microdroid_etc_passwd",
+ src: "microdroid_passwd",
+ filename: "passwd",
+ installable: false,
+}
+
+prebuilt_etc {
+ name: "microdroid_etc_group",
+ src: "microdroid_group",
+ filename: "group",
+ installable: false,
+}
+
prebuilt_root {
name: "microdroid_build_prop",
filename: "build.prop",
diff --git a/microdroid/README.md b/microdroid/README.md
index 5e3f586..dd1505f 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -138,73 +138,6 @@
If you are looking for an example usage of the APIs, you may refer to the [demo
app](https://android.googlesource.com/platform/packages/modules/Virtualization/+/refs/heads/master/demo/).
-## Debuggable microdroid
+## Debugging Microdroid
-### Debugging features
-Microdroid supports following debugging features:
-
-- VM log
-- console output
-- kernel output
-- logcat output
-- [ramdump](../docs/debug/ramdump.md)
-- crashdump
-- [adb](#adb)
-- [gdb](#debugging-the-payload-on-microdroid)
-
-### Enabling debugging features
-There's two ways to enable the debugging features:
-
-#### Option 1) Running microdroid on AVF debug policy configured device
-
-microdroid can be started with debugging features by debug policies from the
-host. Host bootloader may provide debug policies to host OS's device tree for
-VMs. Host bootloader MUST NOT provide debug policies for locked devices for
-security reasons.
-
-For protected VM, such device tree will be available in microdroid. microdroid
-can check which debuging features is enabled.
-
-Here are list of device tree properties for debugging features.
-
-- `/avf/guest/common/log`: `<1>` to enable kernel log and logcat. Ignored
- otherwise.
-- `/avf/guest/common/ramdump`: `<1>` to enable ramdump. Ignored otherwise.
-- `/avf/guest/microdroid/adb`: `<1>` to enable `adb`. Ignored otherwise.
-
-#### Option 2) Lauching microdroid with debug level.
-
-microdroid can be started with debugging features. To do so, first, delete
-`$TEST_ROOT/instance.img`; this is because changing debug settings requires a
-new instance. Then add the `--debug=full` flag to the
-`/apex/com.android.virt/bin/vm run-app` command. This will enable all debugging
-features.
-
-### ADB
-
-If `adb` connection is enabled, launch following command.
-
-```sh
-vm_shell
-```
-
-Done. Now you are logged into Microdroid. Have fun!
-
-Once you have an adb connection with `vm_shell`, `localhost:8000` will be the
-serial of microdroid.
-
-### Debugging the payload on microdroid
-
-Like a normal adb device, you can debug native processes using `lldbclient.py`
-script, either by running a new process, or attaching to an existing process.
-Use `vm_shell` tool above, and then run `lldbclient.py`.
-
-```sh
-adb -s localhost:8000 shell 'mount -o remount,exec /data'
-development/scripts/lldbclient.py -s localhost:8000 --chroot . --user '' \
- (-p PID | -n NAME | -r ...)
-```
-
-**Note:** We need to pass `--chroot .` to skip verifying device, because
-microdroid doesn't match with the host's lunch target. We need to also pass
-`--user ''` as there is no `su` binary in microdroid.
+Refer to [Debugging protected VMs](../docs/debug/README.md).
diff --git a/microdroid/bootconfig.x86_64 b/microdroid/bootconfig.x86_64
index 6076889..eed9212 100644
--- a/microdroid/bootconfig.x86_64
+++ b/microdroid/bootconfig.x86_64
@@ -1 +1 @@
-androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0
+androidboot.boot_devices = pci0000:00/0000:00:04.0,pci0000:00/0000:00:05.0,pci0000:00/0000:00:06.0,pci0000:00/0000:00:07.0
diff --git a/microdroid/fstab.microdroid b/microdroid/fstab.microdroid
index 9478c7c..da000b9 100644
--- a/microdroid/fstab.microdroid
+++ b/microdroid/fstab.microdroid
@@ -1 +1,7 @@
system /system ext4 noatime,ro,errors=panic wait,slotselect,avb=vbmeta,first_stage_mount,logical
+# This is a temporary solution to unblock other developers who depend on the /vendor partition in Microdroid.
+# The /vendor partition will only be mounted if the kernel cmdline contains
+# androidboot.microdroid.mount_vendor=1.
+# TODO(b/285855430): this should probably be defined in the DT
+# TODO(b/285855436): should be mounted on top of dm-verity device
+/dev/block/by-name/microdroid-vendor /vendor ext4 noatime,ro,errors=panic wait,first_stage_mount
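
As a rough sketch of the gating described in the comment above: the flag name is taken from the comment, but the check itself is hypothetical and not part of this change (the real gating happens in init/first-stage mount, not in a userspace helper).

```rust
use std::fs;
use std::io;

// Hypothetical gate mirroring the fstab comment: only consider /vendor mountable when the
// kernel cmdline carries androidboot.microdroid.mount_vendor=1.
fn should_mount_vendor() -> io::Result<bool> {
    let cmdline = fs::read_to_string("/proc/cmdline")?;
    Ok(cmdline.split_whitespace().any(|arg| arg == "androidboot.microdroid.mount_vendor=1"))
}

fn main() {
    match should_mount_vendor() {
        Ok(true) => println!("/vendor would be mounted"),
        Ok(false) => println!("/vendor stays unmounted"),
        Err(e) => eprintln!("could not read /proc/cmdline: {e}"),
    }
}
```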
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 42033d6..c257cdb 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -12,6 +12,11 @@
# Cgroups are mounted right before early-init using list from /etc/cgroups.json
on early-init
+ # Android doesn't need kernel module autoloading, and it causes SELinux
+ # denials. So disable it by setting modprobe to the empty string. Note: to
+ # explicitly set a sysctl to an empty string, a trailing newline is needed.
+ write /proc/sys/kernel/modprobe \n
+
# set RLIMIT_NICE to allow priorities from 19 to -20
setrlimit nice 40 40
@@ -28,6 +33,10 @@
on init
mkdir /mnt/apk 0755 system system
mkdir /mnt/extra-apk 0755 root root
+
+ # Allow the payload access to the console (default is 0600)
+ chmod 0666 /dev/console
+
# Microdroid_manager starts apkdmverity/zipfuse/apexd
start microdroid_manager
diff --git a/microdroid/kdump/Android.bp b/microdroid/kdump/Android.bp
index cc681a7..b9a18fe 100644
--- a/microdroid/kdump/Android.bp
+++ b/microdroid/kdump/Android.bp
@@ -18,6 +18,9 @@
static_executable: true,
installable: false,
compile_multilib: "64",
+ sanitize: {
+ hwaddress: false, // HWASAN setup fails when run as init process
+ },
}
android_filesystem {
diff --git a/microdroid/microdroid_group b/microdroid/microdroid_group
new file mode 100644
index 0000000..4eb8fa5
--- /dev/null
+++ b/microdroid/microdroid_group
@@ -0,0 +1 @@
+system_payload::6000:
diff --git a/microdroid/microdroid_passwd b/microdroid/microdroid_passwd
new file mode 100644
index 0000000..bd15182
--- /dev/null
+++ b/microdroid/microdroid_passwd
@@ -0,0 +1 @@
+system_payload_0::6000:6000:::
diff --git a/microdroid/payload/Android.bp b/microdroid/payload/Android.bp
index 4814a64..8225875 100644
--- a/microdroid/payload/Android.bp
+++ b/microdroid/payload/Android.bp
@@ -31,6 +31,7 @@
protos: ["metadata.proto"],
source_stem: "microdroid_metadata",
host_supported: true,
+ use_protobuf3: true,
apex_available: [
"com.android.virt",
],
diff --git a/microdroid/payload/metadata/Android.bp b/microdroid/payload/metadata/Android.bp
index e3138e8..cd182fc 100644
--- a/microdroid/payload/metadata/Android.bp
+++ b/microdroid/payload/metadata/Android.bp
@@ -12,7 +12,7 @@
rustlibs: [
"libanyhow",
"libmicrodroid_metadata_proto_rust",
- "libprotobuf_deprecated",
+ "libprotobuf",
],
apex_available: [
"com.android.virt",
diff --git a/microdroid/payload/metadata/src/lib.rs b/microdroid/payload/metadata/src/lib.rs
index bfbec60..f00391a 100644
--- a/microdroid/payload/metadata/src/lib.rs
+++ b/microdroid/payload/metadata/src/lib.rs
@@ -24,7 +24,7 @@
use std::io::Write;
pub use microdroid_metadata::metadata::{
- ApexPayload, ApkPayload, Metadata, Metadata_oneof_payload as PayloadMetadata, PayloadConfig,
+ metadata::Payload as PayloadMetadata, ApexPayload, ApkPayload, Metadata, PayloadConfig,
};
/// Reads a metadata from a reader
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index 495d3bb..fe0cf6a 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -13,7 +13,9 @@
"android.system.virtualizationservice-rust",
"android.system.virtualmachineservice-rust",
"android.system.virtualization.payload-rust",
+ "libandroid_logger",
"libanyhow",
+ "libavflog",
"libapexutil_rust",
"libapkverify",
"libbinder_rs",
@@ -25,12 +27,12 @@
"libglob",
"libhex",
"libitertools",
- "libkernlog",
"libkeystore2_crypto_rust",
"liblibc",
"liblog_rust",
"libmicrodroid_metadata",
"libmicrodroid_payload_config",
+ "libmicrodroid_uids",
"libnix",
"libonce_cell",
"libopenssl",
diff --git a/microdroid_manager/microdroid_manager.rc b/microdroid_manager/microdroid_manager.rc
index e257547..da38564 100644
--- a/microdroid_manager/microdroid_manager.rc
+++ b/microdroid_manager/microdroid_manager.rc
@@ -8,8 +8,8 @@
# TODO(jooyung) remove this when microdroid_manager becomes a daemon
oneshot
# CAP_SYS_BOOT is required to exec kexecload from microdroid_manager
- # CAP_SETCAP is required to allow microdroid_manager to drop capabilities
+ # CAP_SETPCAP is required to allow microdroid_manager to drop capabilities
# before executing the payload
- capabilities AUDIT_CONTROL SYS_ADMIN SYS_BOOT SETPCAP
+ capabilities AUDIT_CONTROL SYS_ADMIN SYS_BOOT SETPCAP SETUID SETGID
user root
socket vm_payload_service stream 0666 system system
diff --git a/microdroid_manager/src/dice.rs b/microdroid_manager/src/dice.rs
index 3a2a1e6..27ec7a5 100644
--- a/microdroid_manager/src/dice.rs
+++ b/microdroid_manager/src/dice.rs
@@ -78,7 +78,7 @@
let mmap_size =
file.read_u64::<NativeEndian>()
.map_err(|error| Error::new(error).context("Reading driver"))? as usize;
- // It's safe to map the driver as the service will only create a single
+ // SAFETY: It's safe to map the driver as the service will only create a single
// mapping per process.
let mmap_addr = unsafe {
let fd = file.as_raw_fd();
@@ -87,10 +87,10 @@
if mmap_addr == MAP_FAILED {
bail!("Failed to mmap {:?}", driver_path);
}
- // The slice is created for the region of memory that was just
+ let mmap_buf =
+ // SAFETY: The slice is created for the region of memory that was just
// successfully mapped into the process address space so it will be
// accessible and not referenced from anywhere else.
- let mmap_buf =
unsafe { slice::from_raw_parts((mmap_addr as *const u8).as_ref().unwrap(), mmap_size) };
let bcc_handover =
bcc_handover_parse(mmap_buf).map_err(|_| anyhow!("Failed to parse Bcc Handover"))?;
@@ -149,9 +149,9 @@
impl Drop for DiceDriver<'_> {
fn drop(&mut self) {
if let &mut Self::Real { mmap_addr, mmap_size, .. } = self {
- // All references to the mapped region have the same lifetime as self. Since self is
- // being dropped, so are all the references to the mapped region meaning its safe to
- // unmap.
+ // SAFETY: All references to the mapped region have the same lifetime as self. Since
+ // self is being dropped, so are all the references to the mapped region meaning it's
+ // safe to unmap.
let ret = unsafe { munmap(mmap_addr, mmap_size) };
if ret != 0 {
log::warn!("Failed to munmap ({})", ret);
@@ -164,27 +164,29 @@
/// https://cs.android.com/android/platform/superproject/+/master:hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/ProtectedData.aidl
/// {
/// -70002: "Microdroid payload",
-/// ? -71000: tstr // payload_config_path
+/// ? -71000: tstr ; payload_config_path
/// ? -71001: PayloadConfig
/// }
/// PayloadConfig = {
-/// 1: tstr // payload_binary_name
+/// 1: tstr ; payload_binary_name
/// }
-pub fn format_payload_config_descriptor(payload_metadata: &PayloadMetadata) -> Result<Vec<u8>> {
+pub fn format_payload_config_descriptor(payload: &PayloadMetadata) -> Result<Vec<u8>> {
const MICRODROID_PAYLOAD_COMPONENT_NAME: &str = "Microdroid payload";
- let config_descriptor_cbor_value = match payload_metadata {
- PayloadMetadata::config_path(payload_config_path) => cbor!({
+ let config_descriptor_cbor_value = match payload {
+ PayloadMetadata::ConfigPath(payload_config_path) => cbor!({
-70002 => MICRODROID_PAYLOAD_COMPONENT_NAME,
-71000 => payload_config_path
}),
- PayloadMetadata::config(payload_config) => cbor!({
+ PayloadMetadata::Config(payload_config) => cbor!({
-70002 => MICRODROID_PAYLOAD_COMPONENT_NAME,
-71001 => {1 => payload_config.payload_binary_name}
}),
+ _ => bail!("Failed to match the payload against a config type: {:?}", payload),
}
.context("Failed to build a CBOR Value from payload metadata")?;
let mut config_descriptor = Vec::new();
+
ser::into_writer(&config_descriptor_cbor_value, &mut config_descriptor)?;
Ok(config_descriptor)
}
@@ -196,7 +198,7 @@
#[test]
fn payload_metadata_with_path_formats_correctly() -> Result<()> {
- let payload_metadata = PayloadMetadata::config_path("/config_path".to_string());
+ let payload_metadata = PayloadMetadata::ConfigPath("/config_path".to_string());
let config_descriptor = format_payload_config_descriptor(&payload_metadata)?;
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
@@ -214,7 +216,7 @@
payload_binary_name: "payload_binary".to_string(),
..Default::default()
};
- let payload_metadata = PayloadMetadata::config(payload_config);
+ let payload_metadata = PayloadMetadata::Config(payload_config);
let config_descriptor = format_payload_config_descriptor(&payload_metadata)?;
static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
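
For reference, a minimal standalone sketch of the "config path" flavour of the descriptor built by `format_payload_config_descriptor`, assuming the `ciborium` `cbor!`/`ser::into_writer` APIs used in the hunk above; the helper name is illustrative.

```rust
use anyhow::{Context, Result};
use ciborium::{cbor, ser};

// Illustrative standalone version of the config-path flavour of the descriptor described by
// the CDDL above: {-70002: "Microdroid payload", -71000: payload_config_path}.
fn format_config_path_descriptor(payload_config_path: &str) -> Result<Vec<u8>> {
    let value = cbor!({
        -70002 => "Microdroid payload",
        -71000 => payload_config_path
    })
    .context("Failed to build a CBOR Value")?;
    let mut encoded = Vec::new();
    ser::into_writer(&value, &mut encoded)?;
    Ok(encoded)
}

fn main() -> Result<()> {
    let descriptor = format_config_path_descriptor("/config_path")?;
    // A CBOR map with two entries starts with the byte 0xa2, matching the unit test above.
    assert_eq!(descriptor[0], 0xa2);
    Ok(())
}
```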
diff --git a/microdroid_manager/src/ioutil.rs b/microdroid_manager/src/ioutil.rs
index d36e349..772941d 100644
--- a/microdroid_manager/src/ioutil.rs
+++ b/microdroid_manager/src/ioutil.rs
@@ -28,6 +28,10 @@
const SLEEP_DURATION: Duration = Duration::from_millis(5);
/// waits for a file with a timeout and returns it
+///
+/// WARNING: This only guarantees that the file has been created. If another
+/// thread is still writing to the file while you are waiting for it, reading it
+/// must be synchronized by some mechanism other than just waiting for its creation.
pub fn wait_for_file<P: AsRef<Path> + Debug>(path: P, timeout: Duration) -> Result<File> {
debug!("waiting for {:?}...", path);
let begin = Instant::now();
@@ -64,15 +68,21 @@
#[cfg(test)]
mod tests {
use super::*;
+ use std::fs::rename;
use std::io::{Read, Write};
#[test]
fn test_wait_for_file() -> Result<()> {
let test_dir = tempfile::TempDir::new().unwrap();
let test_file = test_dir.path().join("test.txt");
+ let temp_file = test_dir.path().join("test.txt~");
thread::spawn(move || -> io::Result<()> {
+ // Sleep to ensure that `wait_for_file` actually waits
thread::sleep(Duration::from_secs(1));
- File::create(test_file)?.write_all(b"test")
+ // Write to a temp file and then rename it to avoid the race between
+ // write and read.
+ File::create(&temp_file)?.write_all(b"test")?;
+ rename(temp_file, test_file)
});
let test_file = test_dir.path().join("test.txt");
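
The write-to-temp-then-rename idiom used in the test can be sketched on its own; this is an illustrative helper, not code from this change.

```rust
use std::fs::{rename, File};
use std::io::{self, Read, Write};
use std::path::Path;

// Illustrative writer side of the pattern in the test: write the full contents under a
// temporary name, then rename. Within one filesystem the rename is atomic, so a reader that
// only waits for `path` to exist never observes a partially written file.
fn publish_atomically(path: &Path, contents: &[u8]) -> io::Result<()> {
    let temp = path.with_extension("tmp");
    File::create(&temp)?.write_all(contents)?;
    rename(temp, path)
}

fn main() -> io::Result<()> {
    let target = std::env::temp_dir().join("avf_wait_for_file_example.txt");
    publish_atomically(&target, b"test")?;
    let mut read_back = String::new();
    File::open(&target)?.read_to_string(&mut read_back)?;
    assert_eq!(read_back, "test");
    std::fs::remove_file(&target)
}
```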
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index 8fa2807..a48d540 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -193,7 +193,7 @@
/// Prepares a socket file descriptor for the vm payload service.
///
-/// # Safety requirement
+/// # Safety
///
/// The caller must ensure that this function is the only place that claims ownership
/// of the file descriptor and it is called only once.
@@ -211,7 +211,11 @@
}
fn try_main() -> Result<()> {
- let _ignored = kernlog::init();
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("microdroid_manager")
+ .with_min_level(log::Level::Info),
+ );
info!("started.");
// SAFETY: This is the only place we take the ownership of the fd of the vm payload service.
@@ -224,7 +228,7 @@
load_crashkernel_if_supported().context("Failed to load crashkernel")?;
- swap::init_swap().context("Failed to initialise swap")?;
+ swap::init_swap().context("Failed to initialize swap")?;
info!("swap enabled.");
let service = get_vms_rpc_binder()
@@ -263,6 +267,8 @@
if Path::new(ENCRYPTEDSTORE_BACKING_DEVICE).exists() {
let mountpoint = CString::new(ENCRYPTEDSTORE_MOUNTPOINT).unwrap();
+ // SAFETY: `mountpoint` is a valid C string. `syncfs` and `close` are safe for any parameter
+ // values.
let ret = unsafe {
let dirfd = libc::open(
mountpoint.as_ptr(),
@@ -431,8 +437,9 @@
// Restricted APIs are only allowed to be used by platform or test components. Infer this from
// the use of a VM config file since those can only be used by platform and test components.
let allow_restricted_apis = match payload_metadata {
- PayloadMetadata::config_path(_) => true,
- PayloadMetadata::config(_) => false,
+ PayloadMetadata::ConfigPath(_) => true,
+ PayloadMetadata::Config(_) => false,
+ _ => false, // default is false for safety
};
let config = load_config(payload_metadata).context("Failed to load payload metadata")?;
@@ -521,8 +528,6 @@
}
impl Zipfuse {
- const MICRODROID_PAYLOAD_UID: u32 = 0; // TODO(b/264861173) should be non-root
- const MICRODROID_PAYLOAD_GID: u32 = 0; // TODO(b/264861173) should be non-root
fn mount(
&mut self,
noexec: MountForExec,
@@ -535,9 +540,13 @@
if let MountForExec::Disallowed = noexec {
cmd.arg("--noexec");
}
+ // Let root own the files in the APK, so we can access them, but set the group to
+ // allow all payloads to have access too.
+ let (uid, gid) = (microdroid_uids::ROOT_UID, microdroid_uids::MICRODROID_PAYLOAD_GID);
+
cmd.args(["-p", &ready_prop, "-o", option]);
- cmd.args(["-u", &Self::MICRODROID_PAYLOAD_UID.to_string()]);
- cmd.args(["-g", &Self::MICRODROID_PAYLOAD_GID.to_string()]);
+ cmd.args(["-u", &uid.to_string()]);
+ cmd.args(["-g", &gid.to_string()]);
cmd.arg(zip_path).arg(mount_dir);
self.ready_properties.push(ready_prop);
cmd.spawn().with_context(|| format!("Failed to run zipfuse for {mount_dir:?}"))
@@ -788,14 +797,14 @@
fn load_config(payload_metadata: PayloadMetadata) -> Result<VmPayloadConfig> {
match payload_metadata {
- PayloadMetadata::config_path(path) => {
+ PayloadMetadata::ConfigPath(path) => {
let path = Path::new(&path);
info!("loading config from {:?}...", path);
let file = ioutil::wait_for_file(path, WAIT_TIMEOUT)
.with_context(|| format!("Failed to read {:?}", path))?;
Ok(serde_json::from_reader(file)?)
}
- PayloadMetadata::config(payload_config) => {
+ PayloadMetadata::Config(payload_config) => {
let task = Task {
type_: TaskType::MicrodroidLauncher,
command: payload_config.payload_binary_name,
@@ -810,6 +819,7 @@
enable_authfs: false,
})
}
+ _ => bail!("Failed to match config against a config type."),
}
}
@@ -842,27 +852,28 @@
fn exec_task(task: &Task, service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
info!("executing main task {:?}...", task);
let mut command = match task.type_ {
- TaskType::Executable => Command::new(&task.command),
+ TaskType::Executable => {
+ // TODO(b/296393106): Run system payloads as non-root.
+ Command::new(&task.command)
+ }
TaskType::MicrodroidLauncher => {
let mut command = Command::new("/system/bin/microdroid_launcher");
command.arg(find_library_path(&task.command)?);
+ command.uid(microdroid_uids::MICRODROID_PAYLOAD_UID);
+ command.gid(microdroid_uids::MICRODROID_PAYLOAD_GID);
command
}
};
+ // SAFETY: We are not accessing any resource of the parent process. This means we can't make any
+ // log calls inside the closure.
unsafe {
- // SAFETY: we are not accessing any resource of the parent process.
command.pre_exec(|| {
- info!("dropping capabilities before executing payload");
// It is OK to continue with payload execution even if the calls below fail, since
// whether process can use a capability is controlled by the SELinux. Dropping the
// capabilities here is just another defense-in-depth layer.
- if let Err(e) = cap::drop_inheritable_caps() {
- error!("failed to drop inheritable capabilities: {:?}", e);
- }
- if let Err(e) = cap::drop_bounding_set() {
- error!("failed to drop bounding set: {:?}", e);
- }
+ let _ = cap::drop_inheritable_caps();
+ let _ = cap::drop_bounding_set();
Ok(())
});
}
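
Below is a hedged sketch of the launcher path above: building a payload `Command` that switches uid/gid and registers a `pre_exec` hook. The uid/gid value 6000 matches the `system_payload` entries added to `microdroid_passwd`/`microdroid_group`, but the binary path and the helper are placeholders, not the `microdroid_uids` constants themselves.

```rust
use std::os::unix::process::CommandExt;
use std::process::Command;

// Illustrative version of the launcher path above; not the microdroid_manager code itself.
fn build_payload_command(binary: &str, uid: u32, gid: u32) -> Command {
    let mut command = Command::new(binary);
    command.uid(uid);
    command.gid(gid);
    // SAFETY: the closure runs after fork() and before exec(), so it must be async-signal-safe:
    // no allocation and no logging. That is why the patch drops the error!() calls in pre_exec.
    unsafe {
        command.pre_exec(|| {
            // Best-effort capability dropping would happen here; failures are deliberately ignored.
            Ok(())
        });
    }
    command
}

fn main() {
    let mut command = build_payload_command("/system/bin/true", 6000, 6000);
    // Spawning only succeeds when the binary exists and the caller may switch uid/gid.
    let _ = command.status();
}
```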
diff --git a/microdroid_manager/src/swap.rs b/microdroid_manager/src/swap.rs
index 2f4d176..c2b20ac 100644
--- a/microdroid_manager/src/swap.rs
+++ b/microdroid_manager/src/swap.rs
@@ -48,7 +48,7 @@
.checked_mul(512)
.ok_or_else(|| anyhow!("sysfs_size too large"))?;
- // safe because we give a constant and known-valid sysconf parameter
+ // SAFETY: We give a constant and known-valid sysconf parameter.
let pagesize = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as u64 };
let mut f = OpenOptions::new().read(false).write(true).open(format!("/dev/{}", dev))?;
@@ -75,7 +75,7 @@
/// Simple "swapon", using libc:: wrapper.
fn swapon(dev: &str) -> Result<()> {
let swapon_arg = std::ffi::CString::new(format!("/dev/{}", dev))?;
- // safe because we give a nul-terminated string and check the result
+ // SAFETY: We give a nul-terminated string and check the result.
let res = unsafe { libc::swapon(swapon_arg.as_ptr(), 0) };
if res != 0 {
return Err(anyhow!("Failed to swapon: {}", Error::last_os_error()));
diff --git a/microdroid_manager/src/vm_payload_service.rs b/microdroid_manager/src/vm_payload_service.rs
index bcddc3a..1e0b574 100644
--- a/microdroid_manager/src/vm_payload_service.rs
+++ b/microdroid_manager/src/vm_payload_service.rs
@@ -18,10 +18,11 @@
use android_system_virtualization_payload::aidl::android::system::virtualization::payload::IVmPayloadService::{
BnVmPayloadService, IVmPayloadService, VM_PAYLOAD_SERVICE_SOCKET_NAME};
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::IVirtualMachineService;
-use anyhow::Result;
-use binder::{Interface, BinderFeatures, ExceptionCode, Status, Strong};
+use anyhow::{anyhow, Context, Result};
+use avflog::LogResult;
+use binder::{Interface, BinderFeatures, ExceptionCode, Strong, IntoBinderResult};
use diced_open_dice::{DiceArtifacts, OwnedDiceArtifacts};
-use log::{error, info};
+use log::info;
use rpcbinder::RpcServer;
use std::os::unix::io::OwnedFd;
@@ -39,7 +40,8 @@
fn getVmInstanceSecret(&self, identifier: &[u8], size: i32) -> binder::Result<Vec<u8>> {
if !(0..=32).contains(&size) {
- return Err(Status::new_exception(ExceptionCode::ILLEGAL_ARGUMENT, None));
+ return Err(anyhow!("size {size} not in range (0..=32)"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
}
// Use a fixed salt to scope the derivation to this API. It was randomly generated.
let salt = [
@@ -48,10 +50,10 @@
0xB7, 0xA8, 0x43, 0x92,
];
let mut secret = vec![0; size.try_into().unwrap()];
- derive_sealing_key(&self.dice, &salt, identifier, &mut secret).map_err(|e| {
- error!("Failed to derive VM instance secret: {:?}", e);
- Status::new_service_specific_error(-1, None)
- })?;
+ derive_sealing_key(&self.dice, &salt, identifier, &mut secret)
+ .context("Failed to derive VM instance secret")
+ .with_log()
+ .or_service_specific_exception(-1)?;
Ok(secret)
}
@@ -60,7 +62,7 @@
if let Some(bcc) = self.dice.bcc() {
Ok(bcc.to_vec())
} else {
- Err(Status::new_exception_str(ExceptionCode::ILLEGAL_STATE, Some("bcc is none")))
+ Err(anyhow!("bcc is none")).or_binder_exception(ExceptionCode::ILLEGAL_STATE)
}
}
@@ -91,8 +93,9 @@
if self.allow_restricted_apis {
Ok(())
} else {
- error!("Use of restricted APIs is not allowed");
- Err(Status::new_exception_str(ExceptionCode::SECURITY, Some("Use of restricted APIs")))
+ Err(anyhow!("Use of restricted APIs is not allowed"))
+ .with_log()
+ .or_binder_exception(ExceptionCode::SECURITY)
}
}
}
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index c9909e6..1aa5935 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -7,16 +7,12 @@
crate_name: "pvmfw",
defaults: ["vmbase_ffi_defaults"],
srcs: ["src/main.rs"],
- edition: "2021",
- // Require unsafe blocks for inside unsafe functions.
- flags: ["-Dunsafe_op_in_unsafe_fn"],
features: [
"legacy",
],
rustlibs: [
"libaarch64_paging",
"libbssl_ffi_nostd",
- "libbuddy_system_allocator",
"libciborium_nostd",
"libciborium_io_nostd",
"libdiced_open_dice_nostd",
@@ -83,7 +79,6 @@
// partition image. This is just to package the unstripped file into the
// symbols zip file for debugging purpose.
installable: true,
- native_coverage: false,
}
raw_binary {
@@ -135,11 +130,9 @@
rust_library_rlib {
name: "libpvmfw_embedded_key",
- defaults: ["vmbase_ffi_defaults"],
- prefer_rlib: true,
+ defaults: ["vmbase_rlib_defaults"],
srcs: [":pvmfw_embedded_key_rs"],
crate_name: "pvmfw_embedded_key",
- apex_available: ["com.android.virt"],
}
prebuilt_etc {
@@ -193,8 +186,7 @@
rust_library_rlib {
name: "libpvmfw_fdt_template",
- defaults: ["vmbase_ffi_defaults"],
- prefer_rlib: true,
+ defaults: ["vmbase_rlib_defaults"],
srcs: [":pvmfw_fdt_template_rs"],
crate_name: "pvmfw_fdt_template",
}
diff --git a/pvmfw/README.md b/pvmfw/README.md
index 4e93648..698972a 100644
--- a/pvmfw/README.md
+++ b/pvmfw/README.md
@@ -139,6 +139,10 @@
| offset = (SECOND - HEAD) |
| size = (SECOND_END - SECOND) |
+-------------------------------+
+| [Entry 2] | <-- Entry 2 is present since version 1.1
+| offset = (THIRD - HEAD) |
+| size = (THIRD_END - THIRD) |
++-------------------------------+
| ... |
+-------------------------------+
| [Entry n] |
@@ -152,6 +156,10 @@
| {Second blob: DP} |
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ <-- SECOND_END
| (Padding to 8-byte alignment) |
++===============================+ <-- THIRD
+| {Third blob: VM DTBO} |
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ <-- THIRD_END
+| (Padding to 8-byte alignment) |
+===============================+
| ... |
+===============================+ <-- TAIL
@@ -174,10 +182,17 @@
blobs it refers to. In version 1.0, it describes two blobs:
- entry 0 must point to a valid BCC Handover (see below)
-- entry 1 may point to a [DTBO] to be applied to the pVM device tree
+- entry 1 may point to a [DTBO] to be applied to the pVM device tree. See
+ [debug policy][debug_policy] for an example.
+
+In version 1.1, a new blob is added.
+
+- entry 2 may point to the VM [DTBO] used for device assignment. pvmfw will
+ provision the assigned devices described by this VM DTBO.
[header]: src/config.rs
[DTBO]: https://android.googlesource.com/platform/external/dtc/+/refs/heads/master/Documentation/dt-object-internal.txt
+[debug_policy]: ../docs/debug/README.md#debug-policy
#### Virtual Platform Boot Certificate Chain Handover
@@ -240,37 +255,6 @@
[Layering]: https://pigweed.googlesource.com/open-dice/+/refs/heads/main/docs/specification.md#layering-details
[Trusty-BCC]: https://android.googlesource.com/trusty/lib/+/1696be0a8f3a7103/lib/hwbcc/common/swbcc.c#554
-#### pVM Device Tree Overlay
-
-Config header can provide a DTBO to be overlaid on top of the baseline device
-tree from crosvm.
-
-The DTBO may contain debug policies. Debug policies MUST NOT be provided for
-locked devices for security reasons.
-
-Here are an example of DTBO.
-
-```
-/ {
- fragment@avf {
- target-path = "/";
-
- __overlay__ {
- avf {
- /* your debug policy here */
- };
- };
- };
-}; /* end of avf */
-```
-
-For specifying DTBO, host bootloader should apply the DTBO to both host
-OS's device tree and config header of `pvmfw`. Both `virtualizationmanager` and
-`pvmfw` will prepare for debugging features.
-
-For details about device tree properties for debug policies, see
-[microdroid's debugging policy guide](../microdroid/README.md#option-1-running-microdroid-on-avf-debug-policy-configured-device).
-
### Platform Requirements
pvmfw is intended to run in a virtualized environment according to the `crosvm`
@@ -433,3 +417,25 @@
kernel][soong-udroid]).
[soong-udroid]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/Virtualization/microdroid/Android.bp;l=427;drc=ca0049be4d84897b8c9956924cfae506773103eb
+
+## Development
+
+For faster iteration, you can build pvmfw, adb-push it to the device, and use
+it directly for a new pVM, without having to flash it to the physical
+partition. To do that, set the system property `hypervisor.pvmfw.path` to point
+to the pvmfw image you pushed as shown below:
+
+```shell
+m pvmfw_img
+adb push out/target/product/generic_arm64/system/etc/pvmfw.img /data/local/tmp/pvmfw.img
+adb root
+adb shell setprop hypervisor.pvmfw.path /data/local/tmp/pvmfw.img
+```
+
+Then run a protected VM, for example:
+
+```shell
+adb shell /apex/com.android.virt/bin/vm run-microdroid --protected
+```
+
+Note: `adb root` is required to set the system property.
diff --git a/pvmfw/avb/Android.bp b/pvmfw/avb/Android.bp
index 5353a21..4efee6a 100644
--- a/pvmfw/avb/Android.bp
+++ b/pvmfw/avb/Android.bp
@@ -2,13 +2,11 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-rust_defaults {
- name: "libpvmfw_avb_nostd_defaults",
+rust_library_rlib {
+ name: "libpvmfw_avb_nostd",
crate_name: "pvmfw_avb",
srcs: ["src/lib.rs"],
prefer_rlib: true,
- // Require unsafe blocks for inside unsafe functions.
- flags: ["-Dunsafe_op_in_unsafe_fn"],
rustlibs: [
"libavb_bindgen_nostd",
"libtinyvec_nostd",
@@ -16,11 +14,6 @@
whole_static_libs: [
"libavb_baremetal",
],
-}
-
-rust_library_rlib {
- name: "libpvmfw_avb_nostd",
- defaults: ["libpvmfw_avb_nostd_defaults"],
no_stdlibs: true,
stdlibs: [
"libcore.rust_sysroot",
diff --git a/pvmfw/avb/src/descriptor/collection.rs b/pvmfw/avb/src/descriptor/collection.rs
index c6698c0..14c47b1 100644
--- a/pvmfw/avb/src/descriptor/collection.rs
+++ b/pvmfw/avb/src/descriptor/collection.rs
@@ -170,9 +170,9 @@
/// Behavior is undefined if any of the following conditions are violated:
/// * The `descriptor` pointer must be non-null and point to a valid `AvbDescriptor`.
unsafe fn from_descriptor_ptr(descriptor: *const AvbDescriptor) -> utils::Result<Self> {
+ let avb_descriptor =
// SAFETY: It is safe as the raw pointer `descriptor` is non-null and points to
// a valid `AvbDescriptor`.
- let avb_descriptor =
unsafe { get_valid_descriptor(descriptor, avb_descriptor_validate_and_byteswap)? };
let len = usize_checked_add(
size_of::<AvbDescriptor>(),
@@ -189,9 +189,9 @@
Ok(Self::Hash(descriptor))
}
Ok(AvbDescriptorTag::AVB_DESCRIPTOR_TAG_PROPERTY) => {
+ let descriptor =
// SAFETY: It is safe because the caller ensures that `descriptor` is a non-null
// pointer pointing to a valid struct.
- let descriptor =
unsafe { PropertyDescriptor::from_descriptor_ptr(descriptor, data)? };
Ok(Self::Property(descriptor))
}
diff --git a/pvmfw/avb/src/ops.rs b/pvmfw/avb/src/ops.rs
index e7f0ac7..8f7295c 100644
--- a/pvmfw/avb/src/ops.rs
+++ b/pvmfw/avb/src/ops.rs
@@ -320,8 +320,8 @@
pub(crate) fn vbmeta_images(&self) -> Result<&[AvbVBMetaData], AvbSlotVerifyError> {
let data = self.as_ref();
is_not_null(data.vbmeta_images).map_err(|_| AvbSlotVerifyError::Io)?;
- // SAFETY: It is safe as the raw pointer `data.vbmeta_images` is a nonnull pointer.
let vbmeta_images =
+ // SAFETY: It is safe as the raw pointer `data.vbmeta_images` is a nonnull pointer.
unsafe { slice::from_raw_parts(data.vbmeta_images, data.num_vbmeta_images) };
Ok(vbmeta_images)
}
@@ -329,10 +329,10 @@
pub(crate) fn loaded_partitions(&self) -> Result<&[AvbPartitionData], AvbSlotVerifyError> {
let data = self.as_ref();
is_not_null(data.loaded_partitions).map_err(|_| AvbSlotVerifyError::Io)?;
+ let loaded_partitions =
// SAFETY: It is safe as the raw pointer `data.loaded_partitions` is a nonnull pointer and
// is guaranteed by libavb to point to a valid `AvbPartitionData` array as part of the
// `AvbSlotVerifyData` struct.
- let loaded_partitions =
unsafe { slice::from_raw_parts(data.loaded_partitions, data.num_loaded_partitions) };
Ok(loaded_partitions)
}
diff --git a/pvmfw/avb/tests/api_test.rs b/pvmfw/avb/tests/api_test.rs
index aa9ed36..2f45d77 100644
--- a/pvmfw/avb/tests/api_test.rs
+++ b/pvmfw/avb/tests/api_test.rs
@@ -243,10 +243,15 @@
let total_len = kernel.len() as u64;
let footer = extract_avb_footer(&kernel)?;
assert!(footer.vbmeta_offset < total_len);
+ // TODO: use core::mem::offset_of once stable.
+ let footer_addr = ptr::addr_of!(footer) as *const u8;
let vbmeta_offset_addr = ptr::addr_of!(footer.vbmeta_offset) as *const u8;
- // SAFETY: It is safe as both raw pointers `vbmeta_offset_addr` and `footer` are not null.
let vbmeta_offset_start =
- unsafe { vbmeta_offset_addr.offset_from(ptr::addr_of!(footer) as *const u8) };
+ // SAFETY:
+ // - both raw pointers `vbmeta_offset_addr` and `footer_addr` are not null;
+ // - they are both derived from the `footer` object;
+ // - the offset is known from the struct definition to be a small positive number of bytes.
+ unsafe { vbmeta_offset_addr.offset_from(footer_addr) };
let footer_start = kernel.len() - size_of::<AvbFooter>();
let vbmeta_offset_start = footer_start + usize::try_from(vbmeta_offset_start)?;
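
The `addr_of!`/`offset_from` pattern used here (pending a stable `offset_of!`) can be shown in isolation; the `Footer` struct below is a simplified stand-in for `AvbFooter`, not the real type.

```rust
use std::mem::size_of;
use std::ptr;

// Simplified stand-in for AvbFooter, used only to demonstrate the addr_of!/offset_from idiom.
#[repr(C)]
struct Footer {
    #[allow(dead_code)] // only present so that vbmeta_offset has a non-zero offset
    image_size: u64,
    vbmeta_offset: u64,
}

fn main() {
    let footer = Footer { image_size: 0, vbmeta_offset: 0 };
    let footer_addr = ptr::addr_of!(footer) as *const u8;
    let field_addr = ptr::addr_of!(footer.vbmeta_offset) as *const u8;
    // SAFETY: both pointers are derived from the same object and the field lies inside it, so
    // the result is a small, in-bounds byte offset.
    let offset = unsafe { field_addr.offset_from(footer_addr) };
    assert_eq!(offset, size_of::<u64>() as isize);
    println!("vbmeta_offset starts {offset} bytes into the footer");
}
```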
diff --git a/pvmfw/platform.dts b/pvmfw/platform.dts
index 74439d9..cb8e30d 100644
--- a/pvmfw/platform.dts
+++ b/pvmfw/platform.dts
@@ -225,6 +225,8 @@
0x3000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 5) IRQ_TYPE_LEVEL_HIGH
0x3800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 6) IRQ_TYPE_LEVEL_HIGH
0x4000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 7) IRQ_TYPE_LEVEL_HIGH
+ 0x4800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 8) IRQ_TYPE_LEVEL_HIGH
+ 0x5000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 9) IRQ_TYPE_LEVEL_HIGH
>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7
@@ -233,6 +235,8 @@
0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
0xf800 0x0 0x0 0x7>;
};
diff --git a/pvmfw/src/config.rs b/pvmfw/src/config.rs
index 4086af7..d0a6b7f 100644
--- a/pvmfw/src/config.rs
+++ b/pvmfw/src/config.rs
@@ -18,7 +18,9 @@
use core::mem;
use core::ops::Range;
use core::result;
-use vmbase::util::unchecked_align_up;
+use log::{info, warn};
+use static_assertions::const_assert_eq;
+use vmbase::util::RangeExt;
use zerocopy::{FromBytes, LayoutVerified};
/// Configuration data header.
@@ -28,13 +30,11 @@
/// Magic number; must be `Header::MAGIC`.
magic: u32,
/// Version of the header format.
- version: u32,
+ version: Version,
/// Total size of the configuration data.
total_size: u32,
/// Feature flags; currently reserved and must be zero.
flags: u32,
- /// (offset, size) pairs used to locate individual entries appended to the header.
- entries: [HeaderEntry; Entry::COUNT],
}
#[derive(Debug)]
@@ -46,15 +46,13 @@
/// Header doesn't contain the expect magic value.
InvalidMagic,
/// Version of the header isn't supported.
- UnsupportedVersion(u16, u16),
- /// Header sets flags incorrectly or uses reserved flags.
- InvalidFlags(u32),
+ UnsupportedVersion(Version),
/// Header describes configuration data that doesn't fit in the expected buffer.
InvalidSize(usize),
/// Header entry is missing.
MissingEntry(Entry),
- /// Header entry is invalid.
- InvalidEntry(Entry, EntryError),
+ /// Range described by entry does not fit within config data.
+ EntryOutOfBounds(Entry, Range<usize>, Range<usize>),
}
impl fmt::Display for Error {
@@ -63,110 +61,69 @@
Self::BufferTooSmall => write!(f, "Reserved region is smaller than config header"),
Self::HeaderMisaligned => write!(f, "Reserved region is misaligned"),
Self::InvalidMagic => write!(f, "Wrong magic number"),
- Self::UnsupportedVersion(x, y) => write!(f, "Version {x}.{y} not supported"),
- Self::InvalidFlags(v) => write!(f, "Flags value {v:#x} is incorrect or reserved"),
+ Self::UnsupportedVersion(v) => write!(f, "Version {v} not supported"),
Self::InvalidSize(sz) => write!(f, "Total size ({sz:#x}) overflows reserved region"),
Self::MissingEntry(entry) => write!(f, "Mandatory {entry:?} entry is missing"),
- Self::InvalidEntry(entry, e) => write!(f, "Invalid {entry:?} entry: {e}"),
+ Self::EntryOutOfBounds(entry, range, limits) => {
+ write!(
+ f,
+ "Entry {entry:?} out of bounds: {range:#x?} must be within range {limits:#x?}"
+ )
+ }
}
}
}
pub type Result<T> = result::Result<T, Error>;
-#[derive(Debug)]
-pub enum EntryError {
- /// Offset isn't between the fixed minimum value and size of configuration data.
- InvalidOffset(usize),
- /// Size must be zero when offset is and not be when it isn't.
- InvalidSize(usize),
- /// Entry isn't fully within the configuration data structure.
- OutOfBounds { offset: usize, size: usize, limit: usize },
-}
-
-impl fmt::Display for EntryError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::InvalidOffset(offset) => write!(f, "Invalid offset: {offset:#x?}"),
- Self::InvalidSize(sz) => write!(f, "Invalid size: {sz:#x?}"),
- Self::OutOfBounds { offset, size, limit } => {
- let range = Header::PADDED_SIZE..*limit;
- let entry = *offset..(*offset + *size);
- write!(f, "Out of bounds: {entry:#x?} must be within range {range:#x?}")
- }
- }
- }
-}
-
impl Header {
const MAGIC: u32 = u32::from_ne_bytes(*b"pvmf");
- const VERSION_1_0: u32 = Self::version(1, 0);
- const PADDED_SIZE: usize = unchecked_align_up(mem::size_of::<Self>(), mem::size_of::<u64>());
-
- pub const fn version(major: u16, minor: u16) -> u32 {
- ((major as u32) << 16) | (minor as u32)
- }
-
- pub const fn version_tuple(&self) -> (u16, u16) {
- ((self.version >> 16) as u16, self.version as u16)
- }
+ const VERSION_1_0: Version = Version { major: 1, minor: 0 };
+ const VERSION_1_1: Version = Version { major: 1, minor: 1 };
pub fn total_size(&self) -> usize {
self.total_size as usize
}
- pub fn body_size(&self) -> usize {
- self.total_size() - Self::PADDED_SIZE
+ pub fn body_lowest_bound(&self) -> Result<usize> {
+ let entries_offset = mem::size_of::<Self>();
+
+ // Ensure that the entries are properly aligned and do not require padding.
+ const_assert_eq!(mem::align_of::<Header>() % mem::align_of::<HeaderEntry>(), 0);
+ const_assert_eq!(mem::size_of::<Header>() % mem::align_of::<HeaderEntry>(), 0);
+
+ let entries_size = self.entry_count()?.checked_mul(mem::size_of::<HeaderEntry>()).unwrap();
+
+ Ok(entries_offset.checked_add(entries_size).unwrap())
}
- fn get_body_range(&self, entry: Entry) -> Result<Option<Range<usize>>> {
- let e = self.entries[entry as usize];
- let offset = e.offset as usize;
- let size = e.size as usize;
-
- match self._get_body_range(offset, size) {
- Ok(r) => Ok(r),
- Err(EntryError::InvalidSize(0)) => {
- // As our bootloader currently uses this (non-compliant) case, permit it for now.
- log::warn!("Config entry {entry:?} uses non-zero offset with zero size");
- // TODO(b/262181812): Either make this case valid or fix the bootloader.
- Ok(None)
+ pub fn entry_count(&self) -> Result<usize> {
+ let last_entry = match self.version {
+ Self::VERSION_1_0 => Entry::DebugPolicy,
+ Self::VERSION_1_1 => Entry::VmDtbo,
+ v @ Version { major: 1, .. } => {
+ const LATEST: Version = Header::VERSION_1_1;
+ warn!("Parsing unknown config data version {v} as version {LATEST}");
+ return Ok(Entry::COUNT);
}
- Err(e) => Err(Error::InvalidEntry(entry, e)),
- }
- }
+ v => return Err(Error::UnsupportedVersion(v)),
+ };
- fn _get_body_range(
- &self,
- offset: usize,
- size: usize,
- ) -> result::Result<Option<Range<usize>>, EntryError> {
- match (offset, size) {
- (0, 0) => Ok(None),
- (0, size) | (_, size @ 0) => Err(EntryError::InvalidSize(size)),
- _ => {
- let start = offset
- .checked_sub(Header::PADDED_SIZE)
- .ok_or(EntryError::InvalidOffset(offset))?;
- let end = start
- .checked_add(size)
- .filter(|x| *x <= self.body_size())
- .ok_or(EntryError::OutOfBounds { offset, size, limit: self.total_size() })?;
-
- Ok(Some(start..end))
- }
- }
+ Ok(last_entry as usize + 1)
}
}
#[derive(Clone, Copy, Debug)]
pub enum Entry {
- Bcc = 0,
- DebugPolicy = 1,
+ Bcc,
+ DebugPolicy,
+ VmDtbo,
+ #[allow(non_camel_case_types)] // TODO: Use mem::variant_count once stable.
+ _VARIANT_COUNT,
}
impl Entry {
- const COUNT: usize = 2;
+ const COUNT: usize = Self::_VARIANT_COUNT as usize;
}
#[repr(packed)]
@@ -176,59 +133,111 @@
size: u32,
}
+impl HeaderEntry {
+ pub fn as_range(&self) -> Option<Range<usize>> {
+ let size = usize::try_from(self.size).unwrap();
+ if size != 0 {
+ let offset = self.offset.try_into().unwrap();
+ // Allow overflows here for the Range to properly describe the entry (validated later).
+ Some(offset..(offset + size))
+ } else {
+ None
+ }
+ }
+}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Debug, Eq, FromBytes, PartialEq)]
+pub struct Version {
+ minor: u16,
+ major: u16,
+}
+
+impl fmt::Display for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ // Copy the fields to local variables to prevent unaligned access.
+ let (major, minor) = (self.major, self.minor);
+ write!(f, "{}.{}", major, minor)
+ }
+}
+
#[derive(Debug)]
pub struct Config<'a> {
body: &'a mut [u8],
- bcc_range: Range<usize>,
- dp_range: Option<Range<usize>>,
+ ranges: [Option<Range<usize>>; Entry::COUNT],
}
impl<'a> Config<'a> {
/// Take ownership of a pvmfw configuration consisting of its header and following entries.
- pub fn new(data: &'a mut [u8]) -> Result<Self> {
- let header = data.get(..Header::PADDED_SIZE).ok_or(Error::BufferTooSmall)?;
+ pub fn new(bytes: &'a mut [u8]) -> Result<Self> {
+ const HEADER_SIZE: usize = mem::size_of::<Header>();
+ if bytes.len() < HEADER_SIZE {
+ return Err(Error::BufferTooSmall);
+ }
- let (header, _) =
- LayoutVerified::<_, Header>::new_from_prefix(header).ok_or(Error::HeaderMisaligned)?;
+ let (header, rest) =
+ LayoutVerified::<_, Header>::new_from_prefix(bytes).ok_or(Error::HeaderMisaligned)?;
let header = header.into_ref();
if header.magic != Header::MAGIC {
return Err(Error::InvalidMagic);
}
- if header.version != Header::VERSION_1_0 {
- let (major, minor) = header.version_tuple();
- return Err(Error::UnsupportedVersion(major, minor));
+ let header_flags = header.flags;
+ if header_flags != 0 {
+ warn!("Ignoring unknown config flags: {header_flags:#x}");
}
- if header.flags != 0 {
- return Err(Error::InvalidFlags(header.flags));
- }
+ info!("pvmfw config version: {}", header.version);
- let bcc_range =
- header.get_body_range(Entry::Bcc)?.ok_or(Error::MissingEntry(Entry::Bcc))?;
- let dp_range = header.get_body_range(Entry::DebugPolicy)?;
+ // Validate that we won't get an invalid alignment in the following due to padding to u64.
+ const_assert_eq!(HEADER_SIZE % mem::size_of::<u64>(), 0);
- let body_size = header.body_size();
+ // Ensure that Header::total_size isn't larger than anticipated by the caller and resize
+ // the &[u8] to catch OOB accesses to entries/blobs.
let total_size = header.total_size();
- let body = data
- .get_mut(Header::PADDED_SIZE..)
- .ok_or(Error::BufferTooSmall)?
- .get_mut(..body_size)
- .ok_or(Error::InvalidSize(total_size))?;
+ let rest = if let Some(rest_size) = total_size.checked_sub(HEADER_SIZE) {
+ rest.get_mut(..rest_size).ok_or(Error::InvalidSize(total_size))?
+ } else {
+ return Err(Error::InvalidSize(total_size));
+ };
- Ok(Self { body, bcc_range, dp_range })
+ let (header_entries, body) =
+ LayoutVerified::<_, [HeaderEntry]>::new_slice_from_prefix(rest, header.entry_count()?)
+ .ok_or(Error::BufferTooSmall)?;
+
+ // Validate that we won't get an invalid alignment in the following due to padding to u64.
+ const_assert_eq!(mem::size_of::<HeaderEntry>() % mem::size_of::<u64>(), 0);
+
+ let limits = header.body_lowest_bound()?..total_size;
+ let ranges = [
+ // TODO: Find a way to do this programmatically even if the trait
+ // `core::marker::Copy` is not implemented for `core::ops::Range<usize>`.
+ Self::validated_body_range(Entry::Bcc, &header_entries, &limits)?,
+ Self::validated_body_range(Entry::DebugPolicy, &header_entries, &limits)?,
+ Self::validated_body_range(Entry::VmDtbo, &header_entries, &limits)?,
+ ];
+
+ Ok(Self { body, ranges })
}
/// Get slice containing the platform BCC.
- pub fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>) {
- let bcc_start = self.bcc_range.start;
- let bcc_end = self.bcc_range.len();
+ pub fn get_entries(&mut self) -> Result<(&mut [u8], Option<&mut [u8]>)> {
+ // This assumes that the blobs are in-order w.r.t. the entries.
+ let bcc_range = self.get_entry_range(Entry::Bcc).ok_or(Error::MissingEntry(Entry::Bcc))?;
+ let dp_range = self.get_entry_range(Entry::DebugPolicy);
+ let vm_dtbo_range = self.get_entry_range(Entry::VmDtbo);
+ // TODO(b/291191157): Provision device assignment with this.
+ if let Some(vm_dtbo_range) = vm_dtbo_range {
+ info!("Found VM DTBO at {:?}", vm_dtbo_range);
+ }
+ let bcc_start = bcc_range.start;
+ let bcc_end = bcc_range.len();
let (_, rest) = self.body.split_at_mut(bcc_start);
let (bcc, rest) = rest.split_at_mut(bcc_end);
- let dp = if let Some(dp_range) = &self.dp_range {
- let dp_start = dp_range.start.checked_sub(self.bcc_range.end).unwrap();
+ let dp = if let Some(dp_range) = dp_range {
+ let dp_start = dp_range.start.checked_sub(bcc_range.end).unwrap();
let dp_end = dp_range.len();
let (_, rest) = rest.split_at_mut(dp_start);
let (dp, _) = rest.split_at_mut(dp_end);
@@ -237,6 +246,31 @@
None
};
- (bcc, dp)
+ Ok((bcc, dp))
+ }
+
+ pub fn get_entry_range(&self, entry: Entry) -> Option<Range<usize>> {
+ self.ranges[entry as usize].clone()
+ }
+
+ fn validated_body_range(
+ entry: Entry,
+ header_entries: &[HeaderEntry],
+ limits: &Range<usize>,
+ ) -> Result<Option<Range<usize>>> {
+ if let Some(header_entry) = header_entries.get(entry as usize) {
+ if let Some(r) = header_entry.as_range() {
+ return if r.start <= r.end && r.is_within(limits) {
+ let start = r.start - limits.start;
+ let end = r.end - limits.start;
+
+ Ok(Some(start..end))
+ } else {
+ Err(Error::EntryOutOfBounds(entry, r, limits.clone()))
+ };
+ }
+ }
+
+ Ok(None)
}
}
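
A small sketch of why `Version` lists `minor` before `major`: on the little-endian targets pvmfw runs on, that field order occupies the same bytes as the old packed `(major << 16) | minor` u32. The code below is illustrative only and omits the `FromBytes` derive.

```rust
// Illustrative only: shows the layout equivalence between the new Version struct and the
// legacy packed u32 encoding on a little-endian target.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Version {
    minor: u16,
    major: u16,
}

const fn legacy_version(major: u16, minor: u16) -> u32 {
    ((major as u32) << 16) | (minor as u32)
}

fn main() {
    let packed = legacy_version(1, 1);
    let bytes = packed.to_le_bytes();
    // The low half of the u32 is the minor version, the high half the major version, which is
    // exactly the in-memory field order of Version on a little-endian machine.
    let parsed = Version {
        minor: u16::from_le_bytes([bytes[0], bytes[1]]),
        major: u16::from_le_bytes([bytes[2], bytes[3]]),
    };
    assert_eq!(parsed, Version { minor: 1, major: 1 });
    // Copy the fields to locals to avoid taking references to packed fields.
    let (major, minor) = (parsed.major, parsed.minor);
    println!("config version {major}.{minor}");
}
```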
diff --git a/pvmfw/src/crypto.rs b/pvmfw/src/crypto.rs
index 3d9c8d1..94714c0 100644
--- a/pvmfw/src/crypto.rs
+++ b/pvmfw/src/crypto.rs
@@ -46,17 +46,14 @@
impl Error {
fn get() -> Option<Self> {
- let mut file = MaybeUninit::uninit();
- let mut line = MaybeUninit::uninit();
- // SAFETY - The function writes to the provided pointers, validated below.
- let packed = unsafe { ERR_get_error_line(file.as_mut_ptr(), line.as_mut_ptr()) };
- // SAFETY - Any possible value returned could be considered a valid *const c_char.
- let file = unsafe { file.assume_init() };
- // SAFETY - Any possible value returned could be considered a valid c_int.
- let line = unsafe { line.assume_init() };
+ let mut file = ptr::null();
+ let mut line = 0;
+ // SAFETY: The function writes to the provided pointers, which are valid because they come
+ // from references. It doesn't retain them after it returns.
+ let packed = unsafe { ERR_get_error_line(&mut file, &mut line) };
let packed = packed.try_into().ok()?;
- // SAFETY - Any non-NULL result is expected to point to a global const C string.
+ // SAFETY: Any non-NULL result is expected to point to a global const C string.
let file = unsafe { as_static_cstr(file) };
Some(Self { packed, file, line })
@@ -67,16 +64,16 @@
}
fn library_name(&self) -> Option<&'static CStr> {
- // SAFETY - Call to a pure function.
+ // SAFETY: Call to a pure function.
let name = unsafe { ERR_lib_error_string(self.packed_value()) };
- // SAFETY - Any non-NULL result is expected to point to a global const C string.
+ // SAFETY: Any non-NULL result is expected to point to a global const C string.
unsafe { as_static_cstr(name) }
}
fn reason(&self) -> Option<&'static CStr> {
- // SAFETY - Call to a pure function.
+ // SAFETY: Call to a pure function.
let reason = unsafe { ERR_reason_error_string(self.packed_value()) };
- // SAFETY - Any non-NULL result is expected to point to a global const C string.
+ // SAFETY: Any non-NULL result is expected to point to a global const C string.
unsafe { as_static_cstr(reason) }
}
}
@@ -111,18 +108,18 @@
impl Aead {
pub fn aes_256_gcm_randnonce() -> Option<&'static Self> {
- // SAFETY - Returned pointer is checked below.
+ // SAFETY: Returned pointer is checked below.
let aead = unsafe { EVP_aead_aes_256_gcm_randnonce() };
if aead.is_null() {
None
} else {
- // SAFETY - We assume that the non-NULL value points to a valid and static EVP_AEAD.
+ // SAFETY: We assume that the non-NULL value points to a valid and static EVP_AEAD.
Some(unsafe { &*(aead as *const _) })
}
}
pub fn max_overhead(&self) -> usize {
- // SAFETY - Function should only read from self.
+ // SAFETY: Function should only read from self.
unsafe { EVP_AEAD_max_overhead(self.as_ref() as *const _) }
}
}
@@ -141,7 +138,7 @@
const DEFAULT_TAG_LENGTH: usize = 0;
let engine = ptr::null_mut(); // Use default implementation.
let mut ctx = MaybeUninit::zeroed();
- // SAFETY - Initialize the EVP_AEAD_CTX with const pointers to the AEAD and key.
+ // SAFETY: Initialize the EVP_AEAD_CTX with const pointers to the AEAD and key.
let result = unsafe {
EVP_AEAD_CTX_init(
ctx.as_mut_ptr(),
@@ -154,7 +151,7 @@
};
if result == 1 {
- // SAFETY - We assume that the non-NULL value points to a valid and static EVP_AEAD.
+ // SAFETY: We assume that the non-NULL value points to a valid and static EVP_AEAD.
Ok(Self(unsafe { ctx.assume_init() }))
} else {
Err(ErrorIterator {})
@@ -162,12 +159,12 @@
}
pub fn aead(&self) -> Option<&'static Aead> {
- // SAFETY - The function should only read from self.
+ // SAFETY: The function should only read from self.
let aead = unsafe { EVP_AEAD_CTX_aead(self.as_ref() as *const _) };
if aead.is_null() {
None
} else {
- // SAFETY - We assume that the non-NULL value points to a valid and static EVP_AEAD.
+ // SAFETY: We assume that the non-NULL value points to a valid and static EVP_AEAD.
Some(unsafe { &*(aead as *const _) })
}
}
@@ -178,7 +175,7 @@
let ad = ptr::null_mut();
let ad_len = 0;
let mut out_len = MaybeUninit::uninit();
- // SAFETY - The function should only read from self and write to out (at most the provided
+ // SAFETY: The function should only read from self and write to out (at most the provided
// number of bytes) and out_len while reading from data (at most the provided number of
// bytes), ignoring any NULL input.
let result = unsafe {
@@ -197,7 +194,7 @@
};
if result == 1 {
- // SAFETY - Any value written to out_len could be a valid usize. The value itself is
+ // SAFETY: Any value written to out_len could be a valid usize. The value itself is
// validated as being a proper slice length by panicking in the following indexing
// otherwise.
let out_len = unsafe { out_len.assume_init() };
@@ -213,7 +210,7 @@
let ad = ptr::null_mut();
let ad_len = 0;
let mut out_len = MaybeUninit::uninit();
- // SAFETY - The function should only read from self and write to out (at most the provided
+ // SAFETY: The function should only read from self and write to out (at most the provided
// number of bytes) while reading from data (at most the provided number of bytes),
// ignoring any NULL input.
let result = unsafe {
@@ -232,7 +229,7 @@
};
if result == 1 {
- // SAFETY - Any value written to out_len could be a valid usize. The value itself is
+ // SAFETY: Any value written to out_len could be a valid usize. The value itself is
// validated as being a proper slice length by panicking in the following indexing
// otherwise.
let out_len = unsafe { out_len.assume_init() };
@@ -272,12 +269,12 @@
pub fn hkdf_sh512<const N: usize>(secret: &[u8], salt: &[u8], info: &[u8]) -> Result<[u8; N]> {
let mut key = [0; N];
- // SAFETY - The function shouldn't access any Rust variable and the returned value is accepted
+ // SAFETY: The function shouldn't access any Rust variable and the returned value is accepted
// as a potentially NULL pointer.
let digest = unsafe { EVP_sha512() };
assert!(!digest.is_null());
- // SAFETY - Only reads from/writes to the provided slices and supports digest was checked not
+ // SAFETY: Only reads from/writes to the provided slices and supports digest was checked not
// be NULL.
let result = unsafe {
HKDF(
@@ -301,6 +298,6 @@
}
pub fn init() {
- // SAFETY - Configures the internal state of the library - may be called multiple times.
+ // SAFETY: Configures the internal state of the library - may be called multiple times.
unsafe { CRYPTO_library_init() }
}
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index fbab013..9542429 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -18,8 +18,8 @@
use core::mem::size_of;
use core::slice;
use diced_open_dice::{
- bcc_format_config_descriptor, bcc_handover_main_flow, hash, Config, DiceMode, Hash,
- InputValues, HIDDEN_SIZE,
+ bcc_format_config_descriptor, bcc_handover_main_flow, hash, Config, DiceConfigValues, DiceMode,
+ Hash, InputValues, HIDDEN_SIZE,
};
use pvmfw_avb::{DebugLevel, Digest, VerifiedBootData};
use vmbase::cstr;
@@ -63,12 +63,10 @@
next_bcc: &mut [u8],
) -> diced_open_dice::Result<()> {
let mut config_descriptor_buffer = [0; 128];
- let config_descriptor_size = bcc_format_config_descriptor(
- Some(cstr!("vm_entry")),
- None, // component_version
- false, // resettable
- &mut config_descriptor_buffer,
- )?;
+ let config_values =
+ DiceConfigValues { component_name: Some(cstr!("vm_entry")), ..Default::default() };
+ let config_descriptor_size =
+ bcc_format_config_descriptor(&config_values, &mut config_descriptor_buffer)?;
let config = &config_descriptor_buffer[..config_descriptor_size];
let dice_inputs = InputValues::new(
@@ -91,7 +89,7 @@
/// .data, or provided BCC).
#[no_mangle]
unsafe extern "C" fn DiceClearMemory(_ctx: *mut c_void, size: usize, addr: *mut c_void) {
- // SAFETY - We must trust that the slice will be valid arrays/variables on the C code stack.
+ // SAFETY: We must trust that the slice will be valid arrays/variables on the C code stack.
let region = unsafe { slice::from_raw_parts_mut(addr as *mut u8, size) };
flushed_zeroize(region)
}
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 0d2dfda..3efa61e 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -17,15 +17,13 @@
use crate::config;
use crate::crypto;
use crate::fdt;
-use crate::heap;
use crate::memory;
-use crate::rand;
use core::arch::asm;
use core::mem::{drop, size_of};
use core::num::NonZeroUsize;
use core::ops::Range;
use core::slice;
-use hyp::{get_hypervisor, HypervisorCap};
+use hyp::{get_mem_sharer, get_mmio_guard};
use log::debug;
use log::error;
use log::info;
@@ -33,10 +31,10 @@
use log::LevelFilter;
use vmbase::util::RangeExt as _;
use vmbase::{
- console,
+ configure_heap, console,
layout::{self, crosvm},
- logger, main,
- memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_4KB},
+ main,
+ memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
power::reboot,
};
use zeroize::Zeroize;
@@ -62,6 +60,7 @@
}
main!(start);
+configure_heap!(SIZE_128KB);
/// Entry point for pVM firmware.
pub fn start(fdt_address: u64, payload_start: u64, payload_size: u64, _arg3: u64) {
@@ -69,9 +68,6 @@
// - can't access non-pvmfw memory (only statically-mapped memory)
// - can't access MMIO (therefore, no logging)
- // SAFETY - This function should and will only be called once, here.
- unsafe { heap::init() };
-
match main_wrapper(fdt_address as usize, payload_start as usize, payload_size as usize) {
Ok((entry, bcc)) => jump_to_payload(fdt_address, entry.try_into().unwrap(), bcc),
Err(_) => reboot(), // TODO(b/220071963) propagate the reason back to the host.
@@ -88,17 +84,16 @@
impl<'a> MemorySlices<'a> {
fn new(fdt: usize, kernel: usize, kernel_size: usize) -> Result<Self, RebootReason> {
- // SAFETY - SIZE_2MB is non-zero.
- const FDT_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(crosvm::FDT_MAX_SIZE) };
+ let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
// TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
// e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
// overwrite with the template DT and apply the DTBO.
- let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, FDT_SIZE).map_err(|e| {
+ let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
error!("Failed to allocate the FDT range: {e}");
RebootReason::InternalError
})?;
- // SAFETY - The tracker validated the range to be in main memory, mapped, and not overlap.
+ // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };
let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
error!("Failed to spawn the FDT wrapper: {e}");
@@ -115,8 +110,8 @@
RebootReason::InvalidFdt
})?;
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- let granule = get_hypervisor().memory_protection_granule().map_err(|e| {
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule().map_err(|e| {
error!("Failed to get memory protection granule: {e}");
RebootReason::InternalError
})?;
@@ -158,9 +153,9 @@
return Err(RebootReason::InvalidPayload);
};
- // SAFETY - The tracker validated the range to be in main memory, mapped, and not overlap.
- let kernel =
- unsafe { slice::from_raw_parts(kernel_range.start as *const u8, kernel_range.len()) };
+ let kernel = kernel_range.start as *const u8;
+ // SAFETY: The tracker validated the range to be in main memory, mapped, and not overlap.
+ let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };
let ramdisk = if let Some(r) = info.initrd_range {
debug!("Located ramdisk at {r:?}");
@@ -169,7 +164,7 @@
RebootReason::InvalidRamdisk
})?;
- // SAFETY - The region was validated by memory to be in main memory, mapped, and
+ // SAFETY: The region was validated by memory to be in main memory, mapped, and
// not overlap.
Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
} else {
@@ -195,21 +190,7 @@
// - only perform logging once the logger has been initialized
// - only access non-pvmfw memory once (and while) it has been mapped
- logger::init(LevelFilter::Info).map_err(|_| RebootReason::InternalError)?;
-
- // Use debug!() to avoid printing to the UART if we failed to configure it as only local
- // builds that have tweaked the logger::init() call will actually attempt to log the message.
-
- get_hypervisor().mmio_guard_init().map_err(|e| {
- debug!("{e}");
- RebootReason::InternalError
- })?;
-
- get_hypervisor().mmio_guard_map(console::BASE_ADDRESS).map_err(|e| {
- debug!("Failed to configure the UART: {e}");
- RebootReason::InternalError
- })?;
-
+ log::set_max_level(LevelFilter::Info);
crypto::init();
let page_table = memory::init_page_table().map_err(|e| {
@@ -217,7 +198,7 @@
RebootReason::InternalError
})?;
- // SAFETY - We only get the appended payload from here, once. The region was statically mapped,
+ // SAFETY: We only get the appended payload from here, once. The region was statically mapped,
// then remapped by `init_page_table()`.
let appended_data = unsafe { get_appended_data_slice() };
@@ -226,7 +207,10 @@
RebootReason::InvalidConfig
})?;
- let (bcc_slice, debug_policy) = appended.get_entries();
+ let (bcc_slice, debug_policy) = appended.get_entries().map_err(|e| {
+ error!("Failed to obtained the config entries: {e}");
+ RebootReason::InvalidConfig
+ })?;
// Up to this point, we were using the built-in static (from .rodata) page tables.
MEMORY.lock().replace(MemoryTracker::new(
@@ -238,11 +222,6 @@
let slices = MemorySlices::new(fdt, payload, payload_size)?;
- rand::init().map_err(|e| {
- error!("Failed to initialize rand: {e}");
- RebootReason::InternalError
- })?;
-
// This wrapper allows main() to be blissfully ignorant of platform details.
let next_bcc = crate::main(slices.fdt, slices.kernel, slices.ramdisk, bcc_slice, debug_policy)?;
@@ -256,10 +235,12 @@
})?;
// Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
MEMORY.lock().as_mut().unwrap().unshare_all_memory();
- get_hypervisor().mmio_guard_unmap(console::BASE_ADDRESS).map_err(|e| {
- error!("Failed to unshare the UART: {e}");
- RebootReason::InternalError
- })?;
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.unmap(console::BASE_ADDRESS).map_err(|e| {
+ error!("Failed to unshare the UART: {e}");
+ RebootReason::InternalError
+ })?;
+ }
// Drop MemoryTracker and deactivate page table.
drop(MEMORY.lock().take());
@@ -281,25 +262,25 @@
let scratch = layout::scratch_range();
- assert_ne!(scratch.len(), 0, "scratch memory is empty.");
- assert_eq!(scratch.start % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
- assert_eq!(scratch.end % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+ assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
+ assert_eq!(scratch.start.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
+ assert_eq!(scratch.end.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
- assert!(bcc.is_within(&scratch));
+ assert!(bcc.is_within(&(scratch.start.0..scratch.end.0)));
assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
let stack = memory::stack_range();
- assert_ne!(stack.len(), 0, "stack region is empty.");
- assert_eq!(stack.start % ASM_STP_ALIGN, 0, "Misaligned stack region.");
- assert_eq!(stack.end % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+ assert_ne!(stack.end - stack.start, 0, "stack region is empty.");
+ assert_eq!(stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
+ assert_eq!(stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
// Zero all memory that could hold secrets and that can't be safely written to from Rust.
// Disable the exception vector, caches and page table and then jump to the payload at the
// given address, passing it the given FDT pointer.
//
- // SAFETY - We're exiting pvmfw by passing the register values we need to a noreturn asm!().
+ // SAFETY: We're exiting pvmfw by passing the register values we need to a noreturn asm!().
unsafe {
asm!(
"cmp {scratch}, {bcc}",
@@ -378,11 +359,11 @@
sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
bcc = in(reg) u64::try_from(bcc.start).unwrap(),
bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
- cache_line = in(reg) u64::try_from(scratch.start).unwrap(),
- scratch = in(reg) u64::try_from(scratch.start).unwrap(),
- scratch_end = in(reg) u64::try_from(scratch.end).unwrap(),
- stack = in(reg) u64::try_from(stack.start).unwrap(),
- stack_end = in(reg) u64::try_from(stack.end).unwrap(),
+ cache_line = in(reg) u64::try_from(scratch.start.0).unwrap(),
+ scratch = in(reg) u64::try_from(scratch.start.0).unwrap(),
+ scratch_end = in(reg) u64::try_from(scratch.end.0).unwrap(),
+ stack = in(reg) u64::try_from(stack.start.0).unwrap(),
+ stack_end = in(reg) u64::try_from(stack.end.0).unwrap(),
dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
in("x0") fdt_address,
in("x30") payload_start,
@@ -399,7 +380,7 @@
let range = memory::appended_payload_range();
// SAFETY: This region is mapped and the linker script prevents it from overlapping with other
// objects.
- unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) }
+ unsafe { slice::from_raw_parts_mut(range.start.0 as *mut u8, range.end - range.start) }
}
enum AppendedConfigType {
@@ -449,10 +430,10 @@
}
}
- fn get_entries(&mut self) -> (&mut [u8], Option<&mut [u8]>) {
+ fn get_entries(&mut self) -> config::Result<(&mut [u8], Option<&mut [u8]>)> {
match self {
Self::Config(ref mut cfg) => cfg.get_entries(),
- Self::LegacyBcc(ref mut bcc) => (bcc, None),
+ Self::LegacyBcc(ref mut bcc) => Ok((bcc, None)),
}
}
}
diff --git a/pvmfw/src/exceptions.rs b/pvmfw/src/exceptions.rs
index c3f8a29..d9f0891 100644
--- a/pvmfw/src/exceptions.rs
+++ b/pvmfw/src/exceptions.rs
@@ -14,125 +14,34 @@
//! Exception handlers.
-use core::fmt;
-use vmbase::console;
-use vmbase::logger;
-use vmbase::memory::{page_4kb_of, MemoryTrackerError, MEMORY};
-use vmbase::read_sysreg;
-use vmbase::{eprintln, power::reboot};
+use vmbase::{
+ eprintln,
+ exceptions::{ArmException, Esr, HandleExceptionError},
+ logger,
+ memory::{handle_permission_fault, handle_translation_fault},
+ power::reboot,
+ read_sysreg,
+};
-const UART_PAGE: usize = page_4kb_of(console::BASE_ADDRESS);
-
-#[derive(Debug)]
-enum HandleExceptionError {
- PageTableUnavailable,
- PageTableNotInitialized,
- InternalError(MemoryTrackerError),
- UnknownException,
-}
-
-impl From<MemoryTrackerError> for HandleExceptionError {
- fn from(other: MemoryTrackerError) -> Self {
- Self::InternalError(other)
- }
-}
-
-impl fmt::Display for HandleExceptionError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::PageTableUnavailable => write!(f, "Page table is not available."),
- Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
- Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
- Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
- }
- }
-}
-
-#[derive(Debug, PartialEq, Copy, Clone)]
-enum Esr {
- DataAbortTranslationFault,
- DataAbortPermissionFault,
- DataAbortSyncExternalAbort,
- Unknown(usize),
-}
-
-impl Esr {
- const EXT_DABT_32BIT: usize = 0x96000010;
- const TRANSL_FAULT_BASE_32BIT: usize = 0x96000004;
- const TRANSL_FAULT_ISS_MASK_32BIT: usize = !0x143;
- const PERM_FAULT_BASE_32BIT: usize = 0x9600004C;
- const PERM_FAULT_ISS_MASK_32BIT: usize = !0x103;
-}
-
-impl From<usize> for Esr {
- fn from(esr: usize) -> Self {
- if esr == Self::EXT_DABT_32BIT {
- Self::DataAbortSyncExternalAbort
- } else if esr & Self::TRANSL_FAULT_ISS_MASK_32BIT == Self::TRANSL_FAULT_BASE_32BIT {
- Self::DataAbortTranslationFault
- } else if esr & Self::PERM_FAULT_ISS_MASK_32BIT == Self::PERM_FAULT_BASE_32BIT {
- Self::DataAbortPermissionFault
- } else {
- Self::Unknown(esr)
- }
- }
-}
-
-impl fmt::Display for Esr {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::DataAbortSyncExternalAbort => write!(f, "Synchronous external abort"),
- Self::DataAbortTranslationFault => write!(f, "Translation fault"),
- Self::DataAbortPermissionFault => write!(f, "Permission fault"),
- Self::Unknown(v) => write!(f, "Unknown exception esr={v:#08x}"),
- }
- }
-}
-
-#[inline]
-fn handle_translation_fault(far: usize) -> Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_mmio_fault(far)?)
-}
-
-#[inline]
-fn handle_permission_fault(far: usize) -> Result<(), HandleExceptionError> {
- let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
- let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
- Ok(memory.handle_permission_fault(far)?)
-}
-
-fn handle_exception(esr: Esr, far: usize) -> Result<(), HandleExceptionError> {
+fn handle_exception(exception: &ArmException) -> Result<(), HandleExceptionError> {
// Handle all translation faults on both read and write, and MMIO guard map
// flagged invalid pages or blocks that caused the exception.
// Handle permission faults for DBM flagged entries, and flag them as dirty on write.
- match esr {
- Esr::DataAbortTranslationFault => handle_translation_fault(far),
- Esr::DataAbortPermissionFault => handle_permission_fault(far),
+ match exception.esr {
+ Esr::DataAbortTranslationFault => handle_translation_fault(exception.far),
+ Esr::DataAbortPermissionFault => handle_permission_fault(exception.far),
_ => Err(HandleExceptionError::UnknownException),
}
}
-#[inline]
-fn handling_uart_exception(esr: Esr, far: usize) -> bool {
- esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(far) == UART_PAGE
-}
-
#[no_mangle]
extern "C" fn sync_exception_current(elr: u64, _spsr: u64) {
// Disable logging in exception handler to prevent unsafe writes to UART.
let _guard = logger::suppress();
- let esr: Esr = read_sysreg!("esr_el1").into();
- let far = read_sysreg!("far_el1");
- if let Err(e) = handle_exception(esr, far) {
- // Don't print to the UART if we are handling an exception it could raise.
- if !handling_uart_exception(esr, far) {
- eprintln!("sync_exception_current");
- eprintln!("{e}");
- eprintln!("{esr}, far={far:#08x}, elr={elr:#08x}");
- }
+ let exception = ArmException::from_el1_regs();
+ if let Err(e) = handle_exception(&exception) {
+ exception.print("sync_exception_current", e, elr);
reboot()
}
}
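For reference, the Esr type and its decoding now come from vmbase::exceptions, presumably with the same classification rules as the constants deleted above. As a worked example using those constants: an ESR_EL1 value of 0x96000007 (a level-3 data abort translation fault) satisfies 0x96000007 & !0x143 == 0x96000004 == TRANSL_FAULT_BASE_32BIT, so it decodes to DataAbortTranslationFault and is routed to handle_translation_fault().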
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index efb354c..244b192 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -124,8 +124,24 @@
node.setprop(cstr!("bootargs"), bootargs.to_bytes_with_nul())
}
-/// Check if memory range is ok
-fn validate_memory_range(range: &Range<usize>) -> Result<(), RebootReason> {
+/// Reads and validates the memory range in the DT.
+///
+/// Only one memory range is expected with the crosvm setup for now.
+fn read_and_validate_memory_range(fdt: &Fdt) -> Result<Range<usize>, RebootReason> {
+ let mut memory = fdt.memory().map_err(|e| {
+ error!("Failed to read memory range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ let range = memory.next().ok_or_else(|| {
+ error!("The /memory node in the DT contains no range.");
+ RebootReason::InvalidFdt
+ })?;
+ if memory.next().is_some() {
+ warn!(
+ "The /memory node in the DT contains more than one memory range, \
+ while only one is expected."
+ );
+ }
let base = range.start;
if base != MEM_START {
error!("Memory base address {:#x} is not {:#x}", base, MEM_START);
@@ -142,7 +158,7 @@
error!("Memory size is 0");
return Err(RebootReason::InvalidFdt);
}
- Ok(())
+ Ok(range)
}
fn patch_memory_range(fdt: &mut Fdt, memory_range: &Range<usize>) -> libfdt::Result<()> {
@@ -193,7 +209,7 @@
impl PciInfo {
const IRQ_MASK_CELLS: usize = 4;
const IRQ_MAP_CELLS: usize = 10;
- const MAX_IRQS: usize = 8;
+ const MAX_IRQS: usize = 10;
}
type PciAddrRange = AddressRange<(u32, u64), u64, u64>;
@@ -232,14 +248,22 @@
let range1 = ranges.next().ok_or(FdtError::NotFound)?;
let irq_masks = node.getprop_cells(cstr!("interrupt-map-mask"))?.ok_or(FdtError::NotFound)?;
- let irq_masks = CellChunkIterator::<{ PciInfo::IRQ_MASK_CELLS }>::new(irq_masks);
- let irq_masks: ArrayVec<[PciIrqMask; PciInfo::MAX_IRQS]> =
- irq_masks.take(PciInfo::MAX_IRQS).collect();
+ let mut chunks = CellChunkIterator::<{ PciInfo::IRQ_MASK_CELLS }>::new(irq_masks);
+ let irq_masks = (&mut chunks).take(PciInfo::MAX_IRQS).collect();
+
+ if chunks.next().is_some() {
+ warn!("Input DT has more than {} PCI entries!", PciInfo::MAX_IRQS);
+ return Err(FdtError::NoSpace);
+ }
let irq_maps = node.getprop_cells(cstr!("interrupt-map"))?.ok_or(FdtError::NotFound)?;
- let irq_maps = CellChunkIterator::<{ PciInfo::IRQ_MAP_CELLS }>::new(irq_maps);
- let irq_maps: ArrayVec<[PciIrqMap; PciInfo::MAX_IRQS]> =
- irq_maps.take(PciInfo::MAX_IRQS).collect();
+ let mut chunks = CellChunkIterator::<{ PciInfo::IRQ_MAP_CELLS }>::new(irq_maps);
+ let irq_maps = (&mut chunks).take(PciInfo::MAX_IRQS).collect();
+
+ if chunks.next().is_some() {
+ warn!("Input DT has more than {} PCI entries!", PciInfo::MAX_IRQS);
+ return Err(FdtError::NoSpace);
+ }
Ok(PciInfo { ranges: [range0, range1], irq_masks, irq_maps })
}
@@ -543,7 +567,7 @@
*v = v.to_be();
}
- // SAFETY - array size is the same
+ // SAFETY: array size is the same
let value = unsafe {
core::mem::transmute::<
[u32; NUM_INTERRUPTS * CELLS_PER_INTERRUPT],
@@ -600,11 +624,7 @@
RebootReason::InvalidFdt
})?;
- let memory_range = fdt.first_memory_range().map_err(|e| {
- error!("Failed to read memory range from DT: {e}");
- RebootReason::InvalidFdt
- })?;
- validate_memory_range(&memory_range)?;
+ let memory_range = read_and_validate_memory_range(fdt)?;
let bootargs = read_bootargs_from(fdt).map_err(|e| {
error!("Failed to read bootargs from DT: {e}");
@@ -789,7 +809,7 @@
}
};
- // SAFETY - on failure, the corrupted DT is restored using the backup.
+ // SAFETY: on failure, the corrupted DT is restored using the backup.
if let Err(e) = unsafe { fdt.apply_overlay(overlay) } {
warn!("Failed to apply debug policy: {e}. Recovering...");
fdt.copy_from_slice(backup_fdt.as_slice())?;
@@ -801,7 +821,7 @@
}
}
-fn read_common_debug_policy(fdt: &Fdt, debug_feature_name: &CStr) -> libfdt::Result<bool> {
+fn has_common_debug_policy(fdt: &Fdt, debug_feature_name: &CStr) -> libfdt::Result<bool> {
if let Some(node) = fdt.node(cstr!("/avf/guest/common"))? {
if let Some(value) = node.getprop_u32(debug_feature_name)? {
return Ok(value == 1);
@@ -811,8 +831,8 @@
}
fn filter_out_dangerous_bootargs(fdt: &mut Fdt, bootargs: &CStr) -> libfdt::Result<()> {
- let has_crashkernel = read_common_debug_policy(fdt, cstr!("ramdump"))?;
- let has_console = read_common_debug_policy(fdt, cstr!("log"))?;
+ let has_crashkernel = has_common_debug_policy(fdt, cstr!("ramdump"))?;
+ let has_console = has_common_debug_policy(fdt, cstr!("log"))?;
let accepted: &[(&str, Box<dyn Fn(Option<&str>) -> bool>)] = &[
("panic", Box::new(|v| if let Some(v) = v { v == "=-1" } else { false })),
diff --git a/pvmfw/src/gpt.rs b/pvmfw/src/gpt.rs
index b553705..06bf994 100644
--- a/pvmfw/src/gpt.rs
+++ b/pvmfw/src/gpt.rs
@@ -24,9 +24,11 @@
use uuid::Uuid;
use virtio_drivers::device::blk::SECTOR_SIZE;
use vmbase::util::ceiling_div;
-use vmbase::virtio::pci::VirtIOBlk;
+use vmbase::virtio::{pci, HalImpl};
use zerocopy::FromBytes;
+type VirtIOBlk = pci::VirtIOBlk<HalImpl>;
+
pub enum Error {
/// VirtIO error during read operation.
FailedRead(virtio_drivers::Error),
@@ -101,7 +103,7 @@
fn new(mut device: VirtIOBlk) -> Result<Self> {
let mut blk = [0; Self::LBA_SIZE];
- device.read_block(Header::LBA, &mut blk).map_err(Error::FailedRead)?;
+ device.read_blocks(Header::LBA, &mut blk).map_err(Error::FailedRead)?;
let header = Header::read_from_prefix(blk.as_slice()).unwrap();
if !header.is_valid() {
return Err(Error::InvalidHeader);
@@ -128,7 +130,7 @@
for i in Header::ENTRIES_LBA..Header::ENTRIES_LBA.checked_add(num_blocks).unwrap() {
self.read_block(i, &mut blk)?;
let entries = blk.as_ptr().cast::<Entry>();
- // SAFETY - blk is assumed to be properly aligned for Entry and its size is assert-ed
+ // SAFETY: blk is assumed to be properly aligned for Entry and its size is assert-ed
// above. All potential values of the slice will produce valid Entry values.
let entries = unsafe { slice::from_raw_parts(entries, min(rem, entries_per_blk)) };
for entry in entries {
@@ -143,11 +145,11 @@
}
fn read_block(&mut self, index: usize, blk: &mut [u8]) -> Result<()> {
- self.device.read_block(index, blk).map_err(Error::FailedRead)
+ self.device.read_blocks(index, blk).map_err(Error::FailedRead)
}
fn write_block(&mut self, index: usize, blk: &[u8]) -> Result<()> {
- self.device.write_block(index, blk).map_err(Error::FailedWrite)
+ self.device.write_blocks(index, blk).map_err(Error::FailedWrite)
}
}
diff --git a/pvmfw/src/instance.rs b/pvmfw/src/instance.rs
index 56468b2..f2b34da 100644
--- a/pvmfw/src/instance.rs
+++ b/pvmfw/src/instance.rs
@@ -21,7 +21,6 @@
use crate::gpt;
use crate::gpt::Partition;
use crate::gpt::Partitions;
-use crate::rand;
use core::fmt;
use core::mem::size_of;
use diced_open_dice::DiceMode;
@@ -30,8 +29,10 @@
use log::trace;
use uuid::Uuid;
use virtio_drivers::transport::{pci::bus::PciRoot, DeviceType, Transport};
+use vmbase::rand;
use vmbase::util::ceiling_div;
use vmbase::virtio::pci::{PciTransportIterator, VirtIOBlk};
+use vmbase::virtio::HalImpl;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
@@ -183,10 +184,11 @@
}
fn find_instance_img(pci_root: &mut PciRoot) -> Result<Partition> {
- for transport in
- PciTransportIterator::new(pci_root).filter(|t| DeviceType::Block == t.device_type())
+ for transport in PciTransportIterator::<HalImpl>::new(pci_root)
+ .filter(|t| DeviceType::Block == t.device_type())
{
- let device = VirtIOBlk::new(transport).map_err(Error::VirtIOBlkCreationFailed)?;
+ let device =
+ VirtIOBlk::<HalImpl>::new(transport).map_err(Error::VirtIOBlkCreationFailed)?;
match Partition::get_by_name(device, "vm-instance") {
Ok(Some(p)) => return Ok(p),
Ok(None) => {}
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index c826cd8..ba453e7 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -28,12 +28,9 @@
mod exceptions;
mod fdt;
mod gpt;
-mod heap;
mod helpers;
-mod hvc;
mod instance;
mod memory;
-mod rand;
use crate::bcc::Bcc;
use crate::dice::PartialInputs;
@@ -51,6 +48,7 @@
use pvmfw_avb::Capability;
use pvmfw_avb::DebugLevel;
use pvmfw_embedded_key::PUBLIC_KEY;
+use vmbase::heap;
use vmbase::memory::flush;
use vmbase::memory::MEMORY;
use vmbase::virtio::pci;
@@ -97,8 +95,8 @@
// Set up PCI bus for VirtIO devices.
let pci_info = PciInfo::from_fdt(fdt).map_err(handle_pci_error)?;
debug!("PCI: {:#x?}", pci_info);
- let mut pci_root = pci::initialise(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
- error!("Failed to initialise PCI: {e}");
+ let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap()).map_err(|e| {
+ error!("Failed to initialize PCI: {e}");
RebootReason::InternalError
})?;
@@ -106,6 +104,11 @@
error!("Failed to verify the payload: {e}");
RebootReason::PayloadVerificationError
})?;
+ let debuggable = verified_boot_data.debug_level != DebugLevel::None;
+ if debuggable {
+ info!("Successfully verified a debuggable payload.");
+ info!("Please disregard any previous libavb ERROR about initrd_normal.");
+ }
if verified_boot_data.capabilities.contains(&Capability::RemoteAttest) {
info!("Service VM capable of remote attestation detected");
@@ -148,7 +151,6 @@
flush(next_bcc);
let strict_boot = true;
- let debuggable = verified_boot_data.debug_level != DebugLevel::None;
modify_for_next_stage(fdt, next_bcc, new_instance, strict_boot, debug_policy, debuggable)
.map_err(|e| {
error!("Failed to configure device tree: {e}");
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 11fcd7c..06158dd 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -14,28 +14,28 @@
//! Low-level allocation and tracking of main memory.
-#![deny(unsafe_op_in_unsafe_fn)]
-
use crate::helpers::PVMFW_PAGE_SIZE;
+use aarch64_paging::paging::VirtualAddress;
use aarch64_paging::MapError;
+use core::ops::Range;
use core::result;
use log::error;
use vmbase::{
layout,
- memory::{MemoryRange, PageTable, SIZE_2MB, SIZE_4KB},
+ memory::{PageTable, SIZE_2MB, SIZE_4KB},
util::align_up,
};
/// Returns memory range reserved for the appended payload.
-pub fn appended_payload_range() -> MemoryRange {
- let start = align_up(layout::binary_end(), SIZE_4KB).unwrap();
+pub fn appended_payload_range() -> Range<VirtualAddress> {
+ let start = align_up(layout::binary_end().0, SIZE_4KB).unwrap();
// pvmfw is contained in a 2MiB region so the payload can't be larger than the 2MiB alignment.
let end = align_up(start, SIZE_2MB).unwrap();
- start..end
+ VirtualAddress(start)..VirtualAddress(end)
}
/// Region allocated for the stack.
-pub fn stack_range() -> MemoryRange {
+pub fn stack_range() -> Range<VirtualAddress> {
const STACK_PAGES: usize = 8;
layout::stack_range(STACK_PAGES * PVMFW_PAGE_SIZE)
@@ -46,12 +46,12 @@
// Stack and scratch ranges are explicitly zeroed and flushed before jumping to payload,
// so dirty state management can be omitted.
- page_table.map_data(&layout::scratch_range())?;
- page_table.map_data(&stack_range())?;
- page_table.map_code(&layout::text_range())?;
- page_table.map_rodata(&layout::rodata_range())?;
- page_table.map_data_dbm(&appended_payload_range())?;
- if let Err(e) = page_table.map_device(&layout::console_uart_range()) {
+ page_table.map_data(&layout::scratch_range().into())?;
+ page_table.map_data(&stack_range().into())?;
+ page_table.map_code(&layout::text_range().into())?;
+ page_table.map_rodata(&layout::rodata_range().into())?;
+ page_table.map_data_dbm(&appended_payload_range().into())?;
+ if let Err(e) = page_table.map_device(&layout::console_uart_range().into()) {
error!("Failed to remap the UART as a dynamic page table entry: {e}");
return Err(e);
}
diff --git a/pvmfw/src/rand.rs b/pvmfw/src/rand.rs
deleted file mode 100644
index b45538a..0000000
--- a/pvmfw/src/rand.rs
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2023, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::hvc;
-use core::fmt;
-use core::mem::size_of;
-
-pub enum Error {
- /// Error during SMCCC TRNG call.
- Trng(hvc::trng::Error),
- /// Unsupported SMCCC TRNG version.
- UnsupportedVersion((u16, u16)),
-}
-
-impl From<hvc::trng::Error> for Error {
- fn from(e: hvc::trng::Error) -> Self {
- Self::Trng(e)
- }
-}
-
-pub type Result<T> = core::result::Result<T, Error>;
-
-impl fmt::Display for Error {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Self::Trng(e) => write!(f, "SMCCC TRNG error: {e}"),
- Self::UnsupportedVersion((x, y)) => {
- write!(f, "Unsupported SMCCC TRNG version v{x}.{y}")
- }
- }
- }
-}
-
-impl fmt::Debug for Error {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "{self}")
- }
-}
-
-/// Configure the source of entropy.
-pub fn init() -> Result<()> {
- match hvc::trng_version()? {
- (1, _) => Ok(()),
- version => Err(Error::UnsupportedVersion(version)),
- }
-}
-
-fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
- const MAX_BYTES_PER_CALL: usize = size_of::<hvc::TrngRng64Entropy>();
-
- let (aligned, remainder) = s.split_at_mut(s.len() - s.len() % MAX_BYTES_PER_CALL);
-
- for chunk in aligned.chunks_exact_mut(MAX_BYTES_PER_CALL) {
- let (r, s, t) = repeat_trng_rnd(chunk.len())?;
-
- let mut words = chunk.chunks_exact_mut(size_of::<u64>());
- words.next().unwrap().clone_from_slice(&t.to_ne_bytes());
- words.next().unwrap().clone_from_slice(&s.to_ne_bytes());
- words.next().unwrap().clone_from_slice(&r.to_ne_bytes());
- }
-
- if !remainder.is_empty() {
- let mut entropy = [0; MAX_BYTES_PER_CALL];
- let (r, s, t) = repeat_trng_rnd(remainder.len())?;
-
- let mut words = entropy.chunks_exact_mut(size_of::<u64>());
- words.next().unwrap().clone_from_slice(&t.to_ne_bytes());
- words.next().unwrap().clone_from_slice(&s.to_ne_bytes());
- words.next().unwrap().clone_from_slice(&r.to_ne_bytes());
-
- remainder.clone_from_slice(&entropy[..remainder.len()]);
- }
-
- Ok(())
-}
-
-fn repeat_trng_rnd(n_bytes: usize) -> hvc::trng::Result<hvc::TrngRng64Entropy> {
- let bits = usize::try_from(u8::BITS).unwrap();
- let n_bits = (n_bytes * bits).try_into().unwrap();
- loop {
- match hvc::trng_rnd64(n_bits) {
- Err(hvc::trng::Error::NoEntropy) => continue,
- res => return res,
- }
- }
-}
-
-pub fn random_array<const N: usize>() -> Result<[u8; N]> {
- let mut arr = [0; N];
- fill_with_entropy(&mut arr)?;
- Ok(arr)
-}
-
-#[no_mangle]
-extern "C" fn CRYPTO_sysrand_for_seed(out: *mut u8, req: usize) {
- CRYPTO_sysrand(out, req)
-}
-
-#[no_mangle]
-extern "C" fn CRYPTO_sysrand(out: *mut u8, req: usize) {
- // SAFETY - We need to assume that out points to valid memory of size req.
- let s = unsafe { core::slice::from_raw_parts_mut(out, req) };
- fill_with_entropy(s).unwrap()
-}
diff --git a/rialto/Android.bp b/rialto/Android.bp
index 59f8ba2..55423ea 100644
--- a/rialto/Android.bp
+++ b/rialto/Android.bp
@@ -6,18 +6,20 @@
name: "librialto",
crate_name: "rialto",
srcs: ["src/main.rs"],
- edition: "2021",
defaults: ["vmbase_ffi_defaults"],
rustlibs: [
"libaarch64_paging",
- "libbuddy_system_allocator",
+ "libciborium_io_nostd",
+ "libciborium_nostd",
"libhyp",
"libfdtpci",
"liblibfdt",
"liblog_rust_nostd",
+ "libservice_vm_comm_nostd",
+ "libtinyvec_nostd",
+ "libvirtio_drivers",
"libvmbase",
],
- apex_available: ["com.android.virt"],
}
cc_binary {
@@ -29,13 +31,11 @@
],
static_libs: [
"librialto",
- "libvmbase_entry",
],
linker_scripts: [
"image.ld",
":vmbase_sections",
],
- apex_available: ["com.android.virt"],
}
raw_binary {
@@ -80,6 +80,7 @@
}
prebuilt_etc {
+ // rialto_bin is a prebuilt target wrapping the signed bare-metal service VM.
name: "rialto_bin",
filename: "rialto.bin",
target: {
@@ -101,10 +102,13 @@
"android.system.virtualizationservice-rust",
"libandroid_logger",
"libanyhow",
+ "libciborium",
"liblibc",
"liblog_rust",
"libnix",
+ "libservice_vm_comm",
"libvmclient",
+ "libvsock",
],
data: [
":rialto_bin",
diff --git a/rialto/AndroidTest.xml b/rialto/AndroidTest.xml
new file mode 100644
index 0000000..43c4c90
--- /dev/null
+++ b/rialto/AndroidTest.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2023 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration description="Config for rialto_test">
+ <!--
+ We need root privilege to bypass SELinux because shell cannot create a vsock socket.
+ Otherwise, we hit the following error:
+
+ avc: denied { create } for scontext=u:r:shell:s0 tcontext=u:r:shell:s0
+ tclass=vsock_socket permissive=0
+ -->
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="push-file" key="rialto_test" value="/data/local/tmp/rialto_test" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
+ <option name="test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="rialto_test" />
+ </test>
+</configuration>
\ No newline at end of file
diff --git a/rialto/src/communication.rs b/rialto/src/communication.rs
new file mode 100644
index 0000000..ee4ecdb
--- /dev/null
+++ b/rialto/src/communication.rs
@@ -0,0 +1,206 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Support for the communication between rialto and the host.
+
+use crate::error::Result;
+use ciborium_io::{Read, Write};
+use core::hint::spin_loop;
+use core::mem;
+use core::result;
+use log::info;
+use service_vm_comm::{Request, Response};
+use tinyvec::ArrayVec;
+use virtio_drivers::{
+ self,
+ device::socket::{
+ SocketError, VirtIOSocket, VsockAddr, VsockConnectionManager, VsockEventType,
+ },
+ transport::Transport,
+ Hal,
+};
+
+const WRITE_BUF_CAPACITY: usize = 512;
+
+pub struct VsockStream<H: Hal, T: Transport> {
+ connection_manager: VsockConnectionManager<H, T>,
+ /// Peer address. The same port is used on rialto and the peer for convenience.
+ peer_addr: VsockAddr,
+ write_buf: ArrayVec<[u8; WRITE_BUF_CAPACITY]>,
+}
+
+impl<H: Hal, T: Transport> VsockStream<H, T> {
+ pub fn new(
+ socket_device_driver: VirtIOSocket<H, T>,
+ peer_addr: VsockAddr,
+ ) -> virtio_drivers::Result<Self> {
+ let mut vsock_stream = Self {
+ connection_manager: VsockConnectionManager::new(socket_device_driver),
+ peer_addr,
+ write_buf: ArrayVec::default(),
+ };
+ vsock_stream.connect()?;
+ Ok(vsock_stream)
+ }
+
+ fn connect(&mut self) -> virtio_drivers::Result {
+ self.connection_manager.connect(self.peer_addr, self.peer_addr.port)?;
+ self.wait_for_connect()?;
+ info!("Connected to the peer {:?}", self.peer_addr);
+ Ok(())
+ }
+
+ fn wait_for_connect(&mut self) -> virtio_drivers::Result {
+ loop {
+ if let Some(event) = self.poll_event_from_peer()? {
+ match event {
+ VsockEventType::Connected => return Ok(()),
+ VsockEventType::Disconnected { .. } => {
+ return Err(SocketError::ConnectionFailed.into())
+ }
+ // We shouldn't receive the following events before the connection is
+ // established.
+ VsockEventType::ConnectionRequest | VsockEventType::Received { .. } => {
+ return Err(SocketError::InvalidOperation.into())
+ }
+ // We can receive credit requests and updates at any time.
+ // This can be ignored as the connection manager handles them in poll().
+ VsockEventType::CreditRequest | VsockEventType::CreditUpdate => {}
+ }
+ } else {
+ spin_loop();
+ }
+ }
+ }
+
+ pub fn read_request(&mut self) -> Result<Request> {
+ Ok(ciborium::from_reader(self)?)
+ }
+
+ pub fn write_response(&mut self, response: &Response) -> Result<()> {
+ Ok(ciborium::into_writer(response, self)?)
+ }
+
+ /// Shuts down the data channel.
+ pub fn shutdown(&mut self) -> virtio_drivers::Result {
+ self.connection_manager.force_close(self.peer_addr, self.peer_addr.port)?;
+ info!("Connection shutdown.");
+ Ok(())
+ }
+
+ fn recv(&mut self, buffer: &mut [u8]) -> virtio_drivers::Result<usize> {
+ self.connection_manager.recv(self.peer_addr, self.peer_addr.port, buffer)
+ }
+
+ fn wait_for_send(&mut self, buffer: &[u8]) -> virtio_drivers::Result {
+ const INSUFFICIENT_BUFFER_SPACE_ERROR: virtio_drivers::Error =
+ virtio_drivers::Error::SocketDeviceError(SocketError::InsufficientBufferSpaceInPeer);
+ loop {
+ match self.connection_manager.send(self.peer_addr, self.peer_addr.port, buffer) {
+ Ok(_) => return Ok(()),
+ Err(INSUFFICIENT_BUFFER_SPACE_ERROR) => {
+ self.poll()?;
+ }
+ Err(e) => return Err(e),
+ }
+ }
+ }
+
+ fn wait_for_recv(&mut self) -> virtio_drivers::Result {
+ loop {
+ match self.poll()? {
+ Some(VsockEventType::Received { .. }) => return Ok(()),
+ _ => spin_loop(),
+ }
+ }
+ }
+
+ /// Polls the rx queue after the connection is established with the peer; this function
+ /// rejects some invalid events. The valid events are handled inside the connection
+ /// manager.
+ fn poll(&mut self) -> virtio_drivers::Result<Option<VsockEventType>> {
+ if let Some(event) = self.poll_event_from_peer()? {
+ match event {
+ VsockEventType::Disconnected { .. } => Err(SocketError::ConnectionFailed.into()),
+ VsockEventType::Connected | VsockEventType::ConnectionRequest => {
+ Err(SocketError::InvalidOperation.into())
+ }
+ // When there is a received event, the received data is buffered in the
+ // connection manager's internal receive buffer, so we don't need to do
+ // anything here.
+ // The credit requests and updates are also handled inside the connection
+ // manager.
+ VsockEventType::Received { .. }
+ | VsockEventType::CreditRequest
+ | VsockEventType::CreditUpdate => Ok(Some(event)),
+ }
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn poll_event_from_peer(&mut self) -> virtio_drivers::Result<Option<VsockEventType>> {
+ Ok(self.connection_manager.poll()?.map(|event| {
+ assert_eq!(event.source, self.peer_addr);
+ assert_eq!(event.destination.port, self.peer_addr.port);
+ event.event_type
+ }))
+ }
+}
+
+impl<H: Hal, T: Transport> Read for VsockStream<H, T> {
+ type Error = virtio_drivers::Error;
+
+ fn read_exact(&mut self, data: &mut [u8]) -> result::Result<(), Self::Error> {
+ let mut start = 0;
+ while start < data.len() {
+ let len = self.recv(&mut data[start..])?;
+ let len = if len == 0 {
+ self.wait_for_recv()?;
+ self.recv(&mut data[start..])?
+ } else {
+ len
+ };
+ start += len;
+ }
+ Ok(())
+ }
+}
+
+impl<H: Hal, T: Transport> Write for VsockStream<H, T> {
+ type Error = virtio_drivers::Error;
+
+ fn write_all(&mut self, data: &[u8]) -> result::Result<(), Self::Error> {
+ if data.len() >= self.write_buf.capacity() - self.write_buf.len() {
+ self.flush()?;
+ if data.len() >= self.write_buf.capacity() {
+ self.wait_for_send(data)?;
+ return Ok(());
+ }
+ }
+ self.write_buf.extend_from_slice(data);
+ Ok(())
+ }
+
+ fn flush(&mut self) -> result::Result<(), Self::Error> {
+ if !self.write_buf.is_empty() {
+ // We need to take the memory from self.write_buf to a temporary
+ // buffer to avoid borrowing `*self` as mutable and immutable at
+ // the same time in `self.wait_for_send(&self.write_buf)`.
+ let buffer = mem::take(&mut self.write_buf);
+ self.wait_for_send(&buffer)?;
+ }
+ Ok(())
+ }
+}
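For context, VsockStream's Write impl above buffers anything smaller than WRITE_BUF_CAPACITY and only blocks in wait_for_send() for oversized writes, so a caller should flush() before shutdown(); otherwise bytes still sitting in write_buf would not be sent. A condensed sketch of the intended call sequence (mirroring rialto/src/main.rs later in this change; socket_device and host_addr() are assumed to be set up as shown there):

    let mut vsock_stream = VsockStream::new(socket_device, host_addr())?;
    let request = vsock_stream.read_request()?;          // CBOR request via ciborium_io::Read
    let response = requests::process_request(request);
    vsock_stream.write_response(&response)?;             // goes through the 512-byte write_buf
    vsock_stream.flush()?;                               // push any buffered bytes to the host
    vsock_stream.shutdown()?;                            // force-close the vsock connection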
diff --git a/rialto/src/error.rs b/rialto/src/error.rs
index 8e2991c..23667ed 100644
--- a/rialto/src/error.rs
+++ b/rialto/src/error.rs
@@ -19,24 +19,37 @@
use fdtpci::PciError;
use hyp::Error as HypervisorError;
use libfdt::FdtError;
-use vmbase::memory::MemoryTrackerError;
+use vmbase::{memory::MemoryTrackerError, virtio::pci};
pub type Result<T> = result::Result<T, Error>;
-#[derive(Clone, Debug)]
+type CiboriumSerError = ciborium::ser::Error<virtio_drivers::Error>;
+type CiboriumDeError = ciborium::de::Error<virtio_drivers::Error>;
+
+#[derive(Debug)]
pub enum Error {
/// Hypervisor error.
Hypervisor(HypervisorError),
/// Failed when attempting to map some range in the page table.
PageTableMapping(MapError),
- /// Failed to initialize the logger.
- LoggerInit,
/// Invalid FDT.
InvalidFdt(FdtError),
/// Invalid PCI.
InvalidPci(PciError),
/// Failed memory operation.
MemoryOperationFailed(MemoryTrackerError),
+ /// Failed to initialize PCI.
+ PciInitializationFailed(pci::PciError),
+ /// Failed to create VirtIO Socket device.
+ VirtIOSocketCreationFailed(virtio_drivers::Error),
+ /// Missing socket device.
+ MissingVirtIOSocketDevice,
+ /// Failed VirtIO driver operation.
+ VirtIODriverOperationFailed(virtio_drivers::Error),
+ /// Failed to serialize.
+ SerializationFailed(CiboriumSerError),
+ /// Failed to deserialize.
+ DeserializationFailed(CiboriumDeError),
}
impl fmt::Display for Error {
@@ -46,10 +59,19 @@
Self::PageTableMapping(e) => {
write!(f, "Failed when attempting to map some range in the page table: {e}.")
}
- Self::LoggerInit => write!(f, "Failed to initialize the logger."),
Self::InvalidFdt(e) => write!(f, "Invalid FDT: {e}"),
Self::InvalidPci(e) => write!(f, "Invalid PCI: {e}"),
Self::MemoryOperationFailed(e) => write!(f, "Failed memory operation: {e}"),
+ Self::PciInitializationFailed(e) => write!(f, "Failed to initialize PCI: {e}"),
+ Self::VirtIOSocketCreationFailed(e) => {
+ write!(f, "Failed to create VirtIO Socket device: {e}")
+ }
+ Self::MissingVirtIOSocketDevice => write!(f, "Missing VirtIO Socket device."),
+ Self::VirtIODriverOperationFailed(e) => {
+ write!(f, "Failed VirtIO driver operation: {e}")
+ }
+ Self::SerializationFailed(e) => write!(f, "Failed to serialize: {e}"),
+ Self::DeserializationFailed(e) => write!(f, "Failed to deserialize: {e}"),
}
}
}
@@ -83,3 +105,21 @@
Self::MemoryOperationFailed(e)
}
}
+
+impl From<virtio_drivers::Error> for Error {
+ fn from(e: virtio_drivers::Error) -> Self {
+ Self::VirtIODriverOperationFailed(e)
+ }
+}
+
+impl From<CiboriumSerError> for Error {
+ fn from(e: CiboriumSerError) -> Self {
+ Self::SerializationFailed(e)
+ }
+}
+
+impl From<CiboriumDeError> for Error {
+ fn from(e: CiboriumDeError) -> Self {
+ Self::DeserializationFailed(e)
+ }
+}
diff --git a/rialto/src/exceptions.rs b/rialto/src/exceptions.rs
index 61f7846..b806b08 100644
--- a/rialto/src/exceptions.rs
+++ b/rialto/src/exceptions.rs
@@ -14,14 +14,37 @@
//! Exception handlers.
-use core::arch::asm;
-use vmbase::{console::emergency_write_str, eprintln, power::reboot};
+use vmbase::{
+ console::emergency_write_str,
+ eprintln,
+ exceptions::{ArmException, Esr, HandleExceptionError},
+ logger,
+ memory::{handle_permission_fault, handle_translation_fault},
+ power::reboot,
+ read_sysreg,
+};
+
+fn handle_exception(exception: &ArmException) -> Result<(), HandleExceptionError> {
+ // Handle all translation faults on both read and write, and MMIO guard map
+ // flagged invalid pages or blocks that caused the exception.
+ // Handle permission faults for DBM flagged entries, and flag them as dirty on write.
+ match exception.esr {
+ Esr::DataAbortTranslationFault => handle_translation_fault(exception.far),
+ Esr::DataAbortPermissionFault => handle_permission_fault(exception.far),
+ _ => Err(HandleExceptionError::UnknownException),
+ }
+}
#[no_mangle]
-extern "C" fn sync_exception_current() {
- emergency_write_str("sync_exception_current\n");
- print_esr();
- reboot();
+extern "C" fn sync_exception_current(elr: u64, _spsr: u64) {
+ // Disable logging in exception handler to prevent unsafe writes to UART.
+ let _guard = logger::suppress();
+
+ let exception = ArmException::from_el1_regs();
+ if let Err(e) = handle_exception(&exception) {
+ exception.print("sync_exception_current", e, elr);
+ reboot()
+ }
}
#[no_mangle]
@@ -71,9 +94,6 @@
#[inline]
fn print_esr() {
- let mut esr: u64;
- unsafe {
- asm!("mrs {esr}, esr_el1", esr = out(reg) esr);
- }
+ let esr = read_sysreg!("esr_el1");
eprintln!("esr={:#08x}", esr);
}
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 29056f1..42d39c4 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -17,71 +17,66 @@
#![no_main]
#![no_std]
+mod communication;
mod error;
mod exceptions;
+mod requests;
extern crate alloc;
+use crate::communication::VsockStream;
use crate::error::{Error, Result};
-use buddy_system_allocator::LockedHeap;
+use ciborium_io::Write;
use core::num::NonZeroUsize;
-use core::result;
use core::slice;
use fdtpci::PciInfo;
-use hyp::{get_hypervisor, HypervisorCap, KvmError};
+use hyp::{get_mem_sharer, get_mmio_guard};
use libfdt::FdtError;
use log::{debug, error, info};
+use virtio_drivers::{
+ device::socket::VsockAddr,
+ transport::{pci::bus::PciRoot, DeviceType, Transport},
+ Hal,
+};
use vmbase::{
+ configure_heap,
fdt::SwiotlbInfo,
layout::{self, crosvm},
main,
- memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE},
+ memory::{MemoryTracker, PageTable, MEMORY, PAGE_SIZE, SIZE_128KB},
power::reboot,
+ virtio::{
+ pci::{self, PciTransportIterator, VirtIOSocket},
+ HalImpl,
+ },
};
-const SZ_1K: usize = 1024;
-const SZ_64K: usize = 64 * SZ_1K;
+fn host_addr() -> VsockAddr {
+ const PROTECTED_VM_PORT: u32 = 5679;
+ const NON_PROTECTED_VM_PORT: u32 = 5680;
+ const VMADDR_CID_HOST: u64 = 2;
-#[global_allocator]
-static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
+ let port = if is_protected_vm() { PROTECTED_VM_PORT } else { NON_PROTECTED_VM_PORT };
+ VsockAddr { cid: VMADDR_CID_HOST, port }
+}
-static mut HEAP: [u8; SZ_64K] = [0; SZ_64K];
-
-fn init_heap() {
- // SAFETY: Allocator set to otherwise unused, static memory.
- unsafe {
- HEAP_ALLOCATOR.lock().init(&mut HEAP as *mut u8 as usize, HEAP.len());
- }
+fn is_protected_vm() -> bool {
+ // Use MMIO support to determine whether the VM is protected.
+ get_mmio_guard().is_some()
}
fn new_page_table() -> Result<PageTable> {
let mut page_table = PageTable::default();
- page_table.map_device(&crosvm::MMIO_RANGE)?;
- page_table.map_data(&layout::scratch_range())?;
- page_table.map_data(&layout::stack_range(40 * PAGE_SIZE))?;
- page_table.map_code(&layout::text_range())?;
- page_table.map_rodata(&layout::rodata_range())?;
- page_table.map_device(&layout::console_uart_range())?;
+ page_table.map_data(&layout::scratch_range().into())?;
+ page_table.map_data(&layout::stack_range(40 * PAGE_SIZE).into())?;
+ page_table.map_code(&layout::text_range().into())?;
+ page_table.map_rodata(&layout::rodata_range().into())?;
+ page_table.map_device(&layout::console_uart_range().into())?;
Ok(page_table)
}
-fn try_init_logger() -> Result<bool> {
- let mmio_guard_supported = match get_hypervisor().mmio_guard_init() {
- // pKVM blocks MMIO by default, we need to enable MMIO guard to support logging.
- Ok(()) => {
- get_hypervisor().mmio_guard_map(vmbase::console::BASE_ADDRESS)?;
- true
- }
- // MMIO guard enroll is not supported in unprotected VM.
- Err(hyp::Error::MmioGuardNotsupported) => false,
- Err(e) => return Err(e.into()),
- };
- vmbase::logger::init(log::LevelFilter::Debug).map_err(|_| Error::LoggerInit)?;
- Ok(mmio_guard_supported)
-}
-
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
@@ -106,8 +101,6 @@
let fdt = unsafe { slice::from_raw_parts(fdt_range.start as *mut u8, fdt_range.len()) };
// We do not need to validate the DT since it is already validated in pvmfw.
let fdt = libfdt::Fdt::from_slice(fdt)?;
- let pci_info = PciInfo::from_fdt(fdt)?;
- debug!("PCI: {pci_info:#x?}");
let memory_range = fdt.first_memory_range()?;
MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
@@ -115,14 +108,14 @@
e
})?;
- if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
- let granule = memory_protection_granule()?;
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let granule = mem_sharer.granule()?;
MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
error!("Failed to initialize dynamically shared pool.");
e
})?;
- } else {
- let range = SwiotlbInfo::new_from_fdt(fdt)?.fixed_range().ok_or_else(|| {
+ } else if let Ok(swiotlb_info) = SwiotlbInfo::new_from_fdt(fdt) {
+ let range = swiotlb_info.fixed_range().ok_or_else(|| {
error!("Pre-shared pool range not specified in swiotlb node");
Error::from(FdtError::BadValue)
})?;
@@ -130,54 +123,72 @@
error!("Failed to initialize pre-shared pool.");
e
})?;
+ } else {
+ info!("No MEM_SHARE capability detected or swiotlb found: allocating buffers from heap.");
+ MEMORY.lock().as_mut().unwrap().init_heap_shared_pool().map_err(|e| {
+ error!("Failed to initialize heap-based pseudo-shared pool.");
+ e
+ })?;
}
+
+ let pci_info = PciInfo::from_fdt(fdt)?;
+ debug!("PCI: {pci_info:#x?}");
+ let mut pci_root = pci::initialize(pci_info, MEMORY.lock().as_mut().unwrap())
+ .map_err(Error::PciInitializationFailed)?;
+ debug!("PCI root: {pci_root:#x?}");
+ let socket_device = find_socket_device::<HalImpl>(&mut pci_root)?;
+ debug!("Found socket device: guest cid = {:?}", socket_device.guest_cid());
+
+ let mut vsock_stream = VsockStream::new(socket_device, host_addr())?;
+ let response = requests::process_request(vsock_stream.read_request()?);
+ vsock_stream.write_response(&response)?;
+ vsock_stream.flush()?;
+ vsock_stream.shutdown()?;
+
Ok(())
}
-fn memory_protection_granule() -> result::Result<usize, hyp::Error> {
- match get_hypervisor().memory_protection_granule() {
- Ok(granule) => Ok(granule),
- // Take the default page size when KVM call is not supported in non-protected VMs.
- Err(hyp::Error::KvmError(KvmError::NotSupported, _)) => Ok(PAGE_SIZE),
- Err(e) => Err(e),
- }
+fn find_socket_device<T: Hal>(pci_root: &mut PciRoot) -> Result<VirtIOSocket<T>> {
+ PciTransportIterator::<T>::new(pci_root)
+ .find(|t| DeviceType::Socket == t.device_type())
+ .map(VirtIOSocket::<T>::new)
+ .transpose()
+ .map_err(Error::VirtIOSocketCreationFailed)?
+ .ok_or(Error::MissingVirtIOSocketDevice)
}
-fn try_unshare_all_memory(mmio_guard_supported: bool) -> Result<()> {
+fn try_unshare_all_memory() -> Result<()> {
info!("Starting unsharing memory...");
// No logging after unmapping UART.
- if mmio_guard_supported {
- get_hypervisor().mmio_guard_unmap(vmbase::console::BASE_ADDRESS)?;
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.unmap(vmbase::console::BASE_ADDRESS)?;
}
// Unshares all memory and deactivates page table.
drop(MEMORY.lock().take());
Ok(())
}
-fn unshare_all_memory(mmio_guard_supported: bool) {
- if let Err(e) = try_unshare_all_memory(mmio_guard_supported) {
+fn unshare_all_memory() {
+ if let Err(e) = try_unshare_all_memory() {
error!("Failed to unshare the memory: {e}");
}
}
/// Entry point for Rialto.
pub fn main(fdt_addr: u64, _a1: u64, _a2: u64, _a3: u64) {
- init_heap();
- let Ok(mmio_guard_supported) = try_init_logger() else {
- // Don't log anything if the logger initialization fails.
- reboot();
- };
+ log::set_max_level(log::LevelFilter::Debug);
// SAFETY: `fdt_addr` is supposed to be a valid pointer and points to
// a valid `Fdt`.
match unsafe { try_main(fdt_addr as usize) } {
- Ok(()) => unshare_all_memory(mmio_guard_supported),
+ Ok(()) => unshare_all_memory(),
Err(e) => {
error!("Rialto failed with {e}");
- unshare_all_memory(mmio_guard_supported);
+ unshare_all_memory();
reboot()
}
}
}
main!(main);
+configure_heap!(SIZE_128KB);
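The shared-pool selection in try_main() above is now a three-way fallback rather than the previous two-way branch. A condensed sketch of that control flow, using only calls visible in this hunk (here `memory` stands in for MEMORY.lock().as_mut().unwrap(), and the swiotlb branch's pool-initialization call is elided because it is not shown in the hunk):

    if let Some(mem_sharer) = get_mem_sharer() {
        // Protected VM: share buffers dynamically at the hypervisor's granule.
        memory.init_dynamic_shared_pool(mem_sharer.granule()?)?;
    } else if let Ok(swiotlb_info) = SwiotlbInfo::new_from_fdt(fdt) {
        // Non-protected VM with a swiotlb node: use its fixed, pre-shared range
        // (the pool-initialization call is elided here).
        let _range = swiotlb_info.fixed_range();
    } else {
        // Neither is available: fall back to a heap-backed pseudo-shared pool.
        memory.init_heap_shared_pool()?;
    }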
diff --git a/rialto/src/requests/api.rs b/rialto/src/requests/api.rs
new file mode 100644
index 0000000..11fdde4
--- /dev/null
+++ b/rialto/src/requests/api.rs
@@ -0,0 +1,31 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module contains the main API for the request processing module.
+
+use alloc::vec::Vec;
+use service_vm_comm::{Request, Response};
+
+/// Processes a request and returns the corresponding response.
+/// This function serves as the entry point for the request processing
+/// module.
+pub fn process_request(request: Request) -> Response {
+ match request {
+ Request::Reverse(v) => Response::Reverse(reverse(v)),
+ }
+}
+
+fn reverse(payload: Vec<u8>) -> Vec<u8> {
+ payload.into_iter().rev().collect()
+}
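A concrete illustration of the API above, assuming Request and Response derive PartialEq (the integration test below relies on comparing Response values):

    assert_eq!(
        process_request(Request::Reverse(vec![1, 2, 3])),
        Response::Reverse(vec![3, 2, 1]),
    );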
diff --git a/rialto/src/requests/mod.rs b/rialto/src/requests/mod.rs
new file mode 100644
index 0000000..ca22777
--- /dev/null
+++ b/rialto/src/requests/mod.rs
@@ -0,0 +1,19 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module contains functions for the request processing.
+
+mod api;
+
+pub use api::process_request;
diff --git a/rialto/tests/test.rs b/rialto/tests/test.rs
index 7048b44..2bd8968 100644
--- a/rialto/tests/test.rs
+++ b/rialto/tests/test.rs
@@ -22,15 +22,22 @@
},
binder::{ParcelFileDescriptor, ProcessState},
};
-use anyhow::{anyhow, Context, Error};
+use anyhow::{anyhow, bail, Context, Result};
use log::info;
+use service_vm_comm::{Request, Response};
use std::fs::File;
-use std::io::{self, BufRead, BufReader};
+use std::io::{self, BufRead, BufReader, BufWriter, Write};
use std::os::unix::io::FromRawFd;
use std::panic;
use std::thread;
use std::time::Duration;
use vmclient::{DeathReason, VmInstance};
+use vsock::{VsockListener, VMADDR_CID_HOST};
+
+// TODO(b/291732060): Move the port numbers to the common library shared between the host
+// and rialto.
+const PROTECTED_VM_PORT: u32 = 5679;
+const NON_PROTECTED_VM_PORT: u32 = 5680;
const SIGNED_RIALTO_PATH: &str = "/data/local/tmp/rialto_test/arm64/rialto.bin";
const UNSIGNED_RIALTO_PATH: &str = "/data/local/tmp/rialto_test/arm64/rialto_unsigned.bin";
@@ -38,7 +45,7 @@
const INSTANCE_IMG_SIZE: i64 = 1024 * 1024; // 1MB
#[test]
-fn boot_rialto_in_protected_vm_successfully() -> Result<(), Error> {
+fn boot_rialto_in_protected_vm_successfully() -> Result<()> {
boot_rialto_successfully(
SIGNED_RIALTO_PATH,
true, // protected_vm
@@ -46,14 +53,14 @@
}
#[test]
-fn boot_rialto_in_unprotected_vm_successfully() -> Result<(), Error> {
+fn boot_rialto_in_unprotected_vm_successfully() -> Result<()> {
boot_rialto_successfully(
UNSIGNED_RIALTO_PATH,
false, // protected_vm
)
}
-fn boot_rialto_successfully(rialto_path: &str, protected_vm: bool) -> Result<(), Error> {
+fn boot_rialto_successfully(rialto_path: &str, protected_vm: bool) -> Result<()> {
android_logger::init_once(
android_logger::Config::default().with_tag("rialto").with_min_level(log::Level::Debug),
);
@@ -111,11 +118,21 @@
memoryMib: 300,
cpuTopology: CpuTopology::ONE_CPU,
platformVersion: "~1.0".to_string(),
- taskProfiles: vec![],
gdbPort: 0, // No gdb
+ ..Default::default()
});
- let vm = VmInstance::create(service.as_ref(), &config, Some(console), Some(log), None)
- .context("Failed to create VM")?;
+ let vm = VmInstance::create(
+ service.as_ref(),
+ &config,
+ Some(console),
+ /* consoleIn */ None,
+ Some(log),
+ None,
+ )
+ .context("Failed to create VM")?;
+
+ let port = if protected_vm { PROTECTED_VM_PORT } else { NON_PROTECTED_VM_PORT };
+ let check_socket_handle = thread::spawn(move || try_check_socket_connection(port).unwrap());
vm.start().context("Failed to start VM")?;
@@ -125,6 +142,15 @@
.ok_or_else(|| anyhow!("Timed out waiting for VM exit"))?;
assert_eq!(death_reason, DeathReason::Shutdown);
+ match check_socket_handle.join() {
+ Ok(_) => {
+ info!(
+ "Received the echoed message. \
+ The socket connection between the host and the service VM works correctly."
+ )
+ }
+ Err(_) => bail!("The socket connection check failed."),
+ }
Ok(())
}
@@ -133,6 +159,7 @@
// SAFETY: These are new FDs with no previous owner.
let reader = unsafe { File::from_raw_fd(reader_fd) };
+ // SAFETY: This is a new FD with no previous owner.
let writer = unsafe { File::from_raw_fd(writer_fd) };
thread::spawn(|| {
@@ -142,3 +169,32 @@
});
Ok(writer)
}
+
+fn try_check_socket_connection(port: u32) -> Result<()> {
+ info!("Setting up the listening socket on port {port}...");
+ let listener = VsockListener::bind_with_cid_port(VMADDR_CID_HOST, port)?;
+ info!("Listening on port {port}...");
+
+ let mut vsock_stream =
+ listener.incoming().next().ok_or_else(|| anyhow!("Failed to get vsock_stream"))??;
+ info!("Accepted connection {:?}", vsock_stream);
+ vsock_stream.set_read_timeout(Some(Duration::from_millis(1_000)))?;
+
+ const WRITE_BUFFER_CAPACITY: usize = 512;
+ let mut buffer = BufWriter::with_capacity(WRITE_BUFFER_CAPACITY, vsock_stream.clone());
+
+ // TODO(b/292080257): Test with a message longer than the receiver's buffer capacity
+ // (1024 bytes) once the guest virtio-vsock driver fixes the credit update in recv().
+ let message = "abc".repeat(166);
+ let request = Request::Reverse(message.as_bytes().to_vec());
+ ciborium::into_writer(&request, &mut buffer)?;
+ buffer.flush()?;
+ info!("Sent request: {request:?}.");
+
+ let response: Response = ciborium::from_reader(&mut vsock_stream)?;
+ info!("Received response: {response:?}.");
+
+ let expected_response: Vec<u8> = message.as_bytes().iter().rev().cloned().collect();
+ assert_eq!(Response::Reverse(expected_response), response);
+ Ok(())
+}
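Note that the checking thread is spawned before vm.start(), so the vsock listener is in practice bound by the time rialto connects out to VMADDR_CID_HOST on its fixed port. The host side of the protocol then condenses to two ciborium calls over the accepted stream, for example:

    ciborium::into_writer(&Request::Reverse(b"abc".to_vec()), &mut buffer)?;
    buffer.flush()?;
    let response: Response = ciborium::from_reader(&mut vsock_stream)?;
    assert_eq!(response, Response::Reverse(b"cba".to_vec()));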
diff --git a/service_vm/client_apk/src/main.rs b/service_vm/client_apk/src/main.rs
index 1f8db96..672dd4a 100644
--- a/service_vm/client_apk/src/main.rs
+++ b/service_vm/client_apk/src/main.rs
@@ -49,12 +49,7 @@
fn request_certificate(csr: &[u8]) -> Vec<u8> {
// SAFETY: It is safe as we only request the size of the certificate in this call.
let certificate_size = unsafe {
- AVmPayload_requestCertificate(
- csr.as_ptr() as *const c_void,
- csr.len(),
- [].as_mut_ptr() as *mut c_void,
- 0,
- )
+ AVmPayload_requestCertificate(csr.as_ptr() as *const c_void, csr.len(), [].as_mut_ptr(), 0)
};
let mut certificate = vec![0u8; certificate_size];
// SAFETY: It is safe as we only write the data into the given buffer within the buffer
diff --git a/tests/aidl/com/android/microdroid/testservice/ITestService.aidl b/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
index a6f1c80..e81f6d7 100644
--- a/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
+++ b/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
@@ -55,6 +55,9 @@
/** Returns a mask of effective capabilities that the process running the payload binary has. */
String[] getEffectiveCapabilities();
+ /** Returns the uid of the process running the binary. */
+ int getUid();
+
/* write the content into the specified file. */
void writeToFile(String content, String path);
@@ -70,6 +73,9 @@
/** Requests the VM to asynchronously call appCallback.setVmCallback() */
void requestCallback(IAppCallback appCallback);
+ /** Read a line from /dev/console */
+ String readLineFromConsole();
+
/**
* Request the service to exit, triggering the termination of the VM. This may cause any
* requests in flight to fail.
diff --git a/tests/benchmark/Android.bp b/tests/benchmark/Android.bp
index 9c512bf..90ba575 100644
--- a/tests/benchmark/Android.bp
+++ b/tests/benchmark/Android.bp
@@ -26,6 +26,7 @@
sdk_version: "test_current",
use_embedded_native_libs: true,
compile_multilib: "64",
+ required: ["perf-setup"],
host_required: ["MicrodroidTestPreparer"],
}
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index aed28a8..625f26a 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -78,6 +78,7 @@
private static final String TAG = "MicrodroidBenchmarks";
private static final String METRIC_NAME_PREFIX = getMetricPrefix() + "microdroid/";
private static final int IO_TEST_TRIAL_COUNT = 5;
+ private static final int TEST_TRIAL_COUNT = 5;
private static final long ONE_MEBI = 1024 * 1024;
@Rule public Timeout globalTimeout = Timeout.seconds(300);
@@ -767,4 +768,35 @@
}
reportMetrics(requestLatencies, "latency/vsock", "us");
}
+
+ @Test
+ public void testVmKillTime() throws Exception {
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadConfigPath("assets/vm_config_io.json")
+ .setDebugLevel(DEBUG_LEVEL_NONE)
+ .build();
+ List<Double> vmKillTime = new ArrayList<>(TEST_TRIAL_COUNT);
+
+ for (int i = 0; i < TEST_TRIAL_COUNT; ++i) {
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_kill_time" + i, config);
+ VmEventListener listener =
+ new VmEventListener() {
+ @Override
+ public void onPayloadReady(VirtualMachine vm) {
+ long start = System.nanoTime();
+ try {
+ vm.stop();
+ } catch (Exception e) {
+ Log.e(TAG, "Error in vm.stop():" + e);
+ throw new RuntimeException(e);
+ }
+ vmKillTime.add((double) (System.nanoTime() - start) / NANO_TO_MICRO);
+ super.onPayloadReady(vm);
+ }
+ };
+ listener.runToFinish(TAG, vm);
+ }
+ reportMetrics(vmKillTime, "vm_kill_time", "microsecond");
+ }
}
diff --git a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
index 8c6218c..f5656e2 100644
--- a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
+++ b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
@@ -60,9 +60,6 @@
// Files that define the "test" instance of CompOS
private static final String COMPOS_TEST_ROOT = "/data/misc/apexdata/com.android.compos/test/";
- private static final String SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME =
- "dalvik.vm.systemservercompilerfilter";
-
private static final String BOOTLOADER_TIME_PROP_NAME = "ro.boot.boottime";
private static final String BOOTLOADER_PREFIX = "bootloader-";
private static final String BOOTLOADER_TIME = "bootloader_time";
@@ -74,7 +71,6 @@
private static final int COMPILE_STAGED_APEX_RETRY_INTERVAL_MS = 10 * 1000;
private static final int COMPILE_STAGED_APEX_TIMEOUT_SEC = 540;
private static final int BOOT_COMPLETE_TIMEOUT_MS = 10 * 60 * 1000;
- private static final double NANOS_IN_SEC = 1_000_000_000.0;
private static final int ROUND_COUNT = 5;
private static final int ROUND_IGNORE_STARTUP_TIME = 3;
private static final String APK_NAME = "MicrodroidTestApp.apk";
@@ -85,12 +81,9 @@
private boolean mNeedTearDown = false;
- private boolean mNeedToRestartPkvmStatus = false;
-
@Before
public void setUp() throws Exception {
mNeedTearDown = false;
- mNeedToRestartPkvmStatus = false;
assumeDeviceIsCapable(getDevice());
mNeedTearDown = true;
@@ -108,11 +101,6 @@
// sees, so we can't rely on that - b/268688303.)
return;
}
- // Restore PKVM status and reboot to prevent previous staged session, if switched.
- if (mNeedToRestartPkvmStatus) {
- setPKVMStatusWithRebootToBootloader(true);
- rebootFromBootloaderAndWaitBootCompleted();
- }
CommandRunner android = new CommandRunner(getDevice());
@@ -121,16 +109,6 @@
}
@Test
- public void testBootEnablePKVM() throws Exception {
- enableDisablePKVMTestHelper(true);
- }
-
- @Test
- public void testBootDisablePKVM() throws Exception {
- enableDisablePKVMTestHelper(false);
- }
-
- @Test
public void testBootWithCompOS() throws Exception {
composTestHelper(true);
}
@@ -262,32 +240,34 @@
.memoryMib(vm_mem_mb)
.cpuTopology("match_host")
.build(device);
- microdroidDevice.waitForBootComplete(30000);
- microdroidDevice.enableAdbRoot();
-
- CommandRunner microdroid = new CommandRunner(microdroidDevice);
-
- microdroid.run("mkdir -p /mnt/ramdisk && chmod 777 /mnt/ramdisk");
- microdroid.run("mount -t tmpfs -o size=32G tmpfs /mnt/ramdisk");
-
- // Allocate memory for the VM until it fails and make sure that we touch
- // the allocated memory in the guest to be able to create stage2 fragmentation.
try {
- microdroid.tryRun(
- String.format(
- "cd /mnt/ramdisk && truncate -s %dM sprayMemory"
- + " && dd if=/dev/zero of=sprayMemory bs=1MB count=%d",
- vm_mem_mb, vm_mem_mb));
- } catch (Exception ex) {
- }
+ microdroidDevice.waitForBootComplete(30000);
+ microdroidDevice.enableAdbRoot();
- // Run the app during the VM run and collect cold startup time.
- for (int i = 0; i < ROUND_COUNT; i++) {
- AmStartupTimeCmdParser duringVmStartApp = getColdRunStartupTimes(android, pkgName);
- metricColector.addStartupTimeMetricDuringVmRun(duringVmStartApp);
- }
+ CommandRunner microdroid = new CommandRunner(microdroidDevice);
- device.shutdownMicrodroid(microdroidDevice);
+ microdroid.run("mkdir -p /mnt/ramdisk && chmod 777 /mnt/ramdisk");
+ microdroid.run("mount -t tmpfs -o size=32G tmpfs /mnt/ramdisk");
+
+ // Allocate memory for the VM until it fails and make sure that we touch
+ // the allocated memory in the guest to be able to create stage2 fragmentation.
+ try {
+ microdroid.tryRun(
+ String.format(
+ "cd /mnt/ramdisk && truncate -s %dM sprayMemory"
+ + " && dd if=/dev/zero of=sprayMemory bs=1MB count=%d",
+ vm_mem_mb, vm_mem_mb));
+ } catch (Exception expected) {
+ }
+
+ // Run the app during the VM run and collect cold startup time.
+ for (int i = 0; i < ROUND_COUNT; i++) {
+ AmStartupTimeCmdParser duringVmStartApp = getColdRunStartupTimes(android, pkgName);
+ metricColector.addStartupTimeMetricDuringVmRun(duringVmStartApp);
+ }
+ } finally {
+ device.shutdownMicrodroid(microdroidDevice);
+ }
// Run the app after the VM run and collect cold startup time.
for (int i = 0; i < ROUND_COUNT; i++) {
@@ -304,12 +284,12 @@
String[] lines = startAppLog.split("[\r\n]+");
mTotalTime = mWaitTime = 0;
- for (int i = 0; i < lines.length; i++) {
- if (lines[i].contains("TotalTime:")) {
- mTotalTime = Integer.parseInt(lines[i].replaceAll("\\D+", ""));
+ for (String line : lines) {
+ if (line.contains("TotalTime:")) {
+ mTotalTime = Integer.parseInt(line.replaceAll("\\D+", ""));
}
- if (lines[i].contains("WaitTime:")) {
- mWaitTime = Integer.parseInt(lines[i].replaceAll("\\D+", ""));
+ if (line.contains("WaitTime:")) {
+ mWaitTime = Integer.parseInt(line.replaceAll("\\D+", ""));
}
}
}
@@ -365,9 +345,9 @@
String content = android.runForResult("cat /proc/meminfo").getStdout().trim();
String[] lines = content.split("[\r\n]+");
- for (int i = 0; i < lines.length; i++) {
- if (lines[i].contains("MemFree:")) {
- freeMemory = Integer.parseInt(lines[i].replaceAll("\\D+", "")) / 1024;
+ for (String line : lines) {
+ if (line.contains("MemFree:")) {
+ freeMemory = Integer.parseInt(line.replaceAll("\\D+", "")) / 1024;
return freeMemory;
}
}
@@ -416,7 +396,7 @@
CommandRunner android = new CommandRunner(getDevice());
String result = android.run("dmesg");
- Pattern pattern = Pattern.compile("\\[(.*)\\].*sys.boot_completed=1.*");
+ Pattern pattern = Pattern.compile("\\[(.*)].*sys.boot_completed=1.*");
for (String line : result.split("[\r\n]+")) {
Matcher matcher = pattern.matcher(line);
if (matcher.find()) {
@@ -426,36 +406,6 @@
throw new IllegalArgumentException("Failed to get boot time info.");
}
- private void enableDisablePKVMTestHelper(boolean isEnable) throws Exception {
- assumePKVMStatusSwitchSupported();
-
- List<Double> bootDmesgTime = new ArrayList<>(ROUND_COUNT);
- Map<String, List<Double>> bootloaderTime = new HashMap<>();
-
- setPKVMStatusWithRebootToBootloader(isEnable);
- rebootFromBootloaderAndWaitBootCompleted();
- for (int round = 0; round < ROUND_COUNT; ++round) {
- getDevice().nonBlockingReboot();
- waitForBootCompleted();
-
- updateBootloaderTimeInfo(bootloaderTime);
-
- double elapsedSec = getDmesgBootTime();
- bootDmesgTime.add(elapsedSec);
- }
-
- String suffix = "";
- if (isEnable) {
- suffix = "enable";
- } else {
- suffix = "disable";
- }
-
- reportMetric(bootDmesgTime, "dmesg_boot_time_with_pkvm_" + suffix, "s");
- reportAggregatedMetrics(bootloaderTime,
- "bootloader_time_with_pkvm_" + suffix, "ms");
- }
-
private void composTestHelper(boolean isWithCompos) throws Exception {
assumeFalse("Skip on CF; too slow", isCuttlefish());
@@ -483,29 +433,6 @@
reportMetric(bootDmesgTime, "dmesg_boot_time_" + suffix, "s");
}
- private void assumePKVMStatusSwitchSupported() throws Exception {
- assumeFalse("Skip on CF; can't reboot to bootloader", isCuttlefish());
-
- // This is an overkill. The intention is to exclude remote_device_proxy, which uses
- // different serial for fastboot. But there's no good way to distinguish from regular IP
- // transport. This is currently not a problem until someone really needs to run the test
- // over regular IP transport.
- assumeFalse("Skip over IP (overkill for remote_device_proxy)", getDevice().isAdbTcp());
-
- if (!getDevice().isStateBootloaderOrFastbootd()) {
- getDevice().rebootIntoBootloader();
- }
- getDevice().waitForDeviceBootloader();
-
- CommandResult result;
- result = getDevice().executeFastbootCommand("oem", "pkvm", "status");
- rebootFromBootloaderAndWaitBootCompleted();
- assumeFalse(result.getStderr().contains("Invalid oem command"));
- // Skip the test if running on a build with pkvm_enabler. Disabling pKVM
- // for such builds results in a bootloop.
- assumeTrue(result.getStderr().contains("misc=auto"));
- }
-
private void reportMetric(List<Double> data, String name, String unit) {
CLog.d("Report metric " + name + "(" + unit + ") : " + data.toString());
Map<String, Double> stats = mMetricsProcessor.computeStats(data, name, unit);
@@ -515,50 +442,6 @@
}
}
- private void reportAggregatedMetrics(Map<String, List<Double>> bootloaderTime,
- String prefix, String unit) {
-
- for (Map.Entry<String, List<Double>> entry : bootloaderTime.entrySet()) {
- reportMetric(entry.getValue(), prefix + "_" + entry.getKey(), unit);
- }
- }
-
- private void setPKVMStatusWithRebootToBootloader(boolean isEnable) throws Exception {
- mNeedToRestartPkvmStatus = true;
-
- if (!getDevice().isStateBootloaderOrFastbootd()) {
- getDevice().rebootIntoBootloader();
- }
- getDevice().waitForDeviceBootloader();
-
- CommandResult result;
- if (isEnable) {
- result = getDevice().executeFastbootCommand("oem", "pkvm", "enable");
- } else {
- result = getDevice().executeFastbootCommand("oem", "pkvm", "disable");
- }
-
- result = getDevice().executeFastbootCommand("oem", "pkvm", "status");
- CLog.i("Gets PKVM status : " + result);
-
- String expectedOutput = "";
-
- if (isEnable) {
- expectedOutput = "pkvm is enabled";
- } else {
- expectedOutput = "pkvm is disabled";
- }
- assertWithMessage("Failed to set PKVM status. Reason: " + result)
- .that(result.toString()).ignoringCase().contains(expectedOutput);
- }
-
- private void rebootFromBootloaderAndWaitBootCompleted() throws Exception {
- getDevice().executeFastbootCommand("reboot");
- getDevice().waitForDeviceOnline(BOOT_COMPLETE_TIMEOUT_MS);
- getDevice().waitForBootComplete(BOOT_COMPLETE_TIMEOUT_MS);
- getDevice().enableAdbRoot();
- }
-
private void waitForBootCompleted() throws Exception {
getDevice().waitForDeviceOnline(BOOT_COMPLETE_TIMEOUT_MS);
getDevice().waitForBootComplete(BOOT_COMPLETE_TIMEOUT_MS);
@@ -568,7 +451,7 @@
private void compileStagedApex(int timeoutSec) throws Exception {
long timeStart = System.currentTimeMillis();
- long timeEnd = timeStart + timeoutSec * 1000;
+ long timeEnd = timeStart + timeoutSec * 1000L;
while (true) {
@@ -599,7 +482,7 @@
private void reInstallApex(int timeoutSec) throws Exception {
long timeStart = System.currentTimeMillis();
- long timeEnd = timeStart + timeoutSec * 1000;
+ long timeEnd = timeStart + timeoutSec * 1000L;
while (true) {
diff --git a/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java b/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java
index 23f8ca6..2ea748b 100644
--- a/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java
+++ b/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java
@@ -28,10 +28,12 @@
private static final String KEY_VENDOR_DEVICE = "ro.product.vendor.device";
private static final String KEY_BUILD_TYPE = "ro.build.type";
+ private static final String KEY_PRODUCT_NAME = "ro.product.name";
private static final String KEY_METRICS_TAG = "debug.hypervisor.metrics_tag";
private static final String CUTTLEFISH_DEVICE_PREFIX = "vsoc_";
private static final String USER_BUILD_TYPE = "user";
+ private static final String HWASAN_SUFFIX = "_hwasan";
private final PropertyGetter mPropertyGetter;
@@ -53,6 +55,14 @@
}
/**
+ * @return whether the build is HWASAN.
+ */
+ public boolean isHwasan() {
+ String productName = getProperty(KEY_PRODUCT_NAME);
+ return productName != null && productName.contains(HWASAN_SUFFIX);
+ }
+
+ /**
* @return whether the device is user build.
*/
public boolean isUserBuild() {
diff --git a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
index f58ce81..9f03ab7 100644
--- a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
+++ b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
@@ -160,7 +160,7 @@
private StringBuilder mLogOutput = new StringBuilder();
private boolean mProcessedBootTimeMetrics = false;
- private void processBootTimeMetrics(String log) {
+ private synchronized void processBootTimeMetrics(String log) {
if (!mVcpuStartedNanoTime.isPresent()) {
mVcpuStartedNanoTime = OptionalLong.of(System.nanoTime());
}
@@ -177,12 +177,8 @@
}
private void logVmOutputAndMonitorBootTimeMetrics(
- String tag,
- InputStream vmOutputStream,
- String name,
- StringBuilder result,
- boolean monitorEvents) {
- mProcessedBootTimeMetrics |= monitorEvents;
+ String tag, InputStream vmOutputStream, String name, StringBuilder result) {
+ mProcessedBootTimeMetrics = true;
new Thread(
() -> {
try {
@@ -192,7 +188,7 @@
String line;
while ((line = reader.readLine()) != null
&& !Thread.interrupted()) {
- if (monitorEvents) processBootTimeMetrics(line);
+ processBootTimeMetrics(line);
Log.i(tag, name + ": " + line);
result.append(line + "\n");
}
@@ -203,17 +199,6 @@
.start();
}
- private void logVmOutputAndMonitorBootTimeMetrics(
- String tag, InputStream vmOutputStream, String name, StringBuilder result) {
- logVmOutputAndMonitorBootTimeMetrics(tag, vmOutputStream, name, result, true);
- }
-
- /** Copy output from the VM to logcat. This is helpful when things go wrong. */
- protected void logVmOutput(
- String tag, InputStream vmOutputStream, String name, StringBuilder result) {
- logVmOutputAndMonitorBootTimeMetrics(tag, vmOutputStream, name, result, false);
- }
-
public void runToFinish(String logTag, VirtualMachine vm)
throws VirtualMachineException, InterruptedException {
vm.setCallback(mExecutorService, this);
@@ -221,7 +206,7 @@
if (vm.getConfig().isVmOutputCaptured()) {
logVmOutputAndMonitorBootTimeMetrics(
logTag, vm.getConsoleOutput(), "Console", mConsoleOutput);
- logVmOutput(logTag, vm.getLogOutput(), "Log", mLogOutput);
+ logVmOutputAndMonitorBootTimeMetrics(logTag, vm.getLogOutput(), "Log", mLogOutput);
}
mExecutorService.awaitTermination(300, TimeUnit.SECONDS);
}
@@ -466,11 +451,13 @@
public String mApkContentsPath;
public String mEncryptedStoragePath;
public String[] mEffectiveCapabilities;
+ public int mUid;
public String mFileContent;
public byte[] mBcc;
public long[] mTimings;
public int mFileMode;
public int mMountFlags;
+ public String mConsoleInput;
public void assertNoException() {
if (mException != null) {
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
index 81ccec7..98327a9 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
@@ -89,6 +89,10 @@
return DeviceProperties.create(getDevice()::getProperty).isCuttlefish();
}
+ protected boolean isHwasan() {
+ return DeviceProperties.create(getDevice()::getProperty).isHwasan();
+ }
+
protected String getMetricPrefix() {
return MetricsProcessor.getMetricPrefix(
DeviceProperties.create(getDevice()::getProperty).getMetricsTag());
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/Pvmfw.java b/tests/hostside/helper/java/com/android/microdroid/test/host/Pvmfw.java
index 95eaa58..d752108 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/Pvmfw.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/Pvmfw.java
@@ -33,20 +33,24 @@
private static final int SIZE_8B = 8; // 8 bytes
private static final int SIZE_4K = 4 << 10; // 4 KiB, PAGE_SIZE
private static final int BUFFER_SIZE = 1024;
- private static final int HEADER_SIZE = Integer.BYTES * 8; // Header has 8 integers.
private static final int HEADER_MAGIC = 0x666d7670;
- private static final int HEADER_VERSION = getVersion(1, 0);
+ private static final int HEADER_DEFAULT_VERSION = getVersion(1, 0);
private static final int HEADER_FLAGS = 0;
@NonNull private final File mPvmfwBinFile;
@NonNull private final File mBccFile;
@Nullable private final File mDebugPolicyFile;
+ private final int mVersion;
private Pvmfw(
- @NonNull File pvmfwBinFile, @NonNull File bccFile, @Nullable File debugPolicyFile) {
+ @NonNull File pvmfwBinFile,
+ @NonNull File bccFile,
+ @Nullable File debugPolicyFile,
+ int version) {
mPvmfwBinFile = Objects.requireNonNull(pvmfwBinFile);
mBccFile = Objects.requireNonNull(bccFile);
mDebugPolicyFile = debugPolicyFile;
+ mVersion = version;
}
/**
@@ -56,17 +60,22 @@
public void serialize(@NonNull File outFile) throws IOException {
Objects.requireNonNull(outFile);
- int bccOffset = HEADER_SIZE;
+ int headerSize = alignTo(getHeaderSize(mVersion), SIZE_8B);
+ int bccOffset = headerSize;
int bccSize = (int) mBccFile.length();
int debugPolicyOffset = alignTo(bccOffset + bccSize, SIZE_8B);
int debugPolicySize = mDebugPolicyFile == null ? 0 : (int) mDebugPolicyFile.length();
int totalSize = debugPolicyOffset + debugPolicySize;
+ if (hasVmDtbo(mVersion)) {
+ // Add VM DTBO size as well.
+ totalSize += Integer.BYTES * 2;
+ }
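+
+ // Header layout (little-endian 32-bit words, written below in this order): magic,
+ // version, total size, flags, BCC offset, BCC size, debug policy offset, debug policy
+ // size, and, for version 1.1 or later, two extra words for the VM DTBO offset/size.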
- ByteBuffer header = ByteBuffer.allocate(HEADER_SIZE).order(LITTLE_ENDIAN);
+ ByteBuffer header = ByteBuffer.allocate(headerSize).order(LITTLE_ENDIAN);
header.putInt(HEADER_MAGIC);
- header.putInt(HEADER_VERSION);
+ header.putInt(mVersion);
header.putInt(totalSize);
header.putInt(HEADER_FLAGS);
header.putInt(bccOffset);
@@ -74,11 +83,18 @@
header.putInt(debugPolicyOffset);
header.putInt(debugPolicySize);
+ if (hasVmDtbo(mVersion)) {
+ // Add placeholder entry for VM DTBO.
+ // TODO(b/291191157): Add a real DTBO and test.
+ header.putInt(0);
+ header.putInt(0);
+ }
+
try (FileOutputStream pvmfw = new FileOutputStream(outFile)) {
appendFile(pvmfw, mPvmfwBinFile);
padTo(pvmfw, SIZE_4K);
pvmfw.write(header.array());
- padTo(pvmfw, HEADER_SIZE);
+ padTo(pvmfw, SIZE_8B);
appendFile(pvmfw, mBccFile);
if (mDebugPolicyFile != null) {
padTo(pvmfw, SIZE_8B);
@@ -110,6 +126,19 @@
}
}
+ private static int getHeaderSize(int version) {
+ if (version == getVersion(1, 0)) {
+ return Integer.BYTES * 8; // Header has 8 integers.
+ }
+ return Integer.BYTES * 10; // Default + VM DTBO (offset, size)
+ }
+
+ private static boolean hasVmDtbo(int version) {
+ int major = getMajorVersion(version);
+ int minor = getMinorVersion(version);
+ return major > 1 || (major == 1 && minor >= 1);
+ }
+
private static int alignTo(int x, int size) {
return (x + size - 1) & ~(size - 1);
}
@@ -118,15 +147,25 @@
return ((major & 0xFFFF) << 16) | (minor & 0xFFFF);
}
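+
+ // Example: getVersion(1, 1) packs to 0x00010001; getMajorVersion()/getMinorVersion()
+ // below recover the two halves.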
+ private static int getMajorVersion(int version) {
+ return (version >> 16) & 0xFFFF;
+ }
+
+ private static int getMinorVersion(int version) {
+ return version & 0xFFFF;
+ }
+
/** Builder for {@link Pvmfw}. */
public static final class Builder {
@NonNull private final File mPvmfwBinFile;
@NonNull private final File mBccFile;
@Nullable private File mDebugPolicyFile;
+ private int mVersion;
public Builder(@NonNull File pvmfwBinFile, @NonNull File bccFile) {
mPvmfwBinFile = Objects.requireNonNull(pvmfwBinFile);
mBccFile = Objects.requireNonNull(bccFile);
+ mVersion = HEADER_DEFAULT_VERSION;
}
@NonNull
@@ -136,8 +175,14 @@
}
@NonNull
+ public Builder setVersion(int major, int minor) {
+ mVersion = getVersion(major, minor);
+ return this;
+ }
+
+ @NonNull
public Pvmfw build() {
- return new Pvmfw(mPvmfwBinFile, mBccFile, mDebugPolicyFile);
+ return new Pvmfw(mPvmfwBinFile, mBccFile, mDebugPolicyFile, mVersion);
}
}
}
diff --git a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
index 014f9f0..9cf28c7 100644
--- a/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/DebugPolicyHostTests.java
@@ -115,7 +115,7 @@
mAndroidDevice.supportsMicrodroid(/* protectedVm= */ true));
assumeFalse("Test requires setprop for using custom pvmfw and adb root", isUserBuild());
- mAndroidDevice.enableAdbRoot();
+ assumeTrue("Skip if adb root fails", mAndroidDevice.enableAdbRoot());
// tradefed copies the test artfacts under /tmp when running tests,
// so we should *find* the artifacts with the file name.
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index a9c404f..94f0cf1 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -42,7 +42,6 @@
import com.android.tradefed.device.DeviceNotAvailableException;
import com.android.tradefed.device.ITestDevice;
import com.android.tradefed.device.TestDevice;
-import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestMetrics;
import com.android.tradefed.util.CommandResult;
@@ -62,12 +61,8 @@
import org.xml.sax.Attributes;
import org.xml.sax.helpers.DefaultHandler;
-import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.ArrayList;
@@ -75,7 +70,6 @@
import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
@@ -95,15 +89,11 @@
private static final int BOOT_COMPLETE_TIMEOUT = 30000; // 30 seconds
- private static final Pattern sCIDPattern = Pattern.compile("with CID (\\d+)");
-
private static class VmInfo {
final Process mProcess;
- final String mCid;
- VmInfo(Process process, String cid) {
+ VmInfo(Process process) {
mProcess = process;
- mCid = cid;
}
}
@@ -376,44 +366,14 @@
PipedInputStream pis = new PipedInputStream();
Process process = RunUtil.getDefault().runCmdInBackground(args, new PipedOutputStream(pis));
- return new VmInfo(process, extractCidFrom(pis));
- }
-
- private static Optional<String> tryExtractCidFrom(String str) {
- Matcher matcher = sCIDPattern.matcher(str);
- if (matcher.find()) {
- return Optional.of(matcher.group(1));
- }
- return Optional.empty();
- }
-
- private static String extractCidFrom(InputStream input) throws IOException {
- String cid = null;
- String line;
- try (BufferedReader out = new BufferedReader(new InputStreamReader(input))) {
- while ((line = out.readLine()) != null) {
- CLog.i("VM output: " + line);
- Optional<String> result = tryExtractCidFrom(line);
- if (result.isPresent()) {
- cid = result.get();
- break;
- }
- }
- }
- assertWithMessage("The output does not contain the expected pattern for CID.")
- .that(cid)
- .isNotNull();
- return cid;
+ return new VmInfo(process);
}
@Test
@CddTest(requirements = {"9.17/C-2-1", "9.17/C-2-2", "9.17/C-2-6"})
public void protectedVmRunsPvmfw() throws Exception {
// Arrange
- boolean protectedVm = true;
- assumeTrue(
- "Skip if protected VMs are not supported",
- getAndroidDevice().supportsMicrodroid(protectedVm));
+ assumeProtectedVmSupported();
final String configPath = "assets/vm_config_apex.json";
// Act
@@ -422,7 +382,7 @@
.debugLevel("full")
.memoryMib(minMemorySize())
.cpuTopology("match_host")
- .protectedVm(protectedVm)
+ .protectedVm(true)
.build(getAndroidDevice());
// Assert
@@ -441,16 +401,16 @@
@CddTest(requirements = {"9.17/C-2-1", "9.17/C-2-2", "9.17/C-2-6"})
public void protectedVmWithImageSignedWithDifferentKeyRunsPvmfw() throws Exception {
// Arrange
- boolean protectedVm = true;
- assumeTrue(
- "Skip if protected VMs are not supported",
- getAndroidDevice().supportsMicrodroid(protectedVm));
+ assumeProtectedVmSupported();
File key = findTestFile("test.com.android.virt.pem");
// Act
VmInfo vmInfo =
runMicrodroidWithResignedImages(
- key, /*keyOverrides=*/ Map.of(), protectedVm, /*updateBootconfigs=*/ true);
+ key,
+ /*keyOverrides=*/ Map.of(),
+ /*isProtected=*/ true,
+ /*updateBootconfigs=*/ true);
// Assert
vmInfo.mProcess.waitFor(5L, TimeUnit.SECONDS);
@@ -465,6 +425,7 @@
@CddTest(requirements = {"9.17/C-2-2", "9.17/C-2-6"})
public void testBootSucceedsWhenNonProtectedVmStartsWithImagesSignedWithDifferentKey()
throws Exception {
+ assumeNonProtectedVmSupported();
File key = findTestFile("test.com.android.virt.pem");
Map<String, File> keyOverrides = Map.of();
VmInfo vmInfo =
@@ -472,7 +433,7 @@
key, keyOverrides, /*isProtected=*/ false, /*updateBootconfigs=*/ true);
assertThatEventually(
100000,
- () -> getDevice().pullFileContents(CONSOLE_PATH),
+ () -> getDevice().pullFileContents(LOG_PATH),
containsString("boot completed, time to run payload"));
vmInfo.mProcess.destroy();
@@ -481,6 +442,8 @@
@Test
@CddTest(requirements = {"9.17/C-2-2", "9.17/C-2-6"})
public void testBootFailsWhenVbMetaDigestDoesNotMatchBootconfig() throws Exception {
+ // protectedVmWithImageSignedWithDifferentKeyRunsPvmfw() is the protected case.
+ assumeNonProtectedVmSupported();
// Sign everything with key1 except vbmeta
File key = findTestFile("test.com.android.virt.pem");
// To be able to stop it, it should be a daemon.
@@ -495,7 +458,7 @@
vmInfo.mProcess.destroy();
}
- private void waitForCrosvmExit(CommandRunner android) throws Exception {
+ private void waitForCrosvmExit(CommandRunner android, String testStartTime) throws Exception {
// TODO: improve crosvm exit check. b/258848245
android.runWithTimeout(
15000,
@@ -503,10 +466,12 @@
"-m",
"1",
"-e",
- "'virtualizationmanager::crosvm.*exited with status exit status:'");
+ "'virtualizationmanager::crosvm.*exited with status exit status:'",
+ "-T",
+ "'" + testStartTime + "'");
}
- private boolean isTombstoneReceivedFromHostLogcat() throws Exception {
+ private boolean isTombstoneReceivedFromHostLogcat(String testStartTime) throws Exception {
// Note this method relies on logcat values being printed by the receiver on host
// userspace crash log: virtualizationservice/src/aidl.rs
// kernel ramdump log: virtualizationmanager/src/crosvm.rs
@@ -525,12 +490,17 @@
"-m",
"1",
"-e",
- ramdumpRegex);
+ ramdumpRegex,
+ "-T",
+ testStartTime);
return !result.trim().isEmpty();
}
private boolean isTombstoneGeneratedWithCmd(
boolean protectedVm, String configPath, String... crashCommand) throws Exception {
+ CommandRunner android = new CommandRunner(getDevice());
+ String testStartTime = android.runWithTimeout(1000, "date", "'+%Y-%m-%d %H:%M:%S.%N'");
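+ // The start time is passed to logcat's -T option so that only entries logged after
+ // this point are matched (see waitForCrosvmExit and isTombstoneReceivedFromHostLogcat).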
+
mMicrodroidDevice =
MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
.debugLevel("full")
@@ -545,17 +515,32 @@
microdroid.run(crashCommand);
// check until microdroid is shut down
- CommandRunner android = new CommandRunner(getDevice());
- waitForCrosvmExit(android);
+ waitForCrosvmExit(android, testStartTime);
- return isTombstoneReceivedFromHostLogcat();
+ return isTombstoneReceivedFromHostLogcat(testStartTime);
}
@Test
- public void testTombstonesAreGeneratedUponUserspaceCrash() throws Exception {
+ public void testTombstonesAreGeneratedUponUserspaceCrashOnNonPvm() throws Exception {
+ assumeNonProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ testTombstonesAreGeneratedUponUserspaceCrash(false);
+ }
+
+ @Test
+ public void testTombstonesAreGeneratedUponUserspaceCrashOnPvm() throws Exception {
+ assumeProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ testTombstonesAreGeneratedUponUserspaceCrash(true);
+ }
+
+ private void testTombstonesAreGeneratedUponUserspaceCrash(boolean protectedVm)
+ throws Exception {
assertThat(
isTombstoneGeneratedWithCmd(
- false,
+ protectedVm,
"assets/vm_config.json",
"kill",
"-SIGSEGV",
@@ -564,10 +549,28 @@
}
@Test
- public void testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrash() throws Exception {
+ public void testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrashOnNonPvm()
+ throws Exception {
+ assumeNonProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrash(false);
+ }
+
+ @Test
+ public void testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrashOnPvm()
+ throws Exception {
+ assumeProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrash(true);
+ }
+
+ private void testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrash(boolean protectedVm)
+ throws Exception {
assertThat(
isTombstoneGeneratedWithCmd(
- false,
+ protectedVm,
"assets/vm_config_no_tombstone.json",
"kill",
"-SIGSEGV",
@@ -591,21 +594,21 @@
@Test
public void testTombstonesAreGeneratedUponKernelCrashOnNonPvm() throws Exception {
- testTombstonesAreGeneratedUponKernelCrash(false);
+ assumeNonProtectedVmSupported();
+ testTombstonesAreGeneratedUponKernelCrash(/* protectedVm=*/ false);
}
@Test
public void testTombstonesAreGeneratedUponKernelCrashOnPvm() throws Exception {
- assumeTrue(
- "Protected VMs are not supported",
- getAndroidDevice().supportsMicrodroid(/*protectedVm=*/ true));
- testTombstonesAreGeneratedUponKernelCrash(true);
+ assumeProtectedVmSupported();
+ testTombstonesAreGeneratedUponKernelCrash(/* protectedVm=*/ true);
}
- private boolean isTombstoneGeneratedWithVmRunApp(boolean debuggable, String... additionalArgs)
- throws Exception {
+ private boolean isTombstoneGeneratedWithVmRunApp(
+ boolean protectedVm, boolean debuggable, String... additionalArgs) throws Exception {
// we can't use microdroid builder as it wants ADB connection (debuggable)
CommandRunner android = new CommandRunner(getDevice());
+ String testStartTime = android.runWithTimeout(1000, "date", "'+%Y-%m-%d %H:%M:%S.%N'");
android.run("rm", "-rf", TEST_ROOT + "*");
android.run("mkdir", "-p", TEST_ROOT + "*");
@@ -623,44 +626,130 @@
apkPath,
idsigPath,
instanceImgPath));
+ if (protectedVm) {
+ cmd.add("--protected");
+ }
Collections.addAll(cmd, additionalArgs);
android.run(cmd.toArray(new String[0]));
- return isTombstoneReceivedFromHostLogcat();
+ return isTombstoneReceivedFromHostLogcat(testStartTime);
}
- private boolean isTombstoneGeneratedWithCrashPayload(boolean debuggable) throws Exception {
+ private boolean isTombstoneGeneratedWithCrashPayload(boolean protectedVm, boolean debuggable)
+ throws Exception {
return isTombstoneGeneratedWithVmRunApp(
- debuggable, "--payload-binary-name", "MicrodroidCrashNativeLib.so");
+ protectedVm, debuggable, "--payload-binary-name", "MicrodroidCrashNativeLib.so");
}
@Test
- public void testTombstonesAreGeneratedWithCrashPayload() throws Exception {
- assertThat(isTombstoneGeneratedWithCrashPayload(true /* debuggable */)).isTrue();
+ public void testTombstonesAreGeneratedWithCrashPayloadOnPvm() throws Exception {
+ assumeProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashPayload(
+ /*protectedVm=*/ true, /*debuggable=*/ true))
+ .isTrue();
}
@Test
- public void testTombstonesAreNotGeneratedWithCrashPayloadWhenNonDebuggable() throws Exception {
- assertThat(isTombstoneGeneratedWithCrashPayload(false /* debuggable */)).isFalse();
+ public void testTombstonesAreGeneratedWithCrashPayloadOnNonPvm() throws Exception {
+ assumeNonProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashPayload(
+ /*protectedVm=*/ false, /*debuggable=*/ true))
+ .isTrue();
}
- private boolean isTombstoneGeneratedWithCrashConfig(boolean debuggable) throws Exception {
+ @Test
+ public void testTombstonesAreNotGeneratedWithCrashPayloadWhenNonDebuggableOnPvm()
+ throws Exception {
+ assumeProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashPayload(
+ /*protectedVm=*/ true, /*debuggable=*/ false))
+ .isFalse();
+ }
+
+ @Test
+ public void testTombstonesAreNotGeneratedWithCrashPayloadWhenNonDebuggableOnNonPvm()
+ throws Exception {
+ assumeNonProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashPayload(
+ /*protectedVm=*/ false, /*debuggable=*/ false))
+ .isFalse();
+ }
+
+ private boolean isTombstoneGeneratedWithCrashConfig(boolean protectedVm, boolean debuggable)
+ throws Exception {
return isTombstoneGeneratedWithVmRunApp(
- debuggable, "--config-path", "assets/vm_config_crash.json");
+ protectedVm, debuggable, "--config-path", "assets/vm_config_crash.json");
}
@Test
- public void testTombstonesAreGeneratedWithCrashConfig() throws Exception {
- assertThat(isTombstoneGeneratedWithCrashConfig(true /* debuggable */)).isTrue();
+ public void testTombstonesAreGeneratedWithCrashConfigOnPvm() throws Exception {
+ assumeProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(isTombstoneGeneratedWithCrashConfig(/*protectedVm=*/ true, /*debuggable=*/ true))
+ .isTrue();
}
@Test
- public void testTombstonesAreNotGeneratedWithCrashConfigWhenNonDebuggable() throws Exception {
- assertThat(isTombstoneGeneratedWithCrashConfig(false /* debuggable */)).isFalse();
+ public void testTombstonesAreGeneratedWithCrashConfigOnNonPvm() throws Exception {
+ assumeNonProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashConfig(
+ /*protectedVm=*/ false, /*debuggable=*/ true))
+ .isTrue();
}
@Test
- public void testTelemetryPushedAtoms() throws Exception {
+ public void testTombstonesAreNotGeneratedWithCrashConfigWhenNonDebuggableOnPvm()
+ throws Exception {
+ assumeProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashConfig(
+ /*protectedVm=*/ true, /*debuggable=*/ false))
+ .isFalse();
+ }
+
+ @Test
+ public void testTombstonesAreNotGeneratedWithCrashConfigWhenNonDebuggableOnNonPvm()
+ throws Exception {
+ assumeNonProtectedVmSupported();
+ // TODO(b/291867858): tombstones are failing in HWASAN enabled Microdroid.
+ assumeFalse("tombstones are failing in HWASAN enabled Microdroid.", isHwasan());
+ assertThat(
+ isTombstoneGeneratedWithCrashConfig(
+ /*protectedVm=*/ false, /*debuggable=*/ false))
+ .isFalse();
+ }
+
+ @Test
+ public void testTelemetryPushedAtomsOnNonPvm() throws Exception {
+ assumeNonProtectedVmSupported();
+ testTelemetryPushedAtoms(false);
+ }
+
+ @Test
+ public void testTelemetryPushedAtomsOnPvm() throws Exception {
+ assumeProtectedVmSupported();
+ testTelemetryPushedAtoms(true);
+ }
+
+ private void testTelemetryPushedAtoms(boolean protectedVm) throws Exception {
// Reset statsd config and report before the test
ConfigUtils.removeConfig(getDevice());
ReportUtils.clearReports(getDevice());
@@ -681,6 +770,7 @@
.debugLevel("full")
.memoryMib(minMemorySize())
.cpuTopology("match_host")
+ .protectedVm(protectedVm)
.build(device);
microdroid.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
device.shutdownMicrodroid(microdroid);
@@ -707,7 +797,7 @@
data.get(0).getAtom().getVmCreationRequested();
assertThat(atomVmCreationRequested.getHypervisor())
.isEqualTo(AtomsProto.VmCreationRequested.Hypervisor.PKVM);
- assertThat(atomVmCreationRequested.getIsProtected()).isFalse();
+ assertThat(atomVmCreationRequested.getIsProtected()).isEqualTo(protectedVm);
assertThat(atomVmCreationRequested.getCreationSucceeded()).isTrue();
assertThat(atomVmCreationRequested.getBinderExceptionCode()).isEqualTo(0);
assertThat(atomVmCreationRequested.getVmIdentifier()).isEqualTo("VmRunApp");
@@ -736,7 +826,19 @@
@Test
@CddTest(requirements = {"9.17/C-1-1", "9.17/C-1-2", "9.17/C/1-3"})
- public void testMicrodroidBoots() throws Exception {
+ public void testMicrodroidBootsOnPvm() throws Exception {
+ assumeProtectedVmSupported();
+ testMicrodroidBoots(true);
+ }
+
+ @Test
+ @CddTest(requirements = {"9.17/C-1-1", "9.17/C-1-2", "9.17/C/1-3"})
+ public void testMicrodroidBootsOnNonPvm() throws Exception {
+ assumeNonProtectedVmSupported();
+ testMicrodroidBoots(false);
+ }
+
+ private void testMicrodroidBoots(boolean protectedVm) throws Exception {
CommandRunner android = new CommandRunner(getDevice());
final String configPath = "assets/vm_config.json"; // path inside the APK
@@ -745,6 +847,7 @@
.debugLevel("full")
.memoryMib(minMemorySize())
.cpuTopology("match_host")
+ .protectedVm(protectedVm)
.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
CommandRunner microdroid = new CommandRunner(mMicrodroidDevice);
@@ -803,13 +906,25 @@
}
@Test
- public void testMicrodroidRamUsage() throws Exception {
+ public void testMicrodroidRamUsageOnPvm() throws Exception {
+ assumeProtectedVmSupported();
+ testMicrodroidRamUsage(true);
+ }
+
+ @Test
+ public void testMicrodroidRamUsageOnNonPvm() throws Exception {
+ assumeNonProtectedVmSupported();
+ testMicrodroidRamUsage(false);
+ }
+
+ private void testMicrodroidRamUsage(boolean protectedVm) throws Exception {
final String configPath = "assets/vm_config.json";
mMicrodroidDevice =
MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
.debugLevel("full")
.memoryMib(minMemorySize())
.cpuTopology("match_host")
+ .protectedVm(protectedVm)
.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
mMicrodroidDevice.enableAdbRoot();
@@ -845,43 +960,6 @@
}
@Test
- public void testCustomVirtualMachinePermission() throws Exception {
- assumeTrue(
- "Protected VMs are not supported",
- getAndroidDevice().supportsMicrodroid(/*protectedVm=*/ true));
- assumeTrue("Test requires adb unroot", getDevice().disableAdbRoot());
- CommandRunner android = new CommandRunner(getDevice());
-
- // Pull etc/microdroid.json
- File virtApexDir = FileUtil.createTempDir("virt_apex");
- File microdroidConfigFile = new File(virtApexDir, "microdroid.json");
- assertThat(getDevice().pullFile(VIRT_APEX + "etc/microdroid.json", microdroidConfigFile))
- .isTrue();
- JSONObject config = new JSONObject(FileUtil.readStringFromFile(microdroidConfigFile));
-
- // USE_CUSTOM_VIRTUAL_MACHINE is enforced only on protected mode
- config.put("protected", true);
-
- // Write updated config
- final String configPath = TEST_ROOT + "raw_config.json";
- getDevice().pushString(config.toString(), configPath);
-
- // temporarily revoke the permission
- android.run(
- "pm",
- "revoke",
- SHELL_PACKAGE_NAME,
- "android.permission.USE_CUSTOM_VIRTUAL_MACHINE");
- final String ret =
- android.runForResult(VIRT_APEX + "bin/vm run", configPath).getStderr().trim();
-
- assertThat(ret)
- .contains(
- "does not have the android.permission.USE_CUSTOM_VIRTUAL_MACHINE"
- + " permission");
- }
-
- @Test
public void testPathToBinaryIsRejected() throws Exception {
CommandRunner android = new CommandRunner(getDevice());
@@ -987,6 +1065,27 @@
}
}
+ @Test
+ public void testDeviceAssignment() throws Exception {
+ assumeProtectedVmSupported();
+ assumeVfioPlatformSupported();
+
+ List<String> devices = getAssignableDevices();
+ assumeFalse("no assignable devices", devices.isEmpty());
+
+ final String configPath = "assets/vm_config.json";
+ mMicrodroidDevice =
+ MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
+ .debugLevel("full")
+ .memoryMib(minMemorySize())
+ .cpuTopology("match_host")
+ .protectedVm(true)
+ .addAssignableDevice(devices.get(0))
+ .build(getAndroidDevice());
+
+ mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
+ }
+
@Before
public void setUp() throws Exception {
assumeDeviceIsCapable(getDevice());
@@ -996,9 +1095,6 @@
prepareVirtualizationTestSetup(getDevice());
getDevice().installPackage(findTestFile(APK_NAME), /* reinstall */ false);
-
- // clear the log
- getDevice().executeShellV2Command("logcat -c");
}
@After
@@ -1023,6 +1119,43 @@
"android.permission.USE_CUSTOM_VIRTUAL_MACHINE");
}
+ private void assumeProtectedVmSupported() throws Exception {
+ assumeTrue(
+ "Test skipped because protected VMs are not supported",
+ getAndroidDevice().supportsMicrodroid(true));
+ }
+
+ private void assumeNonProtectedVmSupported() throws Exception {
+ assumeTrue(
+ "Test skipped because non-protected VMs are not supported",
+ getAndroidDevice().supportsMicrodroid(false));
+ }
+
+ private void assumeVfioPlatformSupported() throws Exception {
+ TestDevice device = getAndroidDevice();
+ assumeTrue(
+ "Test skipped because VFIO platform is not supported.",
+ device.doesFileExist("/dev/vfio/vfio")
+ && device.doesFileExist("/sys/bus/platform/drivers/vfio-platform"));
+ }
+
+ private List<String> getAssignableDevices() throws Exception {
+ CommandRunner android = new CommandRunner(getDevice());
+ String result = android.run("/apex/com.android.virt/bin/vm", "info");
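+ // "vm info" is expected to print a line like:
+ //   Assignable devices: ["<device1>","<device2>"]
+ // (device names are illustrative); the JSON array is parsed below.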
+ List<String> devices = new ArrayList<>();
+ for (String line : result.split("\n")) {
+ final String header = "Assignable devices: ";
+ if (!line.startsWith(header)) continue;
+
+ JSONArray jsonArray = new JSONArray(line.substring(header.length()));
+ for (int i = 0; i < jsonArray.length(); i++) {
+ devices.add(jsonArray.getString(i));
+ }
+ break;
+ }
+ return devices;
+ }
+
private TestDevice getAndroidDevice() {
TestDevice androidDevice = (TestDevice) getDevice();
assertThat(androidDevice).isNotNull();
diff --git a/tests/hostside/java/com/android/microdroid/test/PvmfwImgTest.java b/tests/hostside/java/com/android/microdroid/test/PvmfwImgTest.java
new file mode 100644
index 0000000..320b722
--- /dev/null
+++ b/tests/hostside/java/com/android/microdroid/test/PvmfwImgTest.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.microdroid.test;
+
+import static com.android.tradefed.device.TestDevice.MicrodroidBuilder;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.junit.Assume.assumeTrue;
+import static org.junit.Assume.assumeFalse;
+import static org.junit.Assert.assertThrows;
+
+import androidx.annotation.NonNull;
+import androidx.annotation.Nullable;
+
+import com.android.microdroid.test.host.MicrodroidHostTestCaseBase;
+import com.android.microdroid.test.host.Pvmfw;
+import com.android.tradefed.device.DeviceNotAvailableException;
+import com.android.tradefed.device.DeviceRuntimeException;
+import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.device.TestDevice;
+import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
+import com.android.tradefed.util.FileUtil;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.io.File;
+import java.util.Objects;
+
+/** Tests pvmfw.img and pvmfw */
+@RunWith(DeviceJUnit4ClassRunner.class)
+public class PvmfwImgTest extends MicrodroidHostTestCaseBase {
+ @NonNull private static final String PVMFW_FILE_NAME = "pvmfw_test.bin";
+ @NonNull private static final String BCC_FILE_NAME = "bcc.dat";
+ @NonNull private static final String PACKAGE_FILE_NAME = "MicrodroidTestApp.apk";
+ @NonNull private static final String PACKAGE_NAME = "com.android.microdroid.test";
+ @NonNull private static final String MICRODROID_DEBUG_FULL = "full";
+ @NonNull private static final String MICRODROID_CONFIG_PATH = "assets/vm_config_apex.json";
+ private static final int BOOT_COMPLETE_TIMEOUT_MS = 30000; // 30 seconds
+ private static final int BOOT_FAILURE_WAIT_TIME_MS = 10000; // 10 seconds
+
+ @NonNull private static final String CUSTOM_PVMFW_FILE_PREFIX = "pvmfw";
+ @NonNull private static final String CUSTOM_PVMFW_FILE_SUFFIX = ".bin";
+ @NonNull private static final String CUSTOM_PVMFW_IMG_PATH = TEST_ROOT + PVMFW_FILE_NAME;
+ @NonNull private static final String CUSTOM_PVMFW_IMG_PATH_PROP = "hypervisor.pvmfw.path";
+
+ @Nullable private static File mPvmfwBinFileOnHost;
+ @Nullable private static File mBccFileOnHost;
+
+ @Nullable private TestDevice mAndroidDevice;
+ @Nullable private ITestDevice mMicrodroidDevice;
+ @Nullable private File mCustomPvmfwBinFileOnHost;
+
+ @Before
+ public void setUp() throws Exception {
+ mAndroidDevice = (TestDevice) Objects.requireNonNull(getDevice());
+
+ // Check device capabilities
+ assumeDeviceIsCapable(mAndroidDevice);
+ assumeTrue(
+ "Skip if protected VMs are not supported",
+ mAndroidDevice.supportsMicrodroid(/* protectedVm= */ true));
+ assumeFalse("Test requires setprop for using custom pvmfw and adb root", isUserBuild());
+
+ assumeTrue("Skip if adb root fails", mAndroidDevice.enableAdbRoot());
+
+ // tradefed copies the test artifacts under /tmp when running tests,
+ // so we should *find* the artifacts with the file name.
+ mPvmfwBinFileOnHost =
+ getTestInformation().getDependencyFile(PVMFW_FILE_NAME, /* targetFirst= */ false);
+ mBccFileOnHost =
+ getTestInformation().getDependencyFile(BCC_FILE_NAME, /* targetFirst= */ false);
+
+ // Prepare the system property for the custom pvmfw.img.
+ // The file itself is prepared later in each test and pushed to the device
+ // when launching with launchProtectedVmAndWaitForBootCompleted().
+ mCustomPvmfwBinFileOnHost =
+ FileUtil.createTempFile(CUSTOM_PVMFW_FILE_PREFIX, CUSTOM_PVMFW_FILE_SUFFIX);
+ mAndroidDevice.setProperty(CUSTOM_PVMFW_IMG_PATH_PROP, CUSTOM_PVMFW_IMG_PATH);
+
+ // Prepare for launching microdroid
+ mAndroidDevice.installPackage(findTestFile(PACKAGE_FILE_NAME), /* reinstall */ false);
+ prepareVirtualizationTestSetup(mAndroidDevice);
+ mMicrodroidDevice = null;
+ }
+
+ @After
+ public void shutdown() throws Exception {
+ if (!mAndroidDevice.supportsMicrodroid(/* protectedVm= */ true)) {
+ return;
+ }
+ if (mMicrodroidDevice != null) {
+ mAndroidDevice.shutdownMicrodroid(mMicrodroidDevice);
+ mMicrodroidDevice = null;
+ }
+ mAndroidDevice.uninstallPackage(PACKAGE_NAME);
+
+ // Cleanup for custom pvmfw.img
+ mAndroidDevice.setProperty(CUSTOM_PVMFW_IMG_PATH_PROP, "");
+ FileUtil.deleteFile(mCustomPvmfwBinFileOnHost);
+
+ cleanUpVirtualizationTestSetup(mAndroidDevice);
+
+ mAndroidDevice.disableAdbRoot();
+ }
+
+ @Test
+ public void testConfigVersion1_0_boots() throws Exception {
+ Pvmfw pvmfw =
+ new Pvmfw.Builder(mPvmfwBinFileOnHost, mBccFileOnHost).setVersion(1, 0).build();
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ launchProtectedVmAndWaitForBootCompleted(BOOT_COMPLETE_TIMEOUT_MS);
+ }
+
+ @Test
+ public void testConfigVersion1_1_boots() throws Exception {
+ Pvmfw pvmfw =
+ new Pvmfw.Builder(mPvmfwBinFileOnHost, mBccFileOnHost).setVersion(1, 1).build();
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ launchProtectedVmAndWaitForBootCompleted(BOOT_COMPLETE_TIMEOUT_MS);
+ }
+
+ @Test
+ public void testInvalidConfigVersion_doesNotBoot() throws Exception {
+ // Note: update this version number once 2.0 becomes a valid config version.
+ Pvmfw pvmfw =
+ new Pvmfw.Builder(mPvmfwBinFileOnHost, mBccFileOnHost).setVersion(2, 0).build();
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ assertThrows(
+ "pvmfw shouldn't boot with invalid version",
+ DeviceRuntimeException.class,
+ () -> launchProtectedVmAndWaitForBootCompleted(BOOT_FAILURE_WAIT_TIME_MS));
+ }
+
+ private ITestDevice launchProtectedVmAndWaitForBootCompleted(long adbTimeoutMs)
+ throws DeviceNotAvailableException {
+ mMicrodroidDevice =
+ MicrodroidBuilder.fromDevicePath(
+ getPathForPackage(PACKAGE_NAME), MICRODROID_CONFIG_PATH)
+ .debugLevel(MICRODROID_DEBUG_FULL)
+ .protectedVm(true)
+ .addBootFile(mCustomPvmfwBinFileOnHost, PVMFW_FILE_NAME)
+ .setAdbConnectTimeoutMs(adbTimeoutMs)
+ .build(mAndroidDevice);
+ assertThat(mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT_MS)).isTrue();
+ return mMicrodroidDevice;
+ }
+}
diff --git a/tests/testapk/Android.bp b/tests/testapk/Android.bp
index fe8f5c9..526f240 100644
--- a/tests/testapk/Android.bp
+++ b/tests/testapk/Android.bp
@@ -2,6 +2,17 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
+android_app_certificate {
+ name: "MicrodroidTestAppCert",
+
+ // The default app cert is the same as the default platform cert
+ // (on a test-keys build), which means we end up getting assigned
+ // the permissions via signature and can't reliably disclaim
+ // them. So instead we use our own custom cert. See b/290582742.
+ // Created via: development/tools/make_key microdroid_test_app '/CN=microdroid_test_app'
+ certificate: "microdroid_test_app",
+}
+
java_defaults {
name: "MicrodroidTestAppsDefaults",
test_suites: [
@@ -12,6 +23,7 @@
"com.android.microdroid.testservice-java",
"com.android.microdroid.test.vmshare_service-java",
],
+ certificate: ":MicrodroidTestAppCert",
sdk_version: "test_current",
jni_uses_platform_apis: true,
use_embedded_native_libs: true,
@@ -43,7 +55,10 @@
],
min_sdk_version: "33",
// Defined in ../vmshareapp/Android.bp
- data: [":MicrodroidVmShareApp"],
+ data: [
+ ":MicrodroidVmShareApp",
+ ":test_microdroid_vendor_image",
+ ],
}
// Defaults shared between MicrodroidTestNativeLib and MicrodroidPayloadInOtherAppNativeLib shared
diff --git a/tests/testapk/AndroidManifest.xml b/tests/testapk/AndroidManifest.xml
index 2ea3f6c..d6e6004 100644
--- a/tests/testapk/AndroidManifest.xml
+++ b/tests/testapk/AndroidManifest.xml
@@ -22,8 +22,7 @@
<queries>
<package android:name="com.android.microdroid.vmshare_app" />
</queries>
- <application>
- </application>
+ <application />
<instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
android:targetPackage="com.android.microdroid.test"
android:label="Microdroid Test" />
diff --git a/tests/testapk/AndroidTest.xml b/tests/testapk/AndroidTest.xml
index 929dd31..e72a2e3 100644
--- a/tests/testapk/AndroidTest.xml
+++ b/tests/testapk/AndroidTest.xml
@@ -23,6 +23,14 @@
<option name="test-file-name" value="MicrodroidTestApp.apk" />
<option name="test-file-name" value="MicrodroidVmShareApp.apk" />
</target_preparer>
+ <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+ <option name="run-command" value="mkdir -p /data/local/tmp/cts/microdroid" />
+ <option name="teardown-command" value="rm -rf /data/local/tmp/cts/microdroid" />
+ </target_preparer>
+ <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
+ <option name="cleanup" value="true" />
+ <option name="push" value="test_microdroid_vendor_image.img->/data/local/tmp/cts/microdroid/test_microdroid_vendor_image.img" />
+ </target_preparer>
<test class="com.android.tradefed.testtype.AndroidJUnitTest" >
<option name="package" value="com.android.microdroid.test" />
<option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
diff --git a/tests/testapk/microdroid_test_app.pk8 b/tests/testapk/microdroid_test_app.pk8
new file mode 100644
index 0000000..dc012bd
--- /dev/null
+++ b/tests/testapk/microdroid_test_app.pk8
Binary files differ
diff --git a/tests/testapk/microdroid_test_app.x509.pem b/tests/testapk/microdroid_test_app.x509.pem
new file mode 100644
index 0000000..9a0309c
--- /dev/null
+++ b/tests/testapk/microdroid_test_app.x509.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDHzCCAgegAwIBAgIUNnOI4tOMieX67OtyD+6BjTsLm0IwDQYJKoZIhvcNAQEL
+BQAwHjEcMBoGA1UEAwwTbWljcm9kcm9pZF90ZXN0X2FwcDAgFw0yMzA4MTgxNDA4
+MDZaGA8yMDUxMDEwMzE0MDgwNlowHjEcMBoGA1UEAwwTbWljcm9kcm9pZF90ZXN0
+X2FwcDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK7B9xDTD2kS4xFQ
+gwQThRqnxKzOmckYqv2XznXq7tCuhU+RgXDrub7Aiq+QgA25Ouw8ORM5FkZAxD6j
+hCRSVo8cyXdNfPygRY/56umL6KqLMqB0tXLHPst3Lh8fl2su2S+jWL71lUwdOBmu
+nBIa1UqxI9PChR/uIqGyDxNRlUnqOA5/FgyX95P9wj8zmXEFe5No8rL/9hjpBvw1
+cOJCH4hea6JKDA15XYxDaTyj5pkmGb228ZbQb10XwOIhtS94CVxIvqmREzZHL7b0
+cjzCwFDDF6sQoVDi71eFYSWInxSNErDU6wv5h2t6+PV+9mGwTi/AJuxTmevSUoAp
+tGwq0NMCAwEAAaNTMFEwHQYDVR0OBBYEFI2m/0SoaNew99YPQlo6oYPJfh7lMB8G
+A1UdIwQYMBaAFI2m/0SoaNew99YPQlo6oYPJfh7lMA8GA1UdEwEB/wQFMAMBAf8w
+DQYJKoZIhvcNAQELBQADggEBABxIQ66ACIrSnDCiI/DqdPPwHf4vva2Y0bVJ5tXN
+ufFQN0Hr4UnttDzWPtfZHQTnrA478b9Z/g4Y0qg/tj2g5oZP50coF9a39mPe6v2k
+vazkMp2H/+ilG4c8L6QsC7UKXn7Lxxznn3ijlh1lYVJ3E6nMibGRKrfaVFpEwtvy
+zT0K8eK9KUZIyG5nf1v8On4Vfu7MnavuxNubKoUhfu0B8hSd5JKiGDuUkSk3MiFX
+uctYmJZEUD1xLI787SzqrhuYMGfuwmrrI0N46yvUgRgxpkVj2s6GNWqRD3F/fOG+
+qFbeenHjFoMJN9HIAZaz4OqzgGfhfMf596rn+HPAJnRMtsI=
+-----END CERTIFICATE-----
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 0ddafeb..a928dcf 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -72,6 +72,7 @@
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.function.ThrowingRunnable;
@@ -128,6 +129,13 @@
public void setup() {
grantPermission(VirtualMachine.MANAGE_VIRTUAL_MACHINE_PERMISSION);
prepareTestSetup(mProtectedVm);
+ // The USE_CUSTOM_VIRTUAL_MACHINE permission has protection level signature|development,
+ // meaning it is granted automatically when the test APK is installed. Some tests check
+ // the behavior when the caller doesn't have this permission (e.g.
+ // createVmWithConfigRequiresPermission). Proactively revoke the permission so that such
+ // tests also pass when run on their own, e.g.:
+ // atest com.android.microdroid.test.MicrodroidTests#createVmWithConfigRequiresPermission
+ revokePermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
}
@After
@@ -548,6 +556,14 @@
.setVmOutputCaptured(true);
e = assertThrows(IllegalStateException.class, () -> captureOutputOnNonDebuggable.build());
assertThat(e).hasMessageThat().contains("debug level must be FULL to capture output");
+
+ VirtualMachineConfig.Builder captureInputOnNonDebuggable =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("binary.so")
+ .setDebugLevel(VirtualMachineConfig.DEBUG_LEVEL_NONE)
+ .setVmConsoleInputSupported(true);
+ e = assertThrows(IllegalStateException.class, () -> captureInputOnNonDebuggable.build());
+ assertThat(e).hasMessageThat().contains("debug level must be FULL to use console input");
}
@Test
@@ -586,6 +602,9 @@
newBaselineBuilder().setDebugLevel(DEBUG_LEVEL_FULL);
VirtualMachineConfig debuggable = debuggableBuilder.build();
assertConfigCompatible(debuggable, debuggableBuilder.setVmOutputCaptured(true)).isFalse();
+ assertConfigCompatible(debuggable, debuggableBuilder.setVmOutputCaptured(false)).isTrue();
+ assertConfigCompatible(debuggable, debuggableBuilder.setVmConsoleInputSupported(true))
+ .isFalse();
VirtualMachineConfig currentContextConfig =
new VirtualMachineConfig.Builder(getContext())
@@ -1251,7 +1270,7 @@
assertThat(payloadStarted.getNow(false)).isTrue();
assertThat(exitCodeFuture.getNow(0)).isNotEqualTo(0);
- assertThat(listener.getConsoleOutput()).contains(reason);
+ assertThat(listener.getLogOutput()).contains(reason);
}
@Test
@@ -1505,6 +1524,30 @@
}
@Test
+ @Ignore // Figure out how to run this conditionally
+ @CddTest(requirements = {"9.17/C-1-1"})
+ public void payloadIsNotRoot() throws Exception {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setMemoryBytes(minMemoryRequired())
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_vm", config);
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mUid = ts.getUid();
+ });
+ testResults.assertNoException();
+ assertThat(testResults.mUid).isNotEqualTo(0);
+ }
+
+ @Test
@CddTest(requirements = {"9.17/C-1-1"})
public void encryptedStorageIsPersistent() throws Exception {
assumeSupportedDevice();
@@ -1575,6 +1618,7 @@
.setProtectedVm(mProtectedVm)
.setPayloadBinaryName("MicrodroidTestNativeLib.so")
.setDebugLevel(DEBUG_LEVEL_FULL)
+ .setVmConsoleInputSupported(true) // even if console input is supported
.build();
final VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_forward_log", vmConfig);
vm.run();
@@ -1589,6 +1633,28 @@
}
}
+ @Test
+ public void inputShouldBeExplicitlyAllowed() throws Exception {
+ assumeSupportedDevice();
+
+ final VirtualMachineConfig vmConfig =
+ new VirtualMachineConfig.Builder(getContext())
+ .setProtectedVm(mProtectedVm)
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .setVmOutputCaptured(true) // even if output is captured
+ .build();
+ final VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_forward_log", vmConfig);
+ vm.run();
+
+ try {
+ assertThrowsVmExceptionContaining(
+ () -> vm.getConsoleInput(), "VM console input is not supported");
+ } finally {
+ vm.stop();
+ }
+ }
+
private boolean checkVmOutputIsRedirectedToLogcat(boolean debuggable) throws Exception {
String time =
LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"));
@@ -1609,7 +1675,7 @@
.command(
"logcat",
"-e",
- "virtualizationmanager::aidl: Console.*executing main task",
+ "virtualizationmanager::aidl: Log.*executing main task",
"-t",
time)
.start();
@@ -1652,6 +1718,35 @@
}
@Test
+ public void testConsoleInputSupported() throws Exception {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .setVmConsoleInputSupported(true)
+ .setVmOutputCaptured(true)
+ .build();
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_console_in", config);
+
+ final String TYPED = "this is a console input\n";
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ OutputStreamWriter consoleIn =
+ new OutputStreamWriter(vm.getConsoleInput());
+ consoleIn.write(TYPED);
+ consoleIn.close();
+ tr.mConsoleInput = ts.readLineFromConsole();
+ });
+ testResults.assertNoException();
+ assertThat(testResults.mConsoleInput).isEqualTo(TYPED);
+ }
+
+ @Test
public void testStartVmWithPayloadOfAnotherApp() throws Exception {
assumeSupportedDevice();
@@ -1901,12 +1996,18 @@
| OsConstants.S_IROTH
| OsConstants.S_IWOTH
| OsConstants.S_IXOTH;
- assertThat(testResults.mFileMode & allPermissionsMask)
- .isEqualTo(OsConstants.S_IRUSR | OsConstants.S_IXUSR);
+ int expectedPermissions =
+ OsConstants.S_IRUSR
+ | OsConstants.S_IXUSR
+ | OsConstants.S_IRGRP
+ | OsConstants.S_IXGRP;
+ assertThat(testResults.mFileMode & allPermissionsMask).isEqualTo(expectedPermissions);
}
- // Taken from bionic/libs/kernel/uapi/linux/mounth.h.
+ // Taken from bionic/libc/kernel/uapi/linux/mount.h
+ private static final int MS_RDONLY = 1;
private static final int MS_NOEXEC = 8;
+ private static final int MS_NOATIME = 1024;
@Test
@CddTest(requirements = {"9.17/C-1-5"})
@@ -1980,6 +2081,85 @@
}
}
+ @Test
+ public void configuringVendorDiskImageRequiresCustomPermission() throws Exception {
+ assumeSupportedDevice();
+
+ File vendorDiskImage =
+ new File("/data/local/tmp/cts/microdroid/test_microdroid_vendor_image.img");
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setVendorDiskImage(vendorDiskImage)
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+
+ VirtualMachine vm =
+ forceCreateNewVirtualMachine("test_vendor_image_req_custom_permission", config);
+
+ SecurityException e =
+ assertThrows(
+ SecurityException.class, () -> runVmTestService(TAG, vm, (ts, tr) -> {}));
+ assertThat(e)
+ .hasMessageThat()
+ .contains("android.permission.USE_CUSTOM_VIRTUAL_MACHINE permission");
+ }
+
+ @Test
+ public void bootsWithVendorPartition() throws Exception {
+ assumeSupportedDevice();
+
+ grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
+
+ File vendorDiskImage =
+ new File("/data/local/tmp/cts/microdroid/test_microdroid_vendor_image.img");
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setVendorDiskImage(vendorDiskImage)
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_boot_with_vendor", config);
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mMountFlags = ts.getMountFlags("/vendor");
+ });
+
+ assertThat(testResults.mException).isNull();
+ int expectedFlags = MS_NOATIME | MS_RDONLY;
+ assertThat(testResults.mMountFlags & expectedFlags).isEqualTo(expectedFlags);
+ }
+
+ @Test
+ public void systemPartitionMountFlags() throws Exception {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_system_mount_flags", config);
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mMountFlags = ts.getMountFlags("/");
+ });
+
+ assertThat(testResults.mException).isNull();
+ int expectedFlags = MS_NOATIME | MS_RDONLY;
+ assertThat(testResults.mMountFlags & expectedFlags).isEqualTo(expectedFlags);
+ }
+
private static class VmShareServiceConnection implements ServiceConnection {
private final CountDownLatch mLatch = new CountDownLatch(1);
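The new bootsWithVendorPartition and systemPartitionMountFlags tests above assert mount flags with a bitmask check against MS_RDONLY and MS_NOATIME. A minimal standalone sketch of that check follows; the constant values are mirrored from linux/mount.h, and the has_flags helper name is illustrative.

```rust
// Mount flag values mirrored from linux/mount.h, as used by the tests above.
const MS_RDONLY: u64 = 1;
const MS_NOEXEC: u64 = 8;
const MS_NOATIME: u64 = 1024;

/// Returns true if every flag in `expected` is set in `actual`.
fn has_flags(actual: u64, expected: u64) -> bool {
    actual & expected == expected
}

fn main() {
    // A read-only, noatime mount still passes even if extra flags (e.g. MS_NOEXEC) are set.
    let mount_flags = MS_RDONLY | MS_NOEXEC | MS_NOATIME;
    assert!(has_flags(mount_flags, MS_RDONLY | MS_NOATIME));
    // Missing a required flag fails the check.
    assert!(!has_flags(MS_RDONLY, MS_RDONLY | MS_NOATIME));
}
```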
diff --git a/tests/testapk/src/native/testbinary.cpp b/tests/testapk/src/native/testbinary.cpp
index 7e0fc5b..c9b5e3a 100644
--- a/tests/testapk/src/native/testbinary.cpp
+++ b/tests/testapk/src/native/testbinary.cpp
@@ -248,6 +248,11 @@
return ScopedAStatus::ok();
}
+ ScopedAStatus getUid(int* out) override {
+ *out = getuid();
+ return ScopedAStatus::ok();
+ }
+
ScopedAStatus runEchoReverseServer() override {
auto result = start_echo_reverse_server();
if (result.ok()) {
@@ -313,6 +318,26 @@
return ScopedAStatus::ok();
}
+ ScopedAStatus readLineFromConsole(std::string* out) {
+ FILE* f = fopen("/dev/console", "r");
+ if (f == nullptr) {
+ return ScopedAStatus::fromExceptionCodeWithMessage(EX_SERVICE_SPECIFIC,
+ "failed to open /dev/console");
+ }
+ char* line = nullptr;
+ size_t len = 0;
+ ssize_t nread = getline(&line, &len, f);
+
+ if (nread == -1) {
+ free(line);
+ return ScopedAStatus::fromExceptionCodeWithMessage(EX_SERVICE_SPECIFIC,
+ "failed to read /dev/console");
+ }
+ out->append(line, nread);
+ free(line);
+ return ScopedAStatus::ok();
+ }
+
ScopedAStatus quit() override { exit(0); }
};
auto testService = ndk::SharedRefBase::make<TestService>();
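readLineFromConsole() above reads a single line from /dev/console with getline(). For reference, here is a hedged Rust sketch of the same "read one line from a device node" pattern; the path used in main is illustrative, since /dev/console is only accessible with the right privileges inside the VM.

```rust
use std::fs::File;
use std::io::{self, BufRead, BufReader};

/// Reads a single line (including the trailing newline) from the given path,
/// mirroring what readLineFromConsole() does with getline() on /dev/console.
fn read_line_from(path: &str) -> io::Result<String> {
    let file = File::open(path)?;
    let mut line = String::new();
    BufReader::new(file).read_line(&mut line)?;
    Ok(line)
}

fn main() -> io::Result<()> {
    // /dev/console is only readable inside the VM; /dev/stdin works for a local try-out.
    let line = read_line_from("/dev/stdin")?;
    print!("read: {line}");
    Ok(())
}
```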
diff --git a/tests/testapk/test.keystore b/tests/testapk/test.keystore
deleted file mode 100644
index 2946641..0000000
--- a/tests/testapk/test.keystore
+++ /dev/null
Binary files differ
diff --git a/tests/vendor_images/Android.bp b/tests/vendor_images/Android.bp
new file mode 100644
index 0000000..09c657c
--- /dev/null
+++ b/tests/vendor_images/Android.bp
@@ -0,0 +1,9 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+android_filesystem {
+ name: "test_microdroid_vendor_image",
+ type: "ext4",
+ file_contexts: ":microdroid_vendor_file_contexts.gen",
+}
diff --git a/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java b/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java
index 1f7ffb7..dc8908b 100644
--- a/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java
+++ b/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java
@@ -220,6 +220,11 @@
}
@Override
+ public int getUid() throws RemoteException {
+ throw new UnsupportedOperationException("Not supported");
+ }
+
+ @Override
public void writeToFile(String content, String path) throws RemoteException {
throw new UnsupportedOperationException("Not supported");
}
@@ -245,6 +250,11 @@
}
@Override
+ public String readLineFromConsole() {
+ throw new UnsupportedOperationException("Not supported");
+ }
+
+ @Override
public void quit() throws RemoteException {
throw new UnsupportedOperationException("Not supported");
}
diff --git a/virtualizationmanager/Android.bp b/virtualizationmanager/Android.bp
index 59e507f..de39aa2 100644
--- a/virtualizationmanager/Android.bp
+++ b/virtualizationmanager/Android.bp
@@ -27,6 +27,7 @@
"libandroid_logger",
"libanyhow",
"libapkverify",
+ "libavflog",
"libbase_rust",
"libbinder_rs",
"libclap",
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 86c8596..97151d7 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -20,7 +20,7 @@
use crate::composite::make_composite_image;
use crate::crosvm::{CrosvmConfig, DiskFile, PayloadState, VmContext, VmInstance, VmState};
use crate::debug_config::DebugConfig;
-use crate::payload::{add_microdroid_payload_images, add_microdroid_system_images};
+use crate::payload::{add_microdroid_payload_images, add_microdroid_system_images, add_microdroid_vendor_image};
use crate::selinux::{getfilecon, SeContext};
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_system_virtualizationcommon::aidl::android::system::virtualizationcommon::{
@@ -28,6 +28,7 @@
ErrorCode::ErrorCode,
};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+ AssignableDevice::AssignableDevice,
CpuTopology::CpuTopology,
DiskImage::DiskImage,
IVirtualMachine::{BnVirtualMachine, IVirtualMachine},
@@ -49,9 +50,11 @@
};
use anyhow::{anyhow, bail, Context, Result};
use apkverify::{HashAlgorithm, V4Signature};
+use avflog::LogResult;
use binder::{
self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor,
Status, StatusCode, Strong,
+ IntoBinderResult,
};
use disk::QcowFile;
use lazy_static::lazy_static;
@@ -61,9 +64,10 @@
use rpcbinder::RpcServer;
use rustutils::system_properties;
use semver::VersionReq;
+use std::collections::HashSet;
use std::convert::TryInto;
use std::ffi::CStr;
-use std::fs::{read_dir, remove_file, File, OpenOptions};
+use std::fs::{canonicalize, read_dir, remove_file, File, OpenOptions};
use std::io::{BufRead, BufReader, Error, ErrorKind, Write};
use std::num::{NonZeroU16, NonZeroU32};
use std::os::unix::io::{FromRawFd, IntoRawFd};
@@ -116,6 +120,20 @@
.context("failed to create idsig")?;
let mut output = clone_file(idsig_fd)?;
+
+ // Optimization: there is no need to regenerate the idsig file every time a VM is started.
+ // Skip the update if the idsig file already carries the same APK digest.
+ if output.metadata()?.len() > 0 {
+ if let Ok(out_sig) = V4Signature::from_idsig(&mut output) {
+ if out_sig.signing_info.apk_digest == sig.signing_info.apk_digest {
+ debug!("idsig {:?} is up-to-date with apk {:?}.", output, input);
+ return Ok(());
+ }
+ }
+ // If reading a V4Signature from the output fails, that's fine: the caller may have passed
+ // an arbitrary file, and we overwrite it below with the V4Signature generated from input_fd.
+ }
+
output.set_len(0).context("failed to set_len on the idsig output")?;
sig.write_into(&mut output).context("failed to write idsig")?;
Ok(())
@@ -163,7 +181,6 @@
Ok(())
}
}
-
impl IVirtualizationService for VirtualizationService {
/// Creates (but does not start) a new VM with the given configuration, assigning it the next
/// available CID.
@@ -172,11 +189,18 @@
fn createVm(
&self,
config: &VirtualMachineConfig,
- console_fd: Option<&ParcelFileDescriptor>,
+ console_out_fd: Option<&ParcelFileDescriptor>,
+ console_in_fd: Option<&ParcelFileDescriptor>,
log_fd: Option<&ParcelFileDescriptor>,
) -> binder::Result<Strong<dyn IVirtualMachine>> {
let mut is_protected = false;
- let ret = self.create_vm_internal(config, console_fd, log_fd, &mut is_protected);
+ let ret = self.create_vm_internal(
+ config,
+ console_out_fd,
+ console_in_fd,
+ log_fd,
+ &mut is_protected,
+ );
write_vm_creation_stats(config, is_protected, &ret);
ret
}
@@ -189,27 +213,17 @@
partition_type: PartitionType,
) -> binder::Result<()> {
check_manage_access()?;
- let size_bytes = size_bytes.try_into().map_err(|e| {
- Status::new_exception_str(
- ExceptionCode::ILLEGAL_ARGUMENT,
- Some(format!("Invalid size {}: {:?}", size_bytes, e)),
- )
- })?;
+ let size_bytes = size_bytes
+ .try_into()
+ .with_context(|| format!("Invalid size: {}", size_bytes))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT)?;
let size_bytes = round_up(size_bytes, PARTITION_GRANULARITY_BYTES);
let image = clone_file(image_fd)?;
// initialize the file. Any data in the file will be erased.
- image.set_len(0).map_err(|e| {
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to reset a file: {:?}", e)),
- )
- })?;
- let mut part = QcowFile::new(image, size_bytes).map_err(|e| {
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to create QCOW2 image: {:?}", e)),
- )
- })?;
+ image.set_len(0).context("Failed to reset a file").or_service_specific_exception(-1)?;
+ let mut part = QcowFile::new(image, size_bytes)
+ .context("Failed to create QCOW2 image")
+ .or_service_specific_exception(-1)?;
match partition_type {
PartitionType::RAW => Ok(()),
@@ -220,12 +234,8 @@
format!("Unsupported partition type {:?}", partition_type),
)),
}
- .map_err(|e| {
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to initialize partition as {:?}: {:?}", partition_type, e)),
- )
- })?;
+ .with_context(|| format!("Failed to initialize partition as {:?}", partition_type))
+ .or_service_specific_exception(-1)?;
Ok(())
}
@@ -236,13 +246,9 @@
input_fd: &ParcelFileDescriptor,
idsig_fd: &ParcelFileDescriptor,
) -> binder::Result<()> {
- // TODO(b/193504400): do this only when (1) idsig_fd is empty or (2) the APK digest in
- // idsig_fd is different from APK digest in input_fd
-
check_manage_access()?;
- create_or_update_idsig_file(input_fd, idsig_fd)
- .map_err(|e| Status::new_service_specific_error_str(-1, Some(format!("{:?}", e))))?;
+ create_or_update_idsig_file(input_fd, idsig_fd).or_service_specific_exception(-1)?;
Ok(())
}
@@ -252,6 +258,12 @@
// Delegate to the global service, including checking the debug permission.
GLOBAL_SERVICE.debugListVms()
}
+
+ /// Get a list of assignable device types.
+ fn getAssignableDevices(&self) -> binder::Result<Vec<AssignableDevice>> {
+ // Delegate to the global service, including checking the permission.
+ GLOBAL_SERVICE.getAssignableDevices()
+ }
}
impl VirtualizationService {
@@ -283,16 +295,15 @@
}
}
}
- Err(Status::new_service_specific_error_str(
- -1,
- Some("Too many attempts to create VM context failed."),
- ))
+ Err(anyhow!("Too many attempts to create VM context failed"))
+ .or_service_specific_exception(-1)
}
fn create_vm_internal(
&self,
config: &VirtualMachineConfig,
- console_fd: Option<&ParcelFileDescriptor>,
+ console_out_fd: Option<&ParcelFileDescriptor>,
+ console_in_fd: Option<&ParcelFileDescriptor>,
log_fd: Option<&ParcelFileDescriptor>,
is_protected: &mut bool,
) -> binder::Result<Strong<dyn IVirtualMachine>> {
@@ -306,15 +317,14 @@
VirtualMachineConfig::RawConfig(_) => true,
VirtualMachineConfig::AppConfig(config) => {
// Some features are reserved for platform apps only, even when using
- // VirtualMachineAppConfig:
+ // VirtualMachineAppConfig. Almost all of these features are grouped in the
+ // CustomConfig struct:
// - controlling CPUs;
- // - specifying a config file in the APK;
+ // - specifying a config file in the APK (this one is not part of CustomConfig);
// - gdbPort is set, meaning that crosvm will start a gdb server;
- // - using anything other than the default kernel.
- !config.taskProfiles.is_empty()
- || matches!(config.payload, Payload::ConfigPath(_))
- || config.gdbPort > 0
- || config.customKernelImage.as_ref().is_some()
+ // - using anything other than the default kernel;
+ // - specifying devices to be assigned.
+ config.customConfig.is_some() || matches!(config.payload, Payload::ConfigPath(_))
}
};
if is_custom {
@@ -341,8 +351,9 @@
};
let state = &mut *self.state.lock().unwrap();
- let console_fd =
- clone_or_prepare_logger_fd(&debug_config, console_fd, format!("Console({})", cid))?;
+ let console_out_fd =
+ clone_or_prepare_logger_fd(&debug_config, console_out_fd, format!("Console({})", cid))?;
+ let console_in_fd = console_in_fd.map(clone_file).transpose()?;
let log_fd = clone_or_prepare_logger_fd(&debug_config, log_fd, format!("Log({})", cid))?;
// Counter to generate unique IDs for temporary image files.
@@ -354,12 +365,12 @@
let (is_app_config, config) = match config {
VirtualMachineConfig::RawConfig(config) => (false, BorrowedOrOwned::Borrowed(config)),
VirtualMachineConfig::AppConfig(config) => {
- let config =
- load_app_config(config, &debug_config, &temporary_directory).map_err(|e| {
+ let config = load_app_config(config, &debug_config, &temporary_directory)
+ .or_service_specific_exception_with(-1, |e| {
*is_protected = config.protectedVm;
let message = format!("Failed to load app config: {:?}", e);
error!("{}", message);
- Status::new_service_specific_error_str(-1, Some(message))
+ message
})?;
(true, BorrowedOrOwned::Owned(config))
}
@@ -383,26 +394,21 @@
}
})
.try_for_each(check_label_for_partition)
- .map_err(|e| Status::new_service_specific_error_str(-1, Some(format!("{:?}", e))))?;
+ .or_service_specific_exception(-1)?;
let kernel = maybe_clone_file(&config.kernel)?;
let initrd = maybe_clone_file(&config.initrd)?;
// In a protected VM, we require custom kernels to come from a trusted source (b/237054515).
if config.protectedVm {
- check_label_for_kernel_files(&kernel, &initrd).map_err(|e| {
- Status::new_service_specific_error_str(-1, Some(format!("{:?}", e)))
- })?;
+ check_label_for_kernel_files(&kernel, &initrd).or_service_specific_exception(-1)?;
}
let zero_filler_path = temporary_directory.join("zero.img");
- write_zero_filler(&zero_filler_path).map_err(|e| {
- error!("Failed to make composite image: {:?}", e);
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to make composite image: {:?}", e)),
- )
- })?;
+ write_zero_filler(&zero_filler_path)
+ .context("Failed to make composite image")
+ .with_log()
+ .or_service_specific_exception(-1)?;
// Assemble disk images if needed.
let disks = config
@@ -423,14 +429,48 @@
CpuTopology::MATCH_HOST => (None, true),
CpuTopology::ONE_CPU => (NonZeroU32::new(1), false),
val => {
- error!("Unexpected value of CPU topology: {:?}", val);
- return Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to parse CPU topology value: {:?}", val)),
- ));
+ return Err(anyhow!("Failed to parse CPU topology value {:?}", val))
+ .with_log()
+ .or_service_specific_exception(-1);
}
};
+ let devices_dtbo = if !config.devices.is_empty() {
+ let mut set = HashSet::new();
+ for device in config.devices.iter() {
+ let path = canonicalize(device)
+ .with_context(|| format!("can't canonicalize {device}"))
+ .or_service_specific_exception(-1)?;
+ if !set.insert(path) {
+ return Err(anyhow!("duplicated device {device}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
+ }
+ }
+ let dtbo_path = temporary_directory.join("dtbo");
+ // Open a writable file descriptor for vfio_handler.
+ let dtbo = File::create(&dtbo_path).map_err(|e| {
+ error!("Failed to create VM DTBO file {dtbo_path:?}: {e:?}");
+ Status::new_service_specific_error_str(
+ -1,
+ Some(format!("Failed to create VM DTBO file {dtbo_path:?}: {e:?}")),
+ )
+ })?;
+ GLOBAL_SERVICE
+ .bindDevicesToVfioDriver(&config.devices, &ParcelFileDescriptor::new(dtbo))?;
+
+ // Open the file again, read-only this time, for crosvm.
+ let dtbo = File::open(&dtbo_path).map_err(|e| {
+ error!("Failed to open VM DTBO file {dtbo_path:?}: {e:?}");
+ Status::new_service_specific_error_str(
+ -1,
+ Some(format!("Failed to open VM DTBO file {dtbo_path:?}: {e:?}")),
+ )
+ })?;
+ Some(dtbo)
+ } else {
+ None
+ };
+
// Actually start the VM.
let crosvm_config = CrosvmConfig {
cid,
@@ -446,13 +486,16 @@
cpus,
host_cpu_topology,
task_profiles: config.taskProfiles.clone(),
- console_fd,
+ console_out_fd,
+ console_in_fd,
log_fd,
ramdump,
indirect_files,
platform_version: parse_platform_version_req(&config.platformVersion)?,
detect_hangup: is_app_config,
gdb_port,
+ vfio_devices: config.devices.iter().map(PathBuf::from).collect(),
+ devices_dtbo,
};
let instance = Arc::new(
VmInstance::new(
@@ -462,13 +505,9 @@
requester_debug_pid,
vm_context,
)
- .map_err(|e| {
- error!("Failed to create VM with config {:?}: {:?}", config, e);
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to create VM: {:?}", e)),
- )
- })?,
+ .with_context(|| format!("Failed to create VM with config {:?}", config))
+ .with_log()
+ .or_service_specific_exception(-1)?,
);
state.add_vm(Arc::downgrade(&instance));
Ok(VirtualMachine::create(instance))
@@ -519,10 +558,8 @@
let image = if !disk.partitions.is_empty() {
if disk.image.is_some() {
warn!("DiskImage {:?} contains both image and partitions.", disk);
- return Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_ARGUMENT,
- Some("DiskImage contains both image and partitions."),
- ));
+ return Err(anyhow!("DiskImage contains both image and partitions"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
}
let composite_image_filenames =
@@ -534,13 +571,9 @@
&composite_image_filenames.header,
&composite_image_filenames.footer,
)
- .map_err(|e| {
- error!("Failed to make composite image with config {:?}: {:?}", disk, e);
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to make composite image: {:?}", e)),
- )
- })?;
+ .with_context(|| format!("Failed to make composite disk image with config {:?}", disk))
+ .with_log()
+ .or_service_specific_exception(-1)?;
// Pass the file descriptors for the various partition files to crosvm when it
// is run.
@@ -551,15 +584,22 @@
clone_file(image)?
} else {
warn!("DiskImage {:?} didn't contain image or partitions.", disk);
- return Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_ARGUMENT,
- Some("DiskImage didn't contain image or partitions."),
- ));
+ return Err(anyhow!("DiskImage didn't contain image or partitions."))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
};
Ok(DiskFile { image, writable: disk.writable })
}
+fn append_kernel_param(param: &str, vm_config: &mut VirtualMachineRawConfig) {
+ if let Some(ref mut params) = vm_config.params {
+ params.push(' ');
+ params.push_str(param)
+ } else {
+ vm_config.params = Some(param.to_owned())
+ }
+}
+
fn load_app_config(
config: &VirtualMachineAppConfig,
debug_config: &DebugConfig,
@@ -595,8 +635,19 @@
let vm_config_file = File::open(vm_config_path)?;
let mut vm_config = VmConfig::load(&vm_config_file)?.to_parcelable()?;
- if let Some(file) = config.customKernelImage.as_ref() {
- vm_config.kernel = Some(ParcelFileDescriptor::new(clone_file(file)?))
+ if let Some(custom_config) = &config.customConfig {
+ if let Some(file) = custom_config.customKernelImage.as_ref() {
+ vm_config.kernel = Some(ParcelFileDescriptor::new(clone_file(file)?))
+ }
+ vm_config.taskProfiles = custom_config.taskProfiles.clone();
+ vm_config.gdbPort = custom_config.gdbPort;
+
+ if let Some(file) = custom_config.vendorImage.as_ref() {
+ add_microdroid_vendor_image(clone_file(file)?, &mut vm_config);
+ append_kernel_param("androidboot.microdroid.mount_vendor=1", &mut vm_config)
+ }
+
+ vm_config.devices = custom_config.devices.clone();
}
if config.memoryMib > 0 {
@@ -606,8 +657,6 @@
vm_config.name = config.name.clone();
vm_config.protectedVm = config.protectedVm;
vm_config.cpuTopology = config.cpuTopology;
- vm_config.taskProfiles = config.taskProfiles.clone();
- vm_config.gdbPort = config.gdbPort;
// Microdroid takes additional init ramdisk & (optionally) storage image
add_microdroid_system_images(config, instance_file, storage_image, &mut vm_config)?;
@@ -694,10 +743,8 @@
if perm_svc.checkPermission(perm, calling_pid, calling_uid as i32)? {
Ok(())
} else {
- Err(Status::new_exception_str(
- ExceptionCode::SECURITY,
- Some(format!("does not have the {} permission", perm)),
- ))
+ Err(anyhow!("does not have the {} permission", perm))
+ .or_binder_exception(ExceptionCode::SECURITY)
}
}
@@ -803,40 +850,41 @@
}
fn start(&self) -> binder::Result<()> {
- self.instance.start().map_err(|e| {
- error!("Error starting VM with CID {}: {:?}", self.instance.cid, e);
- Status::new_service_specific_error_str(-1, Some(e.to_string()))
- })
+ self.instance
+ .start()
+ .with_context(|| format!("Error starting VM with CID {}", self.instance.cid))
+ .with_log()
+ .or_service_specific_exception(-1)
}
fn stop(&self) -> binder::Result<()> {
- self.instance.kill().map_err(|e| {
- error!("Error stopping VM with CID {}: {:?}", self.instance.cid, e);
- Status::new_service_specific_error_str(-1, Some(e.to_string()))
- })
+ self.instance
+ .kill()
+ .with_context(|| format!("Error stopping VM with CID {}", self.instance.cid))
+ .with_log()
+ .or_service_specific_exception(-1)
}
fn onTrimMemory(&self, level: MemoryTrimLevel) -> binder::Result<()> {
- self.instance.trim_memory(level).map_err(|e| {
- error!("Error trimming VM with CID {}: {:?}", self.instance.cid, e);
- Status::new_service_specific_error_str(-1, Some(e.to_string()))
- })
+ self.instance
+ .trim_memory(level)
+ .with_context(|| format!("Error trimming VM with CID {}", self.instance.cid))
+ .with_log()
+ .or_service_specific_exception(-1)
}
fn connectVsock(&self, port: i32) -> binder::Result<ParcelFileDescriptor> {
if !matches!(&*self.instance.vm_state.lock().unwrap(), VmState::Running { .. }) {
- return Err(Status::new_service_specific_error_str(-1, Some("VM is not running")));
+ return Err(anyhow!("VM is not running")).or_service_specific_exception(-1);
}
let port = port as u32;
if port < 1024 {
- return Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("Can't connect to privileged port {port}")),
- ));
+ return Err(anyhow!("Can't connect to privileged port {port}"))
+ .or_service_specific_exception(-1);
}
- let stream = VsockStream::connect_with_cid_port(self.instance.cid, port).map_err(|e| {
- Status::new_service_specific_error_str(-1, Some(format!("Failed to connect: {:?}", e)))
- })?;
+ let stream = VsockStream::connect_with_cid_port(self.instance.cid, port)
+ .context("Failed to connect")
+ .or_service_specific_exception(-1)?;
Ok(vsock_stream_to_pfd(stream))
}
}
@@ -962,17 +1010,15 @@
}
/// Converts a `&ParcelFileDescriptor` to a `File` by cloning the file.
-pub fn clone_file(file: &ParcelFileDescriptor) -> Result<File, Status> {
- file.as_ref().try_clone().map_err(|e| {
- Status::new_exception_str(
- ExceptionCode::BAD_PARCELABLE,
- Some(format!("Failed to clone File from ParcelFileDescriptor: {:?}", e)),
- )
- })
+pub fn clone_file(file: &ParcelFileDescriptor) -> binder::Result<File> {
+ file.as_ref()
+ .try_clone()
+ .context("Failed to clone File from ParcelFileDescriptor")
+ .or_binder_exception(ExceptionCode::BAD_PARCELABLE)
}
/// Converts an `&Option<ParcelFileDescriptor>` to an `Option<File>` by cloning the file.
-fn maybe_clone_file(file: &Option<ParcelFileDescriptor>) -> Result<Option<File>, Status> {
+fn maybe_clone_file(file: &Option<ParcelFileDescriptor>) -> binder::Result<Option<File>> {
file.as_ref().map(clone_file).transpose()
}
@@ -984,13 +1030,10 @@
}
/// Parses the platform version requirement string.
-fn parse_platform_version_req(s: &str) -> Result<VersionReq, Status> {
- VersionReq::parse(s).map_err(|e| {
- Status::new_exception_str(
- ExceptionCode::BAD_PARCELABLE,
- Some(format!("Invalid platform version requirement {}: {:?}", s, e)),
- )
- })
+fn parse_platform_version_req(s: &str) -> binder::Result<VersionReq> {
+ VersionReq::parse(s)
+ .with_context(|| format!("Invalid platform version requirement {}", s))
+ .or_binder_exception(ExceptionCode::BAD_PARCELABLE)
}
/// Create the empty ramdump file
@@ -999,13 +1042,10 @@
// VM will emit ramdump to. `ramdump_read` will be sent back to the client (i.e. the VM
// owner) for readout.
let ramdump_path = temporary_directory.join("ramdump");
- let ramdump = File::create(ramdump_path).map_err(|e| {
- error!("Failed to prepare ramdump file: {:?}", e);
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to prepare ramdump file: {:?}", e)),
- )
- })?;
+ let ramdump = File::create(ramdump_path)
+ .context("Failed to prepare ramdump file")
+ .with_log()
+ .or_service_specific_exception(-1)?;
Ok(ramdump)
}
@@ -1018,20 +1058,16 @@
fn check_gdb_allowed(config: &VirtualMachineConfig) -> binder::Result<()> {
if is_protected(config) {
- return Err(Status::new_exception_str(
- ExceptionCode::SECURITY,
- Some("can't use gdb with protected VMs"),
- ));
+ return Err(anyhow!("Can't use gdb with protected VMs"))
+ .or_binder_exception(ExceptionCode::SECURITY);
}
match config {
VirtualMachineConfig::RawConfig(_) => Ok(()),
VirtualMachineConfig::AppConfig(config) => {
if config.debugLevel != DebugLevel::FULL {
- Err(Status::new_exception_str(
- ExceptionCode::SECURITY,
- Some("can't use gdb with non-debuggable VMs"),
- ))
+ Err(anyhow!("Can't use gdb with non-debuggable VMs"))
+ .or_binder_exception(ExceptionCode::SECURITY)
} else {
Ok(())
}
@@ -1042,7 +1078,9 @@
fn extract_gdb_port(config: &VirtualMachineConfig) -> Option<NonZeroU16> {
match config {
VirtualMachineConfig::RawConfig(config) => NonZeroU16::new(config.gdbPort as u16),
- VirtualMachineConfig::AppConfig(config) => NonZeroU16::new(config.gdbPort as u16),
+ VirtualMachineConfig::AppConfig(config) => {
+ NonZeroU16::new(config.customConfig.as_ref().map(|c| c.gdbPort).unwrap_or(0) as u16)
+ }
}
}
@@ -1059,12 +1097,12 @@
return Ok(None);
};
- let (raw_read_fd, raw_write_fd) = pipe().map_err(|e| {
- Status::new_service_specific_error_str(-1, Some(format!("Failed to create pipe: {:?}", e)))
- })?;
+ let (raw_read_fd, raw_write_fd) =
+ pipe().context("Failed to create pipe").or_service_specific_exception(-1)?;
- // SAFETY: We are the sole owners of these fds as they were just created.
+ // SAFETY: We are the sole owner of this FD as we just created it, and it is valid and open.
let mut reader = BufReader::new(unsafe { File::from_raw_fd(raw_read_fd) });
+ // SAFETY: We are the sole owner of this FD as we just created it, and it is valid and open.
let write_fd = unsafe { File::from_raw_fd(raw_write_fd) };
std::thread::spawn(move || loop {
@@ -1120,9 +1158,8 @@
let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM with CID {} started payload", cid);
- vm.update_payload_state(PayloadState::Started).map_err(|e| {
- Status::new_exception_str(ExceptionCode::ILLEGAL_STATE, Some(e.to_string()))
- })?;
+ vm.update_payload_state(PayloadState::Started)
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
vm.callbacks.notify_payload_started(cid);
let vm_start_timestamp = vm.vm_metric.lock().unwrap().start_timestamp;
@@ -1130,10 +1167,7 @@
Ok(())
} else {
error!("notifyPayloadStarted is called from an unknown CID {}", cid);
- Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("cannot find a VM with CID {}", cid)),
- ))
+ Err(anyhow!("cannot find a VM with CID {}", cid)).or_service_specific_exception(-1)
}
}
@@ -1141,17 +1175,13 @@
let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM with CID {} reported payload is ready", cid);
- vm.update_payload_state(PayloadState::Ready).map_err(|e| {
- Status::new_exception_str(ExceptionCode::ILLEGAL_STATE, Some(e.to_string()))
- })?;
+ vm.update_payload_state(PayloadState::Ready)
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
vm.callbacks.notify_payload_ready(cid);
Ok(())
} else {
error!("notifyPayloadReady is called from an unknown CID {}", cid);
- Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("cannot find a VM with CID {}", cid)),
- ))
+ Err(anyhow!("cannot find a VM with CID {}", cid)).or_service_specific_exception(-1)
}
}
@@ -1159,17 +1189,13 @@
let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM with CID {} finished payload", cid);
- vm.update_payload_state(PayloadState::Finished).map_err(|e| {
- Status::new_exception_str(ExceptionCode::ILLEGAL_STATE, Some(e.to_string()))
- })?;
+ vm.update_payload_state(PayloadState::Finished)
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
vm.callbacks.notify_payload_finished(cid, exit_code);
Ok(())
} else {
error!("notifyPayloadFinished is called from an unknown CID {}", cid);
- Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("cannot find a VM with CID {}", cid)),
- ))
+ Err(anyhow!("cannot find a VM with CID {}", cid)).or_service_specific_exception(-1)
}
}
@@ -1177,17 +1203,13 @@
let cid = self.cid;
if let Some(vm) = self.state.lock().unwrap().get_vm(cid) {
info!("VM with CID {} encountered an error", cid);
- vm.update_payload_state(PayloadState::Finished).map_err(|e| {
- Status::new_exception_str(ExceptionCode::ILLEGAL_STATE, Some(e.to_string()))
- })?;
+ vm.update_payload_state(PayloadState::Finished)
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
vm.callbacks.notify_error(cid, error_code, message);
Ok(())
} else {
error!("notifyError is called from an unknown CID {}", cid);
- Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("cannot find a VM with CID {}", cid)),
- ))
+ Err(anyhow!("cannot find a VM with CID {}", cid)).or_service_specific_exception(-1)
}
}
@@ -1195,10 +1217,8 @@
let cid = self.cid;
let Some(vm) = self.state.lock().unwrap().get_vm(cid) else {
error!("requestCertificate is called from an unknown CID {cid}");
- return Err(Status::new_service_specific_error_str(
- -1,
- Some(format!("cannot find a VM with CID {}", cid)),
- ))
+ return Err(anyhow!("cannot find a VM with CID {}", cid))
+ .or_service_specific_exception(-1);
};
let instance_img_path = vm.temporary_directory.join("rkpvm_instance.img");
let instance_img = OpenOptions::new()
@@ -1206,13 +1226,9 @@
.read(true)
.write(true)
.open(instance_img_path)
- .map_err(|e| {
- error!("Failed to create rkpvm_instance.img file: {:?}", e);
- Status::new_service_specific_error_str(
- -1,
- Some(format!("Failed to create rkpvm_instance.img file: {:?}", e)),
- )
- })?;
+ .context("Failed to create rkpvm_instance.img file")
+ .with_log()
+ .or_service_specific_exception(-1)?;
GLOBAL_SERVICE.requestCertificate(csr, &ParcelFileDescriptor::new(instance_img))
}
}
@@ -1300,4 +1316,45 @@
assert!(ret.is_err(), "should fail");
Ok(())
}
+
+ #[test]
+ fn test_create_or_update_idsig_does_not_update_if_already_valid() -> Result<()> {
+ use std::io::Seek;
+
+ // Pick any APK
+ let mut apk = File::open("/system/priv-app/Shell/Shell.apk").unwrap();
+ let mut idsig = tempfile::tempfile().unwrap();
+
+ create_or_update_idsig_file(
+ &ParcelFileDescriptor::new(apk.try_clone()?),
+ &ParcelFileDescriptor::new(idsig.try_clone()?),
+ )?;
+ let modified_orig = idsig.metadata()?.modified()?;
+ apk.rewind()?;
+ idsig.rewind()?;
+
+ // Call the function again
+ create_or_update_idsig_file(
+ &ParcelFileDescriptor::new(apk.try_clone()?),
+ &ParcelFileDescriptor::new(idsig.try_clone()?),
+ )?;
+ let modified_new = idsig.metadata()?.modified()?;
+ assert!(modified_orig == modified_new, "idsig file was updated unnecessarily");
+ Ok(())
+ }
+
+ #[test]
+ fn test_append_kernel_param_first_param() {
+ let mut vm_config = VirtualMachineRawConfig { ..Default::default() };
+ append_kernel_param("foo=1", &mut vm_config);
+ assert_eq!(vm_config.params, Some("foo=1".to_owned()))
+ }
+
+ #[test]
+ fn test_append_kernel_param() {
+ let mut vm_config =
+ VirtualMachineRawConfig { params: Some("foo=5".to_owned()), ..Default::default() };
+ append_kernel_param("bar=42", &mut vm_config);
+ assert_eq!(vm_config.params, Some("foo=5 bar=42".to_owned()))
+ }
}
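The idsig optimization above skips rewriting the output file when it already carries the same APK digest. Below is a minimal sketch of that "skip the write if the recorded bytes match" pattern, with a plain byte slice standing in for V4Signature::signing_info.apk_digest; it assumes the tempfile crate, which the unit tests above also use.

```rust
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};

/// Rewrites `out` with `digest` only if it does not already contain the same bytes,
/// mirroring the idsig optimization above (a byte slice stands in for the APK digest).
fn write_if_changed(out: &mut File, digest: &[u8]) -> std::io::Result<bool> {
    if out.metadata()?.len() > 0 {
        let mut existing = Vec::new();
        out.read_to_end(&mut existing)?;
        if existing == digest {
            return Ok(false); // already up to date, nothing to do
        }
    }
    out.set_len(0)?;
    out.seek(SeekFrom::Start(0))?;
    out.write_all(digest)?;
    Ok(true)
}

fn main() -> std::io::Result<()> {
    let mut f = tempfile::tempfile()?;
    assert!(write_if_changed(&mut f, b"digest-v1")?); // first call writes
    f.seek(SeekFrom::Start(0))?;
    assert!(!write_if_changed(&mut f, b"digest-v1")?); // second call is a no-op
    Ok(())
}
```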
diff --git a/virtualizationmanager/src/atom.rs b/virtualizationmanager/src/atom.rs
index d6eb141..1d2d191 100644
--- a/virtualizationmanager/src/atom.rs
+++ b/virtualizationmanager/src/atom.rs
@@ -83,7 +83,7 @@
// This matches how crosvm determines the number of logical cores.
// For telemetry purposes only.
pub(crate) fn get_num_cpus() -> Option<usize> {
- // SAFETY - Only integer constants passed back and forth.
+ // SAFETY: Only integer constants passed back and forth.
let ret = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_CONF) };
if ret > 0 {
ret.try_into().ok()
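This change also normalizes the safety-comment style from `// SAFETY -` to `// SAFETY:` across the tree. As a reference for the convention, here is a standalone sketch of the pattern used by get_num_cpus() above, assuming the libc crate as in the real code.

```rust
/// Returns the number of configured logical CPUs, mirroring get_num_cpus() above.
/// Assumes the libc crate is available as a dependency.
fn num_cpus_conf() -> Option<usize> {
    // SAFETY: Only integer constants passed back and forth.
    let ret = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_CONF) };
    if ret > 0 {
        ret.try_into().ok()
    } else {
        None
    }
}

fn main() {
    println!("configured CPUs: {:?}", num_cpus_conf());
}
```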
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index 856ff1e..6372fa8 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -107,13 +107,16 @@
pub cpus: Option<NonZeroU32>,
pub host_cpu_topology: bool,
pub task_profiles: Vec<String>,
- pub console_fd: Option<File>,
+ pub console_out_fd: Option<File>,
+ pub console_in_fd: Option<File>,
pub log_fd: Option<File>,
pub ramdump: Option<File>,
pub indirect_files: Vec<File>,
pub platform_version: VersionReq,
pub detect_hangup: bool,
pub gdb_port: Option<NonZeroU16>,
+ pub vfio_devices: Vec<PathBuf>,
+ pub devices_dtbo: Option<File>,
}
/// A disk image to pass to crosvm for a VM.
@@ -184,6 +187,7 @@
if let VmState::NotStarted { config } = state {
let detect_hangup = config.detect_hangup;
let (failure_pipe_read, failure_pipe_write) = create_pipe()?;
+ let vfio_devices = config.vfio_devices.clone();
// If this fails and returns an error, `self` will be left in the `Failed` state.
let child =
@@ -198,7 +202,7 @@
let child_clone = child.clone();
let instance_clone = instance.clone();
let monitor_vm_exit_thread = Some(thread::spawn(move || {
- instance_clone.monitor_vm_exit(child_clone, failure_pipe_read);
+ instance_clone.monitor_vm_exit(child_clone, failure_pipe_read, vfio_devices);
}));
if detect_hangup {
@@ -335,7 +339,12 @@
/// Monitors the exit of the VM (i.e. termination of the `child` process). When that happens,
/// handles the event by updating the state, notifying clients by calling
/// callbacks, and removing temporary files for the VM.
- fn monitor_vm_exit(&self, child: Arc<SharedChild>, mut failure_pipe_read: File) {
+ fn monitor_vm_exit(
+ &self,
+ child: Arc<SharedChild>,
+ mut failure_pipe_read: File,
+ vfio_devices: Vec<PathBuf>,
+ ) {
let result = child.wait();
match &result {
Err(e) => error!("Error waiting for crosvm({}) instance to die: {}", child.id(), e),
@@ -393,6 +402,11 @@
remove_temporary_files(&self.temporary_directory).unwrap_or_else(|e| {
error!("Error removing temporary files from {:?}: {}", self.temporary_directory, e);
});
+
+ // TODO(b/278008182): clean up assigned devices.
+ for device in vfio_devices.iter() {
+ info!("NOT RELEASING {device:?}");
+ }
}
/// Waits until payload is started, or timeout expires. When timeout occurs, kill
@@ -515,8 +529,10 @@
MemoryTrimLevel::TRIM_MEMORY_RUNNING_MODERATE => 10,
_ => bail!("Invalid memory trim level {:?}", level),
};
- let command =
- BalloonControlCommand::Adjust { num_bytes: total_memory * pct / 100 };
+ let command = BalloonControlCommand::Adjust {
+ num_bytes: total_memory * pct / 100,
+ wait_for_success: false,
+ };
if let Err(e) = vm_control::client::handle_request(
&VmRequest::BalloonCommand(command),
&self.crosvm_control_socket_path,
@@ -526,7 +542,7 @@
}
}
Ok(VmResponse::Err(e)) => {
- // ENOTSUP is returned when the balloon protocol is not initialised. This
+ // ENOTSUP is returned when the balloon protocol is not initialized. This
// can occur for numerous reasons: Guest is still booting, guest doesn't
// support ballooning, host doesn't support ballooning. We don't log or
// raise an error in this case: trim is just a hint and we can ignore it.
@@ -591,7 +607,7 @@
}
let guest_time_ticks = data_list[42].parse::<i64>()?;
- // SAFETY : It just returns an integer about CPU tick information.
+ // SAFETY: It just returns an integer about CPU tick information.
let ticks_per_sec = unsafe { sysconf(_SC_CLK_TCK) };
Ok(guest_time_ticks * MILLIS_PER_SEC / ticks_per_sec)
}
@@ -676,6 +692,39 @@
}
}
+const SYSFS_PLATFORM_DEVICES_PATH: &str = "/sys/devices/platform/";
+const VFIO_PLATFORM_DRIVER_PATH: &str = "/sys/bus/platform/drivers/vfio-platform";
+
+fn vfio_argument_for_platform_device(path: &Path) -> Result<String, Error> {
+ // Check platform device exists
+ let path = path.canonicalize()?;
+ if !path.starts_with(SYSFS_PLATFORM_DEVICES_PATH) {
+ bail!("{path:?} is not a platform device");
+ }
+
+ // Check platform device is bound to VFIO driver
+ let dev_driver_path = path.join("driver").canonicalize()?;
+ if dev_driver_path != Path::new(VFIO_PLATFORM_DRIVER_PATH) {
+ bail!("{path:?} is not bound to VFIO-platform driver");
+ }
+
+ if let Some(p) = path.to_str() {
+ Ok(format!("--vfio={p},iommu=viommu"))
+ } else {
+ bail!("invalid path {path:?}");
+ }
+}
+
+fn append_platform_devices(command: &mut Command, config: &CrosvmConfig) -> Result<(), Error> {
+ for device in &config.vfio_devices {
+ command.arg(vfio_argument_for_platform_device(device)?);
+ }
+ if let Some(_dtbo) = &config.devices_dtbo {
+ // TODO(b/291192693): add dtbo to command line
+ }
+ Ok(())
+}
+
/// Starts an instance of `crosvm` to manage a new VM.
fn run_vm(
config: CrosvmConfig,
@@ -776,21 +825,29 @@
//
// When [console|log]_fd is not specified, the devices are attached to sink, which means what's
// written there is discarded.
- let console_arg = format_serial_arg(&mut preserved_fds, &config.console_fd);
- let log_arg = format_serial_arg(&mut preserved_fds, &config.log_fd);
+ let console_out_arg = format_serial_out_arg(&mut preserved_fds, &config.console_out_fd);
+ let console_in_arg = config
+ .console_in_fd
+ .as_ref()
+ .map(|fd| format!(",input={}", add_preserved_fd(&mut preserved_fds, fd)))
+ .unwrap_or_default();
+ let log_arg = format_serial_out_arg(&mut preserved_fds, &config.log_fd);
let failure_serial_path = add_preserved_fd(&mut preserved_fds, &failure_pipe_write);
- let ramdump_arg = format_serial_arg(&mut preserved_fds, &config.ramdump);
+ let ramdump_arg = format_serial_out_arg(&mut preserved_fds, &config.ramdump);
// Warning: Adding more serial devices requires you to shift the PCI device ID of the boot
// disks in bootconfig.x86_64. This is because x86 crosvm puts serial devices and the block
// devices in the same PCI bus and serial devices comes before the block devices. Arm crosvm
// doesn't have the issue.
// /dev/ttyS0
- command.arg(format!("--serial={},hardware=serial,num=1", &console_arg));
+ command.arg(format!("--serial={},hardware=serial,num=1", &console_out_arg));
// /dev/ttyS1
command.arg(format!("--serial=type=file,path={},hardware=serial,num=2", &failure_serial_path));
// /dev/hvc0
- command.arg(format!("--serial={},hardware=virtio-console,num=1", &console_arg));
+ command.arg(format!(
+ "--serial={}{},hardware=virtio-console,num=1",
+ &console_out_arg, &console_in_arg
+ ));
// /dev/hvc1
command.arg(format!("--serial={},hardware=virtio-console,num=2", &ramdump_arg));
// /dev/hvc2
@@ -824,6 +881,8 @@
.arg("--socket")
.arg(add_preserved_fd(&mut preserved_fds, &control_server_socket.as_raw_descriptor()));
+ append_platform_devices(&mut command, &config)?;
+
debug!("Preserving FDs {:?}", preserved_fds);
command.preserved_fds(preserved_fds);
@@ -890,7 +949,7 @@
/// Adds the file descriptor for `file` (if any) to `preserved_fds`, and returns the appropriate
/// string for a crosvm `--serial` flag. If `file` is none, creates a dummy sink device.
-fn format_serial_arg(preserved_fds: &mut Vec<RawFd>, file: &Option<File>) -> String {
+fn format_serial_out_arg(preserved_fds: &mut Vec<RawFd>, file: &Option<File>) -> String {
if let Some(file) = file {
format!("type=file,path={}", add_preserved_fd(preserved_fds, file))
} else {
@@ -901,8 +960,9 @@
/// Creates a new pipe with the `O_CLOEXEC` flag set, and returns the read side and write side.
fn create_pipe() -> Result<(File, File), Error> {
let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
- // SAFETY: We are the sole owners of these fds as they were just created.
+ // SAFETY: We are the sole owner of this FD as we just created it, and it is valid and open.
let read_fd = unsafe { File::from_raw_fd(raw_read) };
+ // SAFETY: We are the sole owner of this FD as we just created it, and it is valid and open.
let write_fd = unsafe { File::from_raw_fd(raw_write) };
Ok((read_fd, write_fd))
}
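The run_vm() changes above thread the new console_in_fd into the virtio-console `--serial` argument via an `,input=` suffix. The sketch below shows how that argument string is assembled; it assumes, for illustration only, that preserved FDs are exposed to crosvm as /proc/self/fd/N paths and that the no-output fallback spells `type=sink`.

```rust
/// Builds the virtio-console --serial argument, mirroring how run_vm() combines
/// console_out_arg and console_in_arg above. The FD-to-path mapping is illustrative.
fn virtio_console_arg(out_fd: Option<i32>, in_fd: Option<i32>) -> String {
    let out = match out_fd {
        Some(fd) => format!("type=file,path=/proc/self/fd/{fd}"),
        None => "type=sink".to_string(), // assumption: discard output when no FD is given
    };
    let input = in_fd
        .map(|fd| format!(",input=/proc/self/fd/{fd}"))
        .unwrap_or_default();
    format!("--serial={out}{input},hardware=virtio-console,num=1")
}

fn main() {
    // With both output capture and console input enabled.
    println!("{}", virtio_console_arg(Some(10), Some(11)));
    // With neither: output goes to a sink and no input device is attached.
    println!("{}", virtio_console_arg(None, None));
}
```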
diff --git a/virtualizationmanager/src/debug_config.rs b/virtualizationmanager/src/debug_config.rs
index 7172e7d..9b13475 100644
--- a/virtualizationmanager/src/debug_config.rs
+++ b/virtualizationmanager/src/debug_config.rs
@@ -42,7 +42,7 @@
}
fn to_path(&self) -> PathBuf {
- // SAFETY -- unwrap() is safe for to_str() because node_path and prop_name were &str.
+ // unwrap() is safe for to_str() because node_path and prop_name were &str.
PathBuf::from(
[
"/sys/firmware/devicetree/base",
@@ -129,7 +129,7 @@
.map_err(Error::msg)
.with_context(|| "Malformed {overlay_file_path:?}")?;
- // SAFETY - Return immediately if error happens. Damaged fdt_buf and fdt are discarded.
+ // SAFETY: Return immediately if error happens. Damaged fdt_buf and fdt are discarded.
unsafe {
fdt.apply_overlay(overlay_fdt).map_err(Error::msg).with_context(|| {
"Failed to overlay {overlay_file_path:?} onto empty device tree"
@@ -141,7 +141,7 @@
}
fn as_fdt(&self) -> &Fdt {
- // SAFETY - Checked validity of buffer when instantiate.
+ // SAFETY: Checked validity of buffer when instantiate.
unsafe { Fdt::unchecked_from_slice(&self.buffer) }
}
}
diff --git a/virtualizationmanager/src/main.rs b/virtualizationmanager/src/main.rs
index bd7f8af..f058547 100644
--- a/virtualizationmanager/src/main.rs
+++ b/virtualizationmanager/src/main.rs
@@ -86,7 +86,7 @@
}
owned_fds.push(raw_fd);
- // SAFETY - Initializing OwnedFd for a RawFd provided in cmdline arguments.
+ // SAFETY: Initializing OwnedFd for a RawFd provided in cmdline arguments.
// We checked that the integer value corresponds to a valid FD and that this
// is the first argument to claim its ownership.
Ok(unsafe { OwnedFd::from_raw_fd(raw_fd) })
diff --git a/virtualizationmanager/src/payload.rs b/virtualizationmanager/src/payload.rs
index 33659d4..343f3cf 100644
--- a/virtualizationmanager/src/payload.rs
+++ b/virtualizationmanager/src/payload.rs
@@ -81,6 +81,9 @@
#[serde(rename = "provideSharedApexLibs")]
provide_shared_apex_libs: bool,
+
+ #[serde(rename = "preinstalledModulePath")]
+ preinstalled_path: PathBuf,
}
impl ApexInfoList {
@@ -194,12 +197,12 @@
temporary_directory: &Path,
) -> Result<ParcelFileDescriptor> {
let payload_metadata = match &app_config.payload {
- Payload::PayloadConfig(payload_config) => PayloadMetadata::config(PayloadConfig {
+ Payload::PayloadConfig(payload_config) => PayloadMetadata::Config(PayloadConfig {
payload_binary_name: payload_config.payloadBinaryName.clone(),
..Default::default()
}),
Payload::ConfigPath(config_path) => {
- PayloadMetadata::config_path(format!("/mnt/apk/{}", config_path))
+ PayloadMetadata::ConfigPath(format!("/mnt/apk/{}", config_path))
}
};
@@ -275,7 +278,7 @@
let apex_list = pm.get_apex_list(vm_payload_config.prefer_staged)?;
// collect APEXes from config
- let mut apex_infos = collect_apex_infos(&apex_list, &vm_payload_config.apexes, debug_config);
+ let mut apex_infos = collect_apex_infos(&apex_list, &vm_payload_config.apexes, debug_config)?;
// Pass sorted list of apexes. Sorting key shouldn't use `path` because it will change after
// reboot with prefer_staged. `last_update_seconds` is added to distinguish "samegrade"
@@ -376,18 +379,28 @@
Ok(apexes)
}
+fn check_apexes_are_from_allowed_partitions(requested_apexes: &Vec<&ApexInfo>) -> Result<()> {
+ const ALLOWED_PARTITIONS: [&str; 2] = ["/system", "/system_ext"];
+ for apex in requested_apexes {
+ if !ALLOWED_PARTITIONS.iter().any(|p| apex.preinstalled_path.starts_with(p)) {
+ bail!("Non-system APEX {} is not supported in Microdroid", apex.name);
+ }
+ }
+ Ok(())
+}
+
// Collect ApexInfos from VM config
fn collect_apex_infos<'a>(
apex_list: &'a ApexInfoList,
apex_configs: &[ApexConfig],
debug_config: &DebugConfig,
-) -> Vec<&'a ApexInfo> {
+) -> Result<Vec<&'a ApexInfo>> {
let mut additional_apexes: Vec<&str> = MICRODROID_REQUIRED_APEXES.to_vec();
if debug_config.should_include_debug_apexes() {
additional_apexes.extend(MICRODROID_REQUIRED_APEXES_DEBUG.to_vec());
}
- apex_list
+ let apex_infos = apex_list
.list
.iter()
.filter(|ai| {
@@ -395,7 +408,22 @@
|| additional_apexes.iter().any(|name| name == &ai.name && ai.is_active)
|| ai.provide_shared_apex_libs
})
- .collect()
+ .collect();
+
+ check_apexes_are_from_allowed_partitions(&apex_infos)?;
+ Ok(apex_infos)
+}
+
+pub fn add_microdroid_vendor_image(vendor_image: File, vm_config: &mut VirtualMachineRawConfig) {
+ vm_config.disks.push(DiskImage {
+ image: None,
+ writable: false,
+ partitions: vec![Partition {
+ label: "microdroid-vendor".to_owned(),
+ image: Some(ParcelFileDescriptor::new(vendor_image)),
+ writable: false,
+ }],
+ })
}
pub fn add_microdroid_system_images(
@@ -476,13 +504,14 @@
}
#[test]
- fn test_collect_apexes() {
+ fn test_collect_apexes() -> Result<()> {
let apex_info_list = ApexInfoList {
list: vec![
ApexInfo {
// 0
name: "com.android.adbd".to_string(),
path: PathBuf::from("adbd"),
+ preinstalled_path: PathBuf::from("/system/adbd"),
has_classpath_jar: false,
last_update_seconds: 12345678,
is_factory: true,
@@ -493,6 +522,7 @@
// 1
name: "com.android.os.statsd".to_string(),
path: PathBuf::from("statsd"),
+ preinstalled_path: PathBuf::from("/system/statsd"),
has_classpath_jar: false,
last_update_seconds: 12345678,
is_factory: true,
@@ -503,6 +533,7 @@
// 2
name: "com.android.os.statsd".to_string(),
path: PathBuf::from("statsd/updated"),
+ preinstalled_path: PathBuf::from("/system/statsd"),
has_classpath_jar: false,
last_update_seconds: 12345678 + 1,
is_factory: false,
@@ -533,6 +564,7 @@
// 5
name: "has_classpath".to_string(),
path: PathBuf::from("has_classpath/updated"),
+ preinstalled_path: PathBuf::from("/system/has_classpath"),
has_classpath_jar: true,
last_update_seconds: 87654321 + 1,
is_factory: false,
@@ -543,6 +575,7 @@
// 6
name: "apex-foo".to_string(),
path: PathBuf::from("apex-foo"),
+ preinstalled_path: PathBuf::from("/system/apex-foo"),
has_classpath_jar: false,
last_update_seconds: 87654321,
is_factory: true,
@@ -553,6 +586,7 @@
// 7
name: "apex-foo".to_string(),
path: PathBuf::from("apex-foo/updated"),
+ preinstalled_path: PathBuf::from("/system/apex-foo"),
has_classpath_jar: false,
last_update_seconds: 87654321 + 1,
is_factory: false,
@@ -563,6 +597,7 @@
// 8
name: "sharedlibs".to_string(),
path: PathBuf::from("apex-foo"),
+ preinstalled_path: PathBuf::from("/system/apex-foo"),
last_update_seconds: 87654321,
is_factory: true,
provide_shared_apex_libs: true,
@@ -572,6 +607,7 @@
// 9
name: "sharedlibs".to_string(),
path: PathBuf::from("apex-foo/updated"),
+ preinstalled_path: PathBuf::from("/system/apex-foo"),
last_update_seconds: 87654321 + 1,
is_active: true,
provide_shared_apex_libs: true,
@@ -584,7 +620,11 @@
ApexConfig { name: "{CLASSPATH}".to_string() },
];
assert_eq!(
- collect_apex_infos(&apex_info_list, &apex_configs, &DebugConfig::new(DebugLevel::FULL)),
+ collect_apex_infos(
+ &apex_info_list,
+ &apex_configs,
+ &DebugConfig::new(DebugLevel::FULL)
+ )?,
vec![
// Pass active/required APEXes
&apex_info_list.list[0],
@@ -597,6 +637,55 @@
&apex_info_list.list[9],
]
);
+ Ok(())
+ }
+
+ #[test]
+ fn test_check_allowed_partitions_vendor_not_allowed() -> Result<()> {
+ let apex_info_list = ApexInfoList {
+ list: vec![ApexInfo {
+ name: "apex-vendor".to_string(),
+ path: PathBuf::from("apex-vendor"),
+ preinstalled_path: PathBuf::from("/vendor/apex-vendor"),
+ is_active: true,
+ ..Default::default()
+ }],
+ };
+ let apex_configs = vec![ApexConfig { name: "apex-vendor".to_string() }];
+
+ let ret =
+ collect_apex_infos(&apex_info_list, &apex_configs, &DebugConfig::new(DebugLevel::NONE));
+ assert!(ret
+ .is_err_and(|ret| ret.to_string()
+ == "Non-system APEX apex-vendor is not supported in Microdroid"));
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_check_allowed_partitions_system_ext_allowed() -> Result<()> {
+ let apex_info_list = ApexInfoList {
+ list: vec![ApexInfo {
+ name: "apex-system_ext".to_string(),
+ path: PathBuf::from("apex-system_ext"),
+ preinstalled_path: PathBuf::from("/system_ext/apex-system_ext"),
+ is_active: true,
+ ..Default::default()
+ }],
+ };
+
+ let apex_configs = vec![ApexConfig { name: "apex-system_ext".to_string() }];
+
+ assert_eq!(
+ collect_apex_infos(
+ &apex_info_list,
+ &apex_configs,
+ &DebugConfig::new(DebugLevel::NONE)
+ )?,
+ vec![&apex_info_list.list[0]]
+ );
+
+ Ok(())
}
#[test]
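collect_apex_infos() now rejects APEXes whose preinstalled path lies outside /system or /system_ext. A minimal sketch of that allow-list check is below; note that Path::starts_with compares whole path components, so /system_ext is not mistaken for a child of /system.

```rust
use std::path::Path;

// Allowed source partitions, as in check_apexes_are_from_allowed_partitions() above.
const ALLOWED_PARTITIONS: [&str; 2] = ["/system", "/system_ext"];

/// Returns true if the preinstalled path lives under an allowed partition.
fn is_allowed(preinstalled_path: &Path) -> bool {
    ALLOWED_PARTITIONS.iter().any(|p| preinstalled_path.starts_with(p))
}

fn main() {
    assert!(is_allowed(Path::new("/system/apex/com.android.adbd.apex")));
    assert!(is_allowed(Path::new("/system_ext/apex/foo.apex")));
    assert!(!is_allowed(Path::new("/vendor/apex/bar.apex")));
    // Component-wise comparison: "/system_other" does not match "/system".
    assert!(!is_allowed(Path::new("/system_other/apex/baz.apex")));
}
```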
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 6b39ff9..67890e2 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -27,12 +27,13 @@
"android.os.permissions_aidl-rust",
"libandroid_logger",
"libanyhow",
+ "libavflog",
"libbinder_rs",
- "libvmclient",
"liblibc",
"liblog_rust",
"libnix",
"librustutils",
+ "libvmclient",
"libstatslog_virtualization_rust",
"libtombstoned_client_rust",
"libvsock",
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/AssignableDevice.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/AssignableDevice.aidl
new file mode 100644
index 0000000..014d78c
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/AssignableDevice.aidl
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtualizationservice;
+
+/** A specification of a device to be assigned to the virtual machine. */
+@RustDerive(Clone=true)
+parcelable AssignableDevice {
+ /** Path to SysFS node of the device. */
+ String node;
+
+ /** Kind of the device. */
+ String kind;
+}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
index d72d5ac..df72e49 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/IVirtualizationService.aidl
@@ -15,6 +15,7 @@
*/
package android.system.virtualizationservice;
+import android.system.virtualizationservice.AssignableDevice;
import android.system.virtualizationservice.IVirtualMachine;
import android.system.virtualizationservice.PartitionType;
import android.system.virtualizationservice.VirtualMachineConfig;
@@ -23,12 +24,14 @@
interface IVirtualizationService {
/**
* Create the VM with the given config file, and return a handle to it ready to start it. If
- * `consoleFd` is provided then console output from the VM will be sent to it. If `osLogFd` is
+ * `consoleOutFd` is provided then console output from the VM will be sent to it. If
+ * `consoleInFd` is provided then console input to the VM will be read from it. If `osLogFd` is
* provided then the OS-level logs will be sent to it. `osLogFd` is supported only when the OS
* running in the VM has the logging system. In case of Microdroid, the logging system is logd.
*/
IVirtualMachine createVm(in VirtualMachineConfig config,
- in @nullable ParcelFileDescriptor consoleFd,
+ in @nullable ParcelFileDescriptor consoleOutFd,
+ in @nullable ParcelFileDescriptor consoleInFd,
in @nullable ParcelFileDescriptor osLogFd);
/**
@@ -53,4 +56,9 @@
* and as such is only permitted from the shell user.
*/
VirtualMachineDebugInfo[] debugListVms();
+
+ /**
+ * Get a list of assignable device types.
+ */
+ AssignableDevice[] getAssignableDevices();
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
index 5e05bb9..9021055 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineAppConfig.aidl
@@ -45,6 +45,8 @@
union Payload {
/**
* Path to a JSON file in an APK containing the configuration.
+ *
+ * <p>Setting this field requires android.permission.USE_CUSTOM_VIRTUAL_MACHINE
*/
@utf8InCpp String configPath;
@@ -70,16 +72,6 @@
/** Debug level of the VM */
DebugLevel debugLevel = DebugLevel.NONE;
- /**
- * Port at which crosvm will start a gdb server to debug guest kernel.
- * If set to zero, then gdb server won't be started.
- *
- * Note: Specifying a value here requires android.permission.USE_CUSTOM_VIRTUAL_MACHINE.
- *
- * TODO(b/286225150): move to a separate struct
- */
- int gdbPort = 0;
-
/** Whether the VM should be a protected VM. */
boolean protectedVm;
@@ -93,20 +85,34 @@
CpuTopology cpuTopology = CpuTopology.ONE_CPU;
/**
- * List of task profile names to apply for the VM
- *
- * Note: Specifying a value here requires android.permission.USE_CUSTOM_VIRTUAL_MACHINE.
- *
- * TODO(b/286225150): move to a separate struct
+ * Encapsulates parameters that require android.permission.USE_CUSTOM_VIRTUAL_MACHINE.
*/
- String[] taskProfiles;
+ parcelable CustomConfig {
+ /**
+ * If specified, boot Microdroid VM with the given kernel.
+ *
+ */
+ @nullable ParcelFileDescriptor customKernelImage;
- /**
- * If specified, boot Microdroid VM with the given kernel.
- *
- * Note: Specifying a value here requires android.permission.USE_CUSTOM_VIRTUAL_MACHINE.
- *
- * TODO(b/286225150): move to a separate struct
- */
- @nullable ParcelFileDescriptor customKernelImage;
+ /**
+ * Port at which crosvm will start a gdb server to debug guest kernel.
+ * If set to zero, then gdb server won't be started.
+ *
+ */
+ int gdbPort = 0;
+
+ /**
+ * List of task profile names to apply for the VM
+ */
+ String[] taskProfiles;
+
+ /** A disk image containing vendor specific modules. */
+ @nullable ParcelFileDescriptor vendorImage;
+
+ /** List of SysFS nodes of devices to be assigned */
+ String[] devices;
+ }
+
+ /** Configuration parameters guarded by android.permission.USE_CUSTOM_VIRTUAL_MACHINE */
+ @nullable CustomConfig customConfig;
}
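
The caller-side migration is shown in vm/src/run.rs further down; as a minimal sketch, the fields removed above now travel inside the optional `customConfig` member, and setting it requires android.permission.USE_CUSTOM_VIRTUAL_MACHINE (struct paths assume the AIDL-generated Rust bindings; the device node is the one returned by getAssignableDevices() later in this patch):

```rust
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::VirtualMachineAppConfig::{
    CustomConfig::CustomConfig, VirtualMachineAppConfig,
};

fn with_custom_config(mut app_config: VirtualMachineAppConfig) -> VirtualMachineAppConfig {
    // All of these previously lived at the top level of VirtualMachineAppConfig.
    app_config.customConfig = Some(CustomConfig {
        customKernelImage: None,
        gdbPort: 0, // 0 means no gdb server
        taskProfiles: vec![],
        vendorImage: None,
        devices: vec!["/sys/bus/platform/devices/16d00000.eh".to_owned()],
    });
    app_config
}
```
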
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
index 87d4ba2..7c0ed0c 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/VirtualMachineRawConfig.aidl
@@ -69,4 +69,7 @@
* If set to zero, then gdb server won't be started.
*/
int gdbPort = 0;
+
+ /** List of SysFS nodes of devices to be assigned */
+ String[] devices;
}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVfioHandler.aidl b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVfioHandler.aidl
new file mode 100644
index 0000000..cb3ed0b
--- /dev/null
+++ b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVfioHandler.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.system.virtualizationservice_internal;
+
+import android.system.virtualizationservice.AssignableDevice;
+import android.system.virtualizationservice.VirtualMachineDebugInfo;
+import android.system.virtualizationservice_internal.AtomVmBooted;
+import android.system.virtualizationservice_internal.AtomVmCreationRequested;
+import android.system.virtualizationservice_internal.AtomVmExited;
+import android.system.virtualizationservice_internal.IGlobalVmContext;
+
+/** VFIO related methods which should be done as root. */
+interface IVfioHandler {
+ /**
+ * Bind given devices to vfio driver.
+ *
+ * @param devices paths of sysfs nodes of devices to assign.
+ * @param dtbo writable file descriptor to store VM DTBO.
+ */
+ void bindDevicesToVfioDriver(in String[] devices, in ParcelFileDescriptor dtbo);
+}
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
index cc59b3f..4c7164a 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice_internal/IVirtualizationServiceInternal.aidl
@@ -15,6 +15,7 @@
*/
package android.system.virtualizationservice_internal;
+import android.system.virtualizationservice.AssignableDevice;
import android.system.virtualizationservice.VirtualMachineDebugInfo;
import android.system.virtualizationservice_internal.AtomVmBooted;
import android.system.virtualizationservice_internal.AtomVmCreationRequested;
@@ -59,4 +60,17 @@
* @return the X.509 encoded certificate.
*/
byte[] requestCertificate(in byte[] csr, in ParcelFileDescriptor instanceImgFd);
+
+ /**
+ * Get a list of assignable devices.
+ */
+ AssignableDevice[] getAssignableDevices();
+
+ /**
+ * Bind given devices to vfio driver.
+ *
+ * @param devices paths of sysfs nodes of devices to assign.
+ * @param dtbo writable file descriptor to store VM DTBO.
+ */
+ void bindDevicesToVfioDriver(in String[] devices, in ParcelFileDescriptor dtbo);
}
diff --git a/virtualizationservice/src/aidl.rs b/virtualizationservice/src/aidl.rs
index 5c5a7e4..b2513d9 100644
--- a/virtualizationservice/src/aidl.rs
+++ b/virtualizationservice/src/aidl.rs
@@ -19,6 +19,7 @@
use crate::rkpvm::request_certificate;
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_system_virtualizationservice::{
+ aidl::android::system::virtualizationservice::AssignableDevice::AssignableDevice,
aidl::android::system::virtualizationservice::VirtualMachineDebugInfo::VirtualMachineDebugInfo,
binder::ParcelFileDescriptor,
};
@@ -28,10 +29,12 @@
AtomVmExited::AtomVmExited,
IGlobalVmContext::{BnGlobalVmContext, IGlobalVmContext},
IVirtualizationServiceInternal::IVirtualizationServiceInternal,
+ IVfioHandler::{BpVfioHandler, IVfioHandler},
};
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::VM_TOMBSTONES_SERVICE_PORT;
use anyhow::{anyhow, ensure, Context, Result};
-use binder::{self, BinderFeatures, ExceptionCode, Interface, LazyServiceGuard, Status, Strong};
+use avflog::LogResult;
+use binder::{self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, LazyServiceGuard, Status, Strong, IntoBinderResult};
use libc::VMADDR_CID_HOST;
use log::{error, info, warn};
use rustutils::system_properties;
@@ -40,7 +43,7 @@
use std::io::{Read, Write};
use std::os::unix::fs::PermissionsExt;
use std::os::unix::raw::{pid_t, uid_t};
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, Weak};
use tombstoned_client::{DebuggerdDumpType, TombstonedConnection};
use vsock::{VsockListener, VsockStream};
@@ -95,20 +98,15 @@
let pid = get_calling_pid();
let lim = libc::rlimit { rlim_cur: libc::RLIM_INFINITY, rlim_max: libc::RLIM_INFINITY };
- // SAFETY - borrowing the new limit struct only
+ // SAFETY: borrowing the new limit struct only
let ret = unsafe { libc::prlimit(pid, libc::RLIMIT_MEMLOCK, &lim, std::ptr::null_mut()) };
match ret {
0 => Ok(()),
- -1 => Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_STATE,
- Some(std::io::Error::last_os_error().to_string()),
- )),
- n => Err(Status::new_exception_str(
- ExceptionCode::ILLEGAL_STATE,
- Some(format!("Unexpected return value from prlimit(): {n}")),
- )),
+ -1 => Err(std::io::Error::last_os_error().into()),
+ n => Err(anyhow!("Unexpected return value from prlimit(): {n}")),
}
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)
}
fn allocateGlobalVmContext(
@@ -120,9 +118,9 @@
let requester_uid = get_calling_uid();
let requester_debug_pid = requester_debug_pid as pid_t;
let state = &mut *self.state.lock().unwrap();
- state.allocate_vm_context(requester_uid, requester_debug_pid).map_err(|e| {
- Status::new_exception_str(ExceptionCode::ILLEGAL_STATE, Some(e.to_string()))
- })
+ state
+ .allocate_vm_context(requester_uid, requester_debug_pid)
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)
}
fn atomVmBooted(&self, atom: &AtomVmBooted) -> Result<(), Status> {
@@ -165,10 +163,36 @@
) -> binder::Result<Vec<u8>> {
check_manage_access()?;
info!("Received csr. Getting certificate...");
- request_certificate(csr, instance_img_fd).map_err(|e| {
- error!("Failed to get certificate. Error: {e:?}");
- Status::new_exception_str(ExceptionCode::SERVICE_SPECIFIC, Some(e.to_string()))
- })
+ request_certificate(csr, instance_img_fd)
+ .context("Failed to get certificate")
+ .with_log()
+ .or_service_specific_exception(-1)
+ }
+
+ fn getAssignableDevices(&self) -> binder::Result<Vec<AssignableDevice>> {
+ check_use_custom_virtual_machine()?;
+
+ // TODO(b/291191362): read VM DTBO to find assignable devices.
+ let mut devices = Vec::new();
+ let eh_path = "/sys/bus/platform/devices/16d00000.eh";
+ if Path::new(eh_path).exists() {
+ devices.push(AssignableDevice { kind: "eh".to_owned(), node: eh_path.to_owned() });
+ }
+ Ok(devices)
+ }
+
+ fn bindDevicesToVfioDriver(
+ &self,
+ devices: &[String],
+ dtbo: &ParcelFileDescriptor,
+ ) -> binder::Result<()> {
+ check_use_custom_virtual_machine()?;
+
+ let vfio_service: Strong<dyn IVfioHandler> =
+ wait_for_interface(<BpVfioHandler as IVfioHandler>::get_descriptor())?;
+
+ vfio_service.bindDevicesToVfioDriver(devices, dtbo)?;
+ Ok(())
}
}
@@ -377,10 +401,8 @@
if perm_svc.checkPermission(perm, calling_pid, calling_uid as i32)? {
Ok(())
} else {
- Err(Status::new_exception_str(
- ExceptionCode::SECURITY,
- Some(format!("does not have the {} permission", perm)),
- ))
+ Err(anyhow!("does not have the {} permission", perm))
+ .or_binder_exception(ExceptionCode::SECURITY)
}
}
@@ -393,3 +415,8 @@
fn check_manage_access() -> binder::Result<()> {
check_permission("android.permission.MANAGE_VIRTUAL_MACHINE")
}
+
+/// Check whether the caller of the current Binder method is allowed to use custom VMs
+fn check_use_custom_virtual_machine() -> binder::Result<()> {
+ check_permission("android.permission.USE_CUSTOM_VIRTUAL_MACHINE")
+}
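
The change above replaces hand-built Status values with anyhow errors converted at the Binder boundary; a minimal standalone sketch of the two conversions used throughout this patch (function names here are illustrative):

```rust
use anyhow::{anyhow, Context};
use binder::{ExceptionCode, IntoBinderResult};

fn read_state_file(path: &str) -> binder::Result<String> {
    std::fs::read_to_string(path)
        .context("Failed to read state file") // anyhow context is carried into the status message
        .or_service_specific_exception(-1) // the error case becomes a service-specific status (-1)
}

fn require_feature(supported: bool) -> binder::Result<()> {
    if supported {
        Ok(())
    } else {
        Err(anyhow!("feature not supported"))
            .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION)
    }
}
```
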
diff --git a/virtualizationservice/src/rkpvm.rs b/virtualizationservice/src/rkpvm.rs
index a4649f6..63160f4 100644
--- a/virtualizationservice/src/rkpvm.rs
+++ b/virtualizationservice/src/rkpvm.rs
@@ -76,10 +76,10 @@
memoryMib: 300,
cpuTopology: CpuTopology::ONE_CPU,
platformVersion: "~1.0".to_string(),
- taskProfiles: vec![],
gdbPort: 0, // No gdb
+ ..Default::default()
});
- let vm = VmInstance::create(service.as_ref(), &config, None, None, None)
+ let vm = VmInstance::create(service.as_ref(), &config, None, None, None, None)
.context("Failed to create service VM")?;
info!("service_vm: Starting Service VM...");
diff --git a/virtualizationservice/vfio_handler/Android.bp b/virtualizationservice/vfio_handler/Android.bp
new file mode 100644
index 0000000..66662d5
--- /dev/null
+++ b/virtualizationservice/vfio_handler/Android.bp
@@ -0,0 +1,34 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "vfio_handler",
+ crate_name: "vfio_handler",
+ edition: "2021",
+ srcs: ["src/main.rs"],
+ // Only build on targets which crosvm builds on.
+ enabled: false,
+ target: {
+ android64: {
+ compile_multilib: "64",
+ enabled: true,
+ },
+ linux_bionic_arm64: {
+ enabled: true,
+ },
+ },
+ prefer_rlib: true,
+ rustlibs: [
+ "android.system.virtualizationservice_internal-rust",
+ "libandroid_logger",
+ "libanyhow",
+ "libbinder_rs",
+ "liblazy_static",
+ "liblog_rust",
+ "libnix",
+ "librustutils",
+ "libzerocopy",
+ ],
+ apex_available: ["com.android.virt"],
+}
diff --git a/virtualizationservice/vfio_handler/src/aidl.rs b/virtualizationservice/vfio_handler/src/aidl.rs
new file mode 100644
index 0000000..bb9faf1
--- /dev/null
+++ b/virtualizationservice/vfio_handler/src/aidl.rs
@@ -0,0 +1,303 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of the AIDL interface of the VfioHandler.
+
+use anyhow::{anyhow, Context};
+use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal::IVfioHandler::IVfioHandler;
+use android_system_virtualizationservice_internal::binder::ParcelFileDescriptor;
+use binder::{self, ExceptionCode, Interface, IntoBinderResult};
+use lazy_static::lazy_static;
+use std::fs::{read_link, write, File};
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::mem::size_of;
+use std::path::{Path, PathBuf};
+use rustutils::system_properties;
+use zerocopy::{
+ byteorder::{BigEndian, U32},
+ FromBytes,
+};
+
+#[derive(Debug, Default)]
+pub struct VfioHandler {}
+
+impl VfioHandler {
+ pub fn init() -> VfioHandler {
+ VfioHandler::default()
+ }
+}
+
+impl Interface for VfioHandler {}
+
+impl IVfioHandler for VfioHandler {
+ fn bindDevicesToVfioDriver(
+ &self,
+ devices: &[String],
+ dtbo: &ParcelFileDescriptor,
+ ) -> binder::Result<()> {
+ // permission check is already done by IVirtualizationServiceInternal.
+ if !*IS_VFIO_SUPPORTED {
+ return Err(anyhow!("VFIO-platform not supported"))
+ .or_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION);
+ }
+ devices.iter().try_for_each(|x| bind_device(Path::new(x)))?;
+
+ write_dtbo(dtbo)?;
+
+ Ok(())
+ }
+}
+
+const DEV_VFIO_PATH: &str = "/dev/vfio/vfio";
+const SYSFS_PLATFORM_DEVICES_PATH: &str = "/sys/devices/platform/";
+const VFIO_PLATFORM_DRIVER_PATH: &str = "/sys/bus/platform/drivers/vfio-platform";
+const SYSFS_PLATFORM_DRIVERS_PROBE_PATH: &str = "/sys/bus/platform/drivers_probe";
+const DT_TABLE_MAGIC: u32 = 0xd7b7ab1e;
+
+/// The structure of DT table header in dtbo.img.
+/// https://source.android.com/docs/core/architecture/dto/partitions
+#[repr(C)]
+#[derive(Debug, FromBytes)]
+struct DtTableHeader {
+ /// DT_TABLE_MAGIC
+ magic: U32<BigEndian>,
+ /// includes dt_table_header + all dt_table_entry and all dtb/dtbo
+ _total_size: U32<BigEndian>,
+ /// sizeof(dt_table_header)
+ header_size: U32<BigEndian>,
+ /// sizeof(dt_table_entry)
+ dt_entry_size: U32<BigEndian>,
+ /// number of dt_table_entry
+ dt_entry_count: U32<BigEndian>,
+ /// offset to the first dt_table_entry from head of dt_table_header
+ dt_entries_offset: U32<BigEndian>,
+ /// flash page size we assume
+ _page_size: U32<BigEndian>,
+ /// DTBO image version, the current version is 0. The version will be
+ /// incremented when the dt_table_header struct is updated.
+ _version: U32<BigEndian>,
+}
+
+/// The structure of each DT table entry (v0) in dtbo.img.
+/// https://source.android.com/docs/core/architecture/dto/partitions
+#[repr(C)]
+#[derive(Debug, FromBytes)]
+struct DtTableEntry {
+ /// size of each DT
+ dt_size: U32<BigEndian>,
+ /// offset from head of dt_table_header
+ dt_offset: U32<BigEndian>,
+ /// optional, must be zero if unused
+ _id: U32<BigEndian>,
+ /// optional, must be zero if unused
+ _rev: U32<BigEndian>,
+ /// optional, must be zero if unused
+ _custom: [U32<BigEndian>; 4],
+}
+
+lazy_static! {
+ static ref IS_VFIO_SUPPORTED: bool =
+ Path::new(DEV_VFIO_PATH).exists() && Path::new(VFIO_PLATFORM_DRIVER_PATH).exists();
+}
+
+fn check_platform_device(path: &Path) -> binder::Result<()> {
+ if !path.exists() {
+ return Err(anyhow!("no such device {path:?}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
+ }
+
+ if !path.starts_with(SYSFS_PLATFORM_DEVICES_PATH) {
+ return Err(anyhow!("{path:?} is not a platform device"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
+ }
+
+ Ok(())
+}
+
+fn get_device_iommu_group(path: &Path) -> Option<u64> {
+ let group_path = read_link(path.join("iommu_group")).ok()?;
+ let group = group_path.file_name()?;
+ group.to_str()?.parse().ok()
+}
+
+fn is_bound_to_vfio_driver(path: &Path) -> bool {
+ let Ok(driver_path) = read_link(path.join("driver")) else {
+ return false;
+ };
+ let Some(driver) = driver_path.file_name() else {
+ return false;
+ };
+ driver.to_str().unwrap_or("") == "vfio-platform"
+}
+
+fn bind_vfio_driver(path: &Path) -> binder::Result<()> {
+ if is_bound_to_vfio_driver(path) {
+ // already bound
+ return Ok(());
+ }
+
+ // unbind
+ let Some(device) = path.file_name() else {
+ return Err(anyhow!("can't get device name from {path:?}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
+ };
+ let Some(device_str) = device.to_str() else {
+ return Err(anyhow!("invalid filename {device:?}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT);
+ };
+ let unbind_path = path.join("driver/unbind");
+ if unbind_path.exists() {
+ write(&unbind_path, device_str.as_bytes())
+ .with_context(|| format!("could not unbind {device_str}"))
+ .or_service_specific_exception(-1)?;
+ }
+
+ // bind to VFIO
+ write(path.join("driver_override"), b"vfio-platform")
+ .with_context(|| format!("could not bind {device_str} to vfio-platform"))
+ .or_service_specific_exception(-1)?;
+
+ write(SYSFS_PLATFORM_DRIVERS_PROBE_PATH, device_str.as_bytes())
+ .with_context(|| format!("could not write {device_str} to drivers-probe"))
+ .or_service_specific_exception(-1)?;
+
+ // final check
+ if !is_bound_to_vfio_driver(path) {
+ return Err(anyhow!("{path:?} still not bound to vfio driver"))
+ .or_service_specific_exception(-1);
+ }
+
+ if get_device_iommu_group(path).is_none() {
+ return Err(anyhow!("can't get iommu group for {path:?}"))
+ .or_service_specific_exception(-1);
+ }
+
+ Ok(())
+}
+
+fn bind_device(path: &Path) -> binder::Result<()> {
+ let path = path
+ .canonicalize()
+ .with_context(|| format!("can't canonicalize {path:?}"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT)?;
+
+ check_platform_device(&path)?;
+ bind_vfio_driver(&path)
+}
+
+fn get_dtbo_img_path() -> binder::Result<PathBuf> {
+ let slot_suffix = system_properties::read("ro.boot.slot_suffix")
+ .context("Failed to read ro.boot.slot_suffix")
+ .or_service_specific_exception(-1)?
+ .ok_or_else(|| anyhow!("slot_suffix is none"))
+ .or_service_specific_exception(-1)?;
+ Ok(PathBuf::from(format!("/dev/block/by-name/dtbo{slot_suffix}")))
+}
+
+fn read_values(file: &mut File, size: usize, offset: u64) -> binder::Result<Vec<u8>> {
+ file.seek(SeekFrom::Start(offset))
+ .context("Cannot seek the offset")
+ .or_service_specific_exception(-1)?;
+ let mut buffer = vec![0_u8; size];
+ file.read_exact(&mut buffer)
+ .context("Failed to read buffer")
+ .or_service_specific_exception(-1)?;
+ Ok(buffer)
+}
+
+fn get_dt_table_header(file: &mut File) -> binder::Result<DtTableHeader> {
+ let values = read_values(file, size_of::<DtTableHeader>(), 0)?;
+ let dt_table_header = DtTableHeader::read_from(values.as_slice())
+ .context("DtTableHeader is invalid")
+ .or_service_specific_exception(-1)?;
+ if dt_table_header.magic.get() != DT_TABLE_MAGIC
+ || dt_table_header.header_size.get() as usize != size_of::<DtTableHeader>()
+ {
+ return Err(anyhow!("DtTableHeader is invalid")).or_service_specific_exception(-1)?;
+ }
+ Ok(dt_table_header)
+}
+
+fn get_dt_table_entry(
+ file: &mut File,
+ header: &DtTableHeader,
+ index: u32,
+) -> binder::Result<DtTableEntry> {
+ if index >= header.dt_entry_count.get() {
+ return Err(anyhow!("Invalid dtbo index {index}")).or_service_specific_exception(-1)?;
+ }
+ let Some(prev_dt_entry_total_size) = header.dt_entry_size.get().checked_mul(index) else {
+ return Err(anyhow!("Unexpected arithmetic result"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE);
+ };
+ let Some(dt_entry_offset) =
+ prev_dt_entry_total_size.checked_add(header.dt_entries_offset.get())
+ else {
+ return Err(anyhow!("Unexpected arithmetic result"))
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE);
+ };
+ let values = read_values(file, size_of::<DtTableEntry>(), dt_entry_offset.into())?;
+ let dt_table_entry = DtTableEntry::read_from(values.as_slice())
+ .with_context(|| format!("DtTableEntry at index {index} is invalid."))
+ .or_service_specific_exception(-1)?;
+ Ok(dt_table_entry)
+}
+
+fn filter_dtbo_from_img(
+ dtbo_img_file: &mut File,
+ entry: &DtTableEntry,
+ dtbo_fd: &ParcelFileDescriptor,
+) -> binder::Result<()> {
+ let dt_size = entry
+ .dt_size
+ .get()
+ .try_into()
+ .context("Failed to convert type")
+ .or_binder_exception(ExceptionCode::ILLEGAL_STATE)?;
+ let buffer = read_values(dtbo_img_file, dt_size, entry.dt_offset.get().into())?;
+
+ let mut dtbo_fd = dtbo_fd
+ .as_ref()
+ .try_clone()
+ .context("Failed to clone File from ParcelFileDescriptor")
+ .or_binder_exception(ExceptionCode::BAD_PARCELABLE)?;
+
+ // TODO(b/296796644): Filter dtbo.img, not writing all information.
+ dtbo_fd
+ .write_all(&buffer)
+ .context("Failed to write dtbo file")
+ .or_service_specific_exception(-1)?;
+ Ok(())
+}
+
+fn write_dtbo(dtbo_fd: &ParcelFileDescriptor) -> binder::Result<()> {
+ let dtbo_path = get_dtbo_img_path()?;
+ let mut dtbo_img = File::open(dtbo_path)
+ .context("Failed to open DTBO partition")
+ .or_service_specific_exception(-1)?;
+
+ let dt_table_header = get_dt_table_header(&mut dtbo_img)?;
+ let vm_dtbo_idx = system_properties::read("ro.boot.hypervisor.vm_dtbo_idx")
+ .context("Failed to read vm_dtbo_idx")
+ .or_service_specific_exception(-1)?
+ .ok_or_else(|| anyhow!("vm_dtbo_idx is none"))
+ .or_service_specific_exception(-1)?;
+ let vm_dtbo_idx = vm_dtbo_idx
+ .parse()
+ .context("vm_dtbo_idx is not an integer")
+ .or_service_specific_exception(-1)?;
+ let dt_table_entry = get_dt_table_entry(&mut dtbo_img, &dt_table_header, vm_dtbo_idx)?;
+ filter_dtbo_from_img(&mut dtbo_img, &dt_table_entry, dtbo_fd)?;
+ Ok(())
+}
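
For reference, the lookup performed by get_dt_table_header() and get_dt_table_entry() reduces to offset arithmetic over the table described by the two structs above; a small illustrative sketch (not part of this patch):

```rust
/// Byte offset of DT table entry `index`, mirroring the checked arithmetic above:
/// entries start at `dt_entries_offset` and are `dt_entry_size` bytes apart.
fn dt_entry_offset(dt_entries_offset: u32, dt_entry_size: u32, index: u32) -> Option<u32> {
    dt_entry_size.checked_mul(index)?.checked_add(dt_entries_offset)
}

fn main() {
    // With the v0 layout (32-byte header immediately followed by 32-byte entries),
    // entry 2 starts at 32 + 2 * 32 = 96 bytes into dtbo.img; its blob then lives
    // at that entry's dt_offset and spans dt_size bytes.
    assert_eq!(dt_entry_offset(32, 32, 2), Some(96));
}
```
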
diff --git a/virtualizationservice/vfio_handler/src/main.rs b/virtualizationservice/vfio_handler/src/main.rs
new file mode 100644
index 0000000..1a1cce8
--- /dev/null
+++ b/virtualizationservice/vfio_handler/src/main.rs
@@ -0,0 +1,45 @@
+// Copyright 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Android VfioHandler
+
+mod aidl;
+
+use crate::aidl::VfioHandler;
+use android_logger::Config;
+use android_system_virtualizationservice_internal::aidl::android::system::virtualizationservice_internal::IVfioHandler::{
+ BnVfioHandler,
+ BpVfioHandler,
+ IVfioHandler,
+};
+use binder::{register_lazy_service, BinderFeatures, ProcessState};
+use log::{info, Level};
+
+const LOG_TAG: &str = "VfioHandler";
+
+fn main() {
+ android_logger::init_once(
+ Config::default()
+ .with_tag(LOG_TAG)
+ .with_min_level(Level::Info)
+ .with_log_id(android_logger::LogId::System),
+ );
+
+ let service = VfioHandler::init();
+ let service = BnVfioHandler::new_binder(service, BinderFeatures::default());
+ register_lazy_service(<BpVfioHandler as IVfioHandler>::get_descriptor(), service.as_binder())
+ .unwrap();
+ info!("Registered Binder service, joining threadpool.");
+ ProcessState::join_thread_pool();
+}
diff --git a/vm/src/main.rs b/vm/src/main.rs
index bc3f4da..4c44496 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -24,7 +24,7 @@
};
use anyhow::{Context, Error};
use binder::{ProcessState, Strong};
-use clap::Parser;
+use clap::{Args, Parser};
use create_idsig::command_create_idsig;
use create_partition::command_create_partition;
use run::{command_run, command_run_app, command_run_microdroid};
@@ -34,173 +34,165 @@
#[derive(Debug)]
struct Idsigs(Vec<PathBuf>);
+#[derive(Args)]
+/// Collection of flags that are at VM level and therefore applicable to all subcommands
+pub struct CommonConfig {
+ /// Name of VM
+ #[arg(long)]
+ name: Option<String>,
+
+ /// Run VM with vCPU topology matching that of the host. If unspecified, defaults to 1 vCPU.
+ #[arg(long, default_value = "one_cpu", value_parser = parse_cpu_topology)]
+ cpu_topology: CpuTopology,
+
+ /// Comma separated list of task profile names to apply to the VM
+ #[arg(long)]
+ task_profiles: Vec<String>,
+
+ /// Memory size (in MiB) of the VM. If unspecified, defaults to the value of `memory_mib`
+ /// in the VM config file.
+ #[arg(short, long)]
+ mem: Option<u32>,
+
+ /// Run VM in protected mode.
+ #[arg(short, long)]
+ protected: bool,
+}
+
+#[derive(Args)]
+/// Collection of flags for debugging
+pub struct DebugConfig {
+ /// Debug level of the VM. Supported values: "full" (default), and "none".
+ #[arg(long, default_value = "full", value_parser = parse_debug_level)]
+ debug: DebugLevel,
+
+ /// Path to file for VM console output.
+ #[arg(long)]
+ console: Option<PathBuf>,
+
+ /// Path to file for VM console input.
+ #[arg(long)]
+ console_in: Option<PathBuf>,
+
+ /// Path to file for VM log output.
+ #[arg(long)]
+ log: Option<PathBuf>,
+
+ /// Port at which crosvm will start a gdb server to debug guest kernel.
+ /// Note: this is only supported on Android kernels android14-5.15 and higher.
+ #[arg(long)]
+ gdb: Option<NonZeroU16>,
+}
+
+#[derive(Args)]
+/// Collection of flags that are Microdroid specific
+pub struct MicrodroidConfig {
+ /// Path to the file backing the storage.
+ /// Created if the option is used but the path does not exist in the device.
+ #[arg(long)]
+ storage: Option<PathBuf>,
+
+ /// Size of the storage. Used only if --storage is supplied but path does not exist
+ /// Default size is 10*1024*1024
+ #[arg(long)]
+ storage_size: Option<u64>,
+
+ /// Path to custom kernel image to use when booting Microdroid.
+ #[arg(long)]
+ kernel: Option<PathBuf>,
+
+ /// Path to disk image containing vendor-specific modules.
+ #[arg(long)]
+ vendor: Option<PathBuf>,
+
+ /// SysFS nodes of devices to assign to VM
+ #[arg(long)]
+ devices: Vec<PathBuf>,
+}
+
+#[derive(Args)]
+/// Flags for the run_app subcommand
+pub struct RunAppConfig {
+ #[command(flatten)]
+ common: CommonConfig,
+
+ #[command(flatten)]
+ debug: DebugConfig,
+
+ #[command(flatten)]
+ microdroid: MicrodroidConfig,
+
+ /// Path to VM Payload APK
+ apk: PathBuf,
+
+ /// Path to idsig of the APK
+ idsig: PathBuf,
+
+ /// Path to the instance image. Created if not exists.
+ instance: PathBuf,
+
+ /// Path to VM config JSON within APK (e.g. assets/vm_config.json)
+ #[arg(long)]
+ config_path: Option<String>,
+
+ /// Name of VM payload binary within APK (e.g. MicrodroidTestNativeLib.so)
+ #[arg(long)]
+ #[arg(alias = "payload_path")]
+ payload_binary_name: Option<String>,
+
+ /// Paths to extra idsig files.
+ #[arg(long = "extra-idsig")]
+ extra_idsigs: Vec<PathBuf>,
+}
+
+#[derive(Args)]
+/// Flags for the run_microdroid subcommand
+pub struct RunMicrodroidConfig {
+ #[command(flatten)]
+ common: CommonConfig,
+
+ #[command(flatten)]
+ debug: DebugConfig,
+
+ #[command(flatten)]
+ microdroid: MicrodroidConfig,
+
+ /// Path to the directory where VM-related files (e.g. instance.img, apk.idsig, etc.) will
+ /// be stored. If not specified a random directory under /data/local/tmp/microdroid will be
+ /// created and used.
+ #[arg(long)]
+ work_dir: Option<PathBuf>,
+}
+
+#[derive(Args)]
+/// Flags for the run subcommand
+pub struct RunCustomVmConfig {
+ #[command(flatten)]
+ common: CommonConfig,
+
+ #[command(flatten)]
+ debug: DebugConfig,
+
+ /// Path to VM config JSON
+ config: PathBuf,
+}
+
#[derive(Parser)]
enum Opt {
/// Run a virtual machine with a config in APK
RunApp {
- /// Path to VM Payload APK
- apk: PathBuf,
-
- /// Path to idsig of the APK
- idsig: PathBuf,
-
- /// Path to the instance image. Created if not exists.
- instance: PathBuf,
-
- /// Path to VM config JSON within APK (e.g. assets/vm_config.json)
- #[clap(long)]
- config_path: Option<String>,
-
- /// Name of VM payload binary within APK (e.g. MicrodroidTestNativeLib.so)
- #[clap(long)]
- #[clap(alias = "payload_path")]
- payload_binary_name: Option<String>,
-
- /// Name of VM
- #[clap(long)]
- name: Option<String>,
-
- /// Path to the file backing the storage.
- /// Created if the option is used but the path does not exist in the device.
- #[clap(long)]
- storage: Option<PathBuf>,
-
- /// Size of the storage. Used only if --storage is supplied but path does not exist
- /// Default size is 10*1024*1024
- #[clap(long)]
- storage_size: Option<u64>,
-
- /// Path to file for VM console output.
- #[clap(long)]
- console: Option<PathBuf>,
-
- /// Path to file for VM log output.
- #[clap(long)]
- log: Option<PathBuf>,
-
- /// Debug level of the VM. Supported values: "none" (default), and "full".
- #[clap(long, default_value = "none", value_parser = parse_debug_level)]
- debug: DebugLevel,
-
- /// Run VM in protected mode.
- #[clap(short, long)]
- protected: bool,
-
- /// Memory size (in MiB) of the VM. If unspecified, defaults to the value of `memory_mib`
- /// in the VM config file.
- #[clap(short, long)]
- mem: Option<u32>,
-
- /// Run VM with vCPU topology matching that of the host. If unspecified, defaults to 1 vCPU.
- #[clap(long, default_value = "one_cpu", value_parser = parse_cpu_topology)]
- cpu_topology: CpuTopology,
-
- /// Comma separated list of task profile names to apply to the VM
- #[clap(long)]
- task_profiles: Vec<String>,
-
- /// Paths to extra idsig files.
- #[clap(long = "extra-idsig")]
- extra_idsigs: Vec<PathBuf>,
-
- /// Port at which crosvm will start a gdb server to debug guest kernel.
- /// Note: this is only supported on Android kernels android14-5.15 and higher.
- #[clap(long)]
- gdb: Option<NonZeroU16>,
-
- /// Path to custom kernel image to use when booting Microdroid.
- #[clap(long)]
- kernel: Option<PathBuf>,
+ #[command(flatten)]
+ config: RunAppConfig,
},
/// Run a virtual machine with Microdroid inside
RunMicrodroid {
- /// Path to the directory where VM-related files (e.g. instance.img, apk.idsig, etc.) will
- /// be stored. If not specified a random directory under /data/local/tmp/microdroid will be
- /// created and used.
- #[clap(long)]
- work_dir: Option<PathBuf>,
-
- /// Name of VM
- #[clap(long)]
- name: Option<String>,
-
- /// Path to the file backing the storage.
- /// Created if the option is used but the path does not exist in the device.
- #[clap(long)]
- storage: Option<PathBuf>,
-
- /// Size of the storage. Used only if --storage is supplied but path does not exist
- /// Default size is 10*1024*1024
- #[clap(long)]
- storage_size: Option<u64>,
-
- /// Path to file for VM console output.
- #[clap(long)]
- console: Option<PathBuf>,
-
- /// Path to file for VM log output.
- #[clap(long)]
- log: Option<PathBuf>,
-
- /// Debug level of the VM. Supported values: "none" (default), and "full".
- #[clap(long, default_value = "full", value_parser = parse_debug_level)]
- debug: DebugLevel,
-
- /// Run VM in protected mode.
- #[clap(short, long)]
- protected: bool,
-
- /// Memory size (in MiB) of the VM. If unspecified, defaults to the value of `memory_mib`
- /// in the VM config file.
- #[clap(short, long)]
- mem: Option<u32>,
-
- /// Run VM with vCPU topology matching that of the host. If unspecified, defaults to 1 vCPU.
- #[clap(long, default_value = "one_cpu", value_parser = parse_cpu_topology)]
- cpu_topology: CpuTopology,
-
- /// Comma separated list of task profile names to apply to the VM
- #[clap(long)]
- task_profiles: Vec<String>,
-
- /// Port at which crosvm will start a gdb server to debug guest kernel.
- /// Note: this is only supported on Android kernels android14-5.15 and higher.
- #[clap(long)]
- gdb: Option<NonZeroU16>,
-
- /// Path to custom kernel image to use when booting Microdroid.
- #[clap(long)]
- kernel: Option<PathBuf>,
+ #[command(flatten)]
+ config: RunMicrodroidConfig,
},
/// Run a virtual machine
Run {
- /// Path to VM config JSON
- config: PathBuf,
-
- /// Name of VM
- #[clap(long)]
- name: Option<String>,
-
- /// Run VM with vCPU topology matching that of the host. If unspecified, defaults to 1 vCPU.
- #[clap(long, default_value = "one_cpu", value_parser = parse_cpu_topology)]
- cpu_topology: CpuTopology,
-
- /// Comma separated list of task profile names to apply to the VM
- #[clap(long)]
- task_profiles: Vec<String>,
-
- /// Path to file for VM console output.
- #[clap(long)]
- console: Option<PathBuf>,
-
- /// Path to file for VM log output.
- #[clap(long)]
- log: Option<PathBuf>,
-
- /// Port at which crosvm will start a gdb server to debug guest kernel.
- /// Note: this is only supported on Android kernels android14-5.15 and higher.
- #[clap(long)]
- gdb: Option<NonZeroU16>,
+ #[command(flatten)]
+ config: RunCustomVmConfig,
},
/// List running virtual machines
List,
@@ -215,7 +207,7 @@
size: u64,
/// Type of the partition
- #[clap(short = 't', long = "type", default_value = "raw",
+ #[arg(short = 't', long = "type", default_value = "raw",
value_parser = parse_partition_type)]
partition_type: PartitionType,
},
@@ -267,89 +259,9 @@
ProcessState::start_thread_pool();
match opt {
- Opt::RunApp {
- name,
- apk,
- idsig,
- instance,
- storage,
- storage_size,
- config_path,
- payload_binary_name,
- console,
- log,
- debug,
- protected,
- mem,
- cpu_topology,
- task_profiles,
- extra_idsigs,
- gdb,
- kernel,
- } => command_run_app(
- name,
- get_service()?.as_ref(),
- &apk,
- &idsig,
- &instance,
- storage.as_deref(),
- storage_size,
- config_path,
- payload_binary_name,
- console.as_deref(),
- log.as_deref(),
- debug,
- protected,
- mem,
- cpu_topology,
- task_profiles,
- &extra_idsigs,
- gdb,
- kernel.as_deref(),
- ),
- Opt::RunMicrodroid {
- name,
- work_dir,
- storage,
- storage_size,
- console,
- log,
- debug,
- protected,
- mem,
- cpu_topology,
- task_profiles,
- gdb,
- kernel,
- } => command_run_microdroid(
- name,
- get_service()?.as_ref(),
- work_dir,
- storage.as_deref(),
- storage_size,
- console.as_deref(),
- log.as_deref(),
- debug,
- protected,
- mem,
- cpu_topology,
- task_profiles,
- gdb,
- kernel.as_deref(),
- ),
- Opt::Run { name, config, cpu_topology, task_profiles, console, log, gdb } => {
- command_run(
- name,
- get_service()?.as_ref(),
- &config,
- console.as_deref(),
- log.as_deref(),
- /* mem */ None,
- cpu_topology,
- task_profiles,
- gdb,
- )
- }
+ Opt::RunApp { config } => command_run_app(config),
+ Opt::RunMicrodroid { config } => command_run_microdroid(config),
+ Opt::Run { config } => command_run(config),
Opt::List => command_list(get_service()?.as_ref()),
Opt::Info => command_info(),
Opt::CreatePartition { path, size, partition_type } => {
@@ -391,6 +303,22 @@
println!("/dev/kvm does not exist.");
}
+ if Path::new("/dev/vfio/vfio").exists() {
+ println!("/dev/vfio/vfio exists.");
+ } else {
+ println!("/dev/vfio/vfio does not exist.");
+ }
+
+ if Path::new("/sys/bus/platform/drivers/vfio-platform").exists() {
+ println!("VFIO-platform is supported.");
+ } else {
+ println!("VFIO-platform is not supported.");
+ }
+
+ let devices = get_service()?.getAssignableDevices()?;
+ let devices = devices.into_iter().map(|x| x.node).collect::<Vec<_>>();
+ println!("Assignable devices: {}", serde_json::to_string(&devices)?);
+
Ok(())
}
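
The restructuring above relies on clap's derive support for shared argument groups: each group derives Args and is pulled into a subcommand with #[command(flatten)], replacing the long per-variant field lists. A standalone sketch with hypothetical flag names:

```rust
use clap::{Args, Parser};

#[derive(Args)]
struct Common {
    /// Name of the VM
    #[arg(long)]
    name: Option<String>,
}

#[derive(Parser)]
enum Cli {
    /// Run something, reusing the shared flag group
    Run {
        #[command(flatten)]
        common: Common,
    },
}

fn main() {
    // "--name demo" is parsed into the flattened Common group.
    match Cli::parse_from(["vm", "run", "--name", "demo"]) {
        Cli::Run { common } => assert_eq!(common.name.as_deref(), Some("demo")),
    }
}
```
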
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 54c1de4..fc8d7e0 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -15,11 +15,14 @@
//! Command to run a VM.
use crate::create_partition::command_create_partition;
+use crate::{get_service, RunAppConfig, RunCustomVmConfig, RunMicrodroidConfig};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
- CpuTopology::CpuTopology,
IVirtualizationService::IVirtualizationService,
PartitionType::PartitionType,
- VirtualMachineAppConfig::{DebugLevel::DebugLevel, Payload::Payload, VirtualMachineAppConfig},
+ VirtualMachineAppConfig::{
+ CustomConfig::CustomConfig, DebugLevel::DebugLevel, Payload::Payload,
+ VirtualMachineAppConfig,
+ },
VirtualMachineConfig::VirtualMachineConfig,
VirtualMachinePayloadConfig::VirtualMachinePayloadConfig,
VirtualMachineState::VirtualMachineState,
@@ -32,7 +35,6 @@
use std::fs;
use std::fs::File;
use std::io;
-use std::num::NonZeroU16;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::path::{Path, PathBuf};
use vmclient::{ErrorCode, VmInstance};
@@ -40,93 +42,78 @@
use zip::ZipArchive;
/// Run a VM from the given APK, idsig, and config.
-#[allow(clippy::too_many_arguments)]
-pub fn command_run_app(
- name: Option<String>,
- service: &dyn IVirtualizationService,
- apk: &Path,
- idsig: &Path,
- instance: &Path,
- storage: Option<&Path>,
- storage_size: Option<u64>,
- config_path: Option<String>,
- payload_binary_name: Option<String>,
- console_path: Option<&Path>,
- log_path: Option<&Path>,
- debug_level: DebugLevel,
- protected: bool,
- mem: Option<u32>,
- cpu_topology: CpuTopology,
- task_profiles: Vec<String>,
- extra_idsigs: &[PathBuf],
- gdb: Option<NonZeroU16>,
- kernel: Option<&Path>,
-) -> Result<(), Error> {
- let apk_file = File::open(apk).context("Failed to open APK file")?;
+pub fn command_run_app(config: RunAppConfig) -> Result<(), Error> {
+ let service = get_service()?;
+ let apk = File::open(&config.apk).context("Failed to open APK file")?;
- let extra_apks = match config_path.as_deref() {
- Some(path) => parse_extra_apk_list(apk, path)?,
+ let extra_apks = match config.config_path.as_deref() {
+ Some(path) => parse_extra_apk_list(&config.apk, path)?,
None => vec![],
};
- if extra_apks.len() != extra_idsigs.len() {
+ if extra_apks.len() != config.extra_idsigs.len() {
bail!(
"Found {} extra apks, but there are {} extra idsigs",
extra_apks.len(),
- extra_idsigs.len()
+ config.extra_idsigs.len()
)
}
- for i in 0..extra_apks.len() {
- let extra_apk_fd = ParcelFileDescriptor::new(File::open(&extra_apks[i])?);
- let extra_idsig_fd = ParcelFileDescriptor::new(File::create(&extra_idsigs[i])?);
+ for (i, extra_apk) in extra_apks.iter().enumerate() {
+ let extra_apk_fd = ParcelFileDescriptor::new(File::open(extra_apk)?);
+ let extra_idsig_fd = ParcelFileDescriptor::new(File::create(&config.extra_idsigs[i])?);
service.createOrUpdateIdsigFile(&extra_apk_fd, &extra_idsig_fd)?;
}
- let idsig_file = File::create(idsig).context("Failed to create idsig file")?;
+ let idsig = File::create(&config.idsig).context("Failed to create idsig file")?;
- let apk_fd = ParcelFileDescriptor::new(apk_file);
- let idsig_fd = ParcelFileDescriptor::new(idsig_file);
+ let apk_fd = ParcelFileDescriptor::new(apk);
+ let idsig_fd = ParcelFileDescriptor::new(idsig);
service.createOrUpdateIdsigFile(&apk_fd, &idsig_fd)?;
- let idsig_file = File::open(idsig).context("Failed to open idsig file")?;
- let idsig_fd = ParcelFileDescriptor::new(idsig_file);
+ let idsig = File::open(&config.idsig).context("Failed to open idsig file")?;
+ let idsig_fd = ParcelFileDescriptor::new(idsig);
- if !instance.exists() {
+ if !config.instance.exists() {
const INSTANCE_FILE_SIZE: u64 = 10 * 1024 * 1024;
command_create_partition(
- service,
- instance,
+ service.as_ref(),
+ &config.instance,
INSTANCE_FILE_SIZE,
PartitionType::ANDROID_VM_INSTANCE,
)?;
}
- let storage = if let Some(path) = storage {
+ let storage = if let Some(path) = config.microdroid.storage {
if !path.exists() {
command_create_partition(
- service,
- path,
- storage_size.unwrap_or(10 * 1024 * 1024),
+ service.as_ref(),
+ &path,
+ config.microdroid.storage_size.unwrap_or(10 * 1024 * 1024),
PartitionType::ENCRYPTEDSTORE,
)?;
}
- Some(open_parcel_file(path, true)?)
+ Some(open_parcel_file(&path, true)?)
} else {
None
};
- let kernel = kernel.map(|p| open_parcel_file(p, false)).transpose()?;
+ let kernel =
+ config.microdroid.kernel.as_ref().map(|p| open_parcel_file(p, false)).transpose()?;
- let extra_idsig_files: Result<Vec<File>, _> = extra_idsigs.iter().map(File::open).collect();
+ let vendor =
+ config.microdroid.vendor.as_ref().map(|p| open_parcel_file(p, false)).transpose()?;
+
+ let extra_idsig_files: Result<Vec<File>, _> =
+ config.extra_idsigs.iter().map(File::open).collect();
let extra_idsig_fds = extra_idsig_files?.into_iter().map(ParcelFileDescriptor::new).collect();
- let payload = if let Some(config_path) = config_path {
- if payload_binary_name.is_some() {
+ let payload = if let Some(config_path) = config.config_path {
+ if config.payload_binary_name.is_some() {
bail!("Only one of --config-path or --payload-binary-name can be defined")
}
Payload::ConfigPath(config_path)
- } else if let Some(payload_binary_name) = payload_binary_name {
+ } else if let Some(payload_binary_name) = config.payload_binary_name {
Payload::PayloadConfig(VirtualMachinePayloadConfig {
payloadBinaryName: payload_binary_name,
})
@@ -134,25 +121,45 @@
bail!("Either --config-path or --payload-binary-name must be defined")
};
- let payload_config_str = format!("{:?}!{:?}", apk, payload);
+ let payload_config_str = format!("{:?}!{:?}", config.apk, payload);
- let config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
- name: name.unwrap_or_else(|| String::from("VmRunApp")),
+ let custom_config = CustomConfig {
+ customKernelImage: kernel,
+ gdbPort: config.debug.gdb.map(u16::from).unwrap_or(0) as i32, // 0 means no gdb
+ taskProfiles: config.common.task_profiles,
+ vendorImage: vendor,
+ devices: config
+ .microdroid
+ .devices
+ .iter()
+ .map(|x| {
+ x.to_str().map(String::from).ok_or(anyhow!("Failed to convert {x:?} to String"))
+ })
+ .collect::<Result<_, _>>()?,
+ };
+
+ let vm_config = VirtualMachineConfig::AppConfig(VirtualMachineAppConfig {
+ name: config.common.name.unwrap_or_else(|| String::from("VmRunApp")),
apk: apk_fd.into(),
idsig: idsig_fd.into(),
extraIdsigs: extra_idsig_fds,
- instanceImage: open_parcel_file(instance, true /* writable */)?.into(),
+ instanceImage: open_parcel_file(&config.instance, true /* writable */)?.into(),
encryptedStorageImage: storage,
payload,
- debugLevel: debug_level,
- protectedVm: protected,
- memoryMib: mem.unwrap_or(0) as i32, // 0 means use the VM default
- cpuTopology: cpu_topology,
- taskProfiles: task_profiles,
- gdbPort: gdb.map(u16::from).unwrap_or(0) as i32, // 0 means no gdb
- customKernelImage: kernel,
+ debugLevel: config.debug.debug,
+ protectedVm: config.common.protected,
+ memoryMib: config.common.mem.unwrap_or(0) as i32, // 0 means use the VM default
+ cpuTopology: config.common.cpu_topology,
+ customConfig: Some(custom_config),
});
- run(service, &config, &payload_config_str, console_path, log_path)
+ run(
+ service.as_ref(),
+ &vm_config,
+ &payload_config_str,
+ config.debug.console.as_ref().map(|p| p.as_ref()),
+ config.debug.console_in.as_ref().map(|p| p.as_ref()),
+ config.debug.log.as_ref().map(|p| p.as_ref()),
+ )
}
fn find_empty_payload_apk_path() -> Result<PathBuf, Error> {
@@ -178,92 +185,55 @@
}
/// Run a VM with Microdroid
-#[allow(clippy::too_many_arguments)]
-pub fn command_run_microdroid(
- name: Option<String>,
- service: &dyn IVirtualizationService,
- work_dir: Option<PathBuf>,
- storage: Option<&Path>,
- storage_size: Option<u64>,
- console_path: Option<&Path>,
- log_path: Option<&Path>,
- debug_level: DebugLevel,
- protected: bool,
- mem: Option<u32>,
- cpu_topology: CpuTopology,
- task_profiles: Vec<String>,
- gdb: Option<NonZeroU16>,
- kernel: Option<&Path>,
-) -> Result<(), Error> {
+pub fn command_run_microdroid(config: RunMicrodroidConfig) -> Result<(), Error> {
let apk = find_empty_payload_apk_path()?;
println!("found path {}", apk.display());
- let work_dir = work_dir.unwrap_or(create_work_dir()?);
+ let work_dir = config.work_dir.unwrap_or(create_work_dir()?);
let idsig = work_dir.join("apk.idsig");
println!("apk.idsig path: {}", idsig.display());
let instance_img = work_dir.join("instance.img");
println!("instance.img path: {}", instance_img.display());
- let payload_binary_name = "MicrodroidEmptyPayloadJniLib.so";
- let extra_sig = [];
- command_run_app(
- name,
- service,
- &apk,
- &idsig,
- &instance_img,
- storage,
- storage_size,
- /* config_path= */ None,
- Some(payload_binary_name.to_owned()),
- console_path,
- log_path,
- debug_level,
- protected,
- mem,
- cpu_topology,
- task_profiles,
- &extra_sig,
- gdb,
- kernel,
- )
+ let app_config = RunAppConfig {
+ common: config.common,
+ debug: config.debug,
+ microdroid: config.microdroid,
+ apk,
+ idsig,
+ instance: instance_img,
+ config_path: None,
+ payload_binary_name: Some("MicrodroidEmptyPayloadJniLib.so".to_owned()),
+ extra_idsigs: [].to_vec(),
+ };
+ command_run_app(app_config)
}
/// Run a VM from the given configuration file.
-#[allow(clippy::too_many_arguments)]
-pub fn command_run(
- name: Option<String>,
- service: &dyn IVirtualizationService,
- config_path: &Path,
- console_path: Option<&Path>,
- log_path: Option<&Path>,
- mem: Option<u32>,
- cpu_topology: CpuTopology,
- task_profiles: Vec<String>,
- gdb: Option<NonZeroU16>,
-) -> Result<(), Error> {
- let config_file = File::open(config_path).context("Failed to open config file")?;
- let mut config =
+pub fn command_run(config: RunCustomVmConfig) -> Result<(), Error> {
+ let config_file = File::open(&config.config).context("Failed to open config file")?;
+ let mut vm_config =
VmConfig::load(&config_file).context("Failed to parse config file")?.to_parcelable()?;
- if let Some(mem) = mem {
- config.memoryMib = mem as i32;
+ if let Some(mem) = config.common.mem {
+ vm_config.memoryMib = mem as i32;
}
- if let Some(name) = name {
- config.name = name;
+ if let Some(name) = config.common.name {
+ vm_config.name = name;
} else {
- config.name = String::from("VmRun");
+ vm_config.name = String::from("VmRun");
}
- if let Some(gdb) = gdb {
- config.gdbPort = gdb.get() as i32;
+ if let Some(gdb) = config.debug.gdb {
+ vm_config.gdbPort = gdb.get() as i32;
}
- config.cpuTopology = cpu_topology;
- config.taskProfiles = task_profiles;
+ vm_config.cpuTopology = config.common.cpu_topology;
+ vm_config.taskProfiles = config.common.task_profiles;
run(
- service,
- &VirtualMachineConfig::RawConfig(config),
- &format!("{:?}", config_path),
- console_path,
- log_path,
+ get_service()?.as_ref(),
+ &VirtualMachineConfig::RawConfig(vm_config),
+ &format!("{:?}", &config.config),
+ config.debug.console.as_ref().map(|p| p.as_ref()),
+ config.debug.console_in.as_ref().map(|p| p.as_ref()),
+ config.debug.log.as_ref().map(|p| p.as_ref()),
)
}
@@ -283,33 +253,45 @@
service: &dyn IVirtualizationService,
config: &VirtualMachineConfig,
payload_config: &str,
- console_path: Option<&Path>,
+ console_out_path: Option<&Path>,
+ console_in_path: Option<&Path>,
log_path: Option<&Path>,
) -> Result<(), Error> {
- let console = if let Some(console_path) = console_path {
- Some(
- File::create(console_path)
- .with_context(|| format!("Failed to open console file {:?}", console_path))?,
- )
+ let console_out = if let Some(console_out_path) = console_out_path {
+ Some(File::create(console_out_path).with_context(|| {
+ format!("Failed to open console output file {:?}", console_out_path)
+ })?)
} else {
- Some(duplicate_stdout()?)
+ Some(duplicate_fd(io::stdout())?)
};
+ let console_in =
+ if let Some(console_in_path) = console_in_path {
+ Some(File::create(console_in_path).with_context(|| {
+ format!("Failed to open console input file {:?}", console_in_path)
+ })?)
+ } else {
+ Some(duplicate_fd(io::stdin())?)
+ };
let log = if let Some(log_path) = log_path {
Some(
File::create(log_path)
.with_context(|| format!("Failed to open log file {:?}", log_path))?,
)
} else {
- Some(duplicate_stdout()?)
+ Some(duplicate_fd(io::stdout())?)
};
-
let callback = Box::new(Callback {});
- let vm = VmInstance::create(service, config, console, log, Some(callback))
+ let vm = VmInstance::create(service, config, console_out, console_in, log, Some(callback))
.context("Failed to create VM")?;
vm.start().context("Failed to start VM")?;
+ let debug_level = match config {
+ VirtualMachineConfig::AppConfig(config) => config.debugLevel,
+ _ => DebugLevel::NONE,
+ };
println!(
- "Created VM from {} with CID {}, state is {}.",
+ "Created {} from {} with CID {}, state is {}.",
+ if debug_level == DebugLevel::FULL { "debuggable VM" } else { "VM" },
payload_config,
vm.cid(),
state_to_str(vm.state()?)
@@ -349,17 +331,17 @@
}
}
-/// Safely duplicate the standard output file descriptor.
-fn duplicate_stdout() -> io::Result<File> {
- let stdout_fd = io::stdout().as_raw_fd();
- // Safe because this just duplicates a file descriptor which we know to be valid, and we check
- // for an error.
- let dup_fd = unsafe { libc::dup(stdout_fd) };
+/// Safely duplicate the file descriptor.
+fn duplicate_fd<T: AsRawFd>(file: T) -> io::Result<File> {
+ let fd = file.as_raw_fd();
+    // SAFETY: This just duplicates a file descriptor which we know to be valid, and we check
+    // for an error.
+ let dup_fd = unsafe { libc::dup(fd) };
if dup_fd < 0 {
Err(io::Error::last_os_error())
} else {
- // Safe because we have just duplicated the file descriptor so we own it, and `from_raw_fd`
- // takes ownership of it.
+ // SAFETY: We have just duplicated the file descriptor so we own it, and `from_raw_fd` takes
+ // ownership of it.
Ok(unsafe { File::from_raw_fd(dup_fd) })
}
}
diff --git a/vm_payload/Android.bp b/vm_payload/Android.bp
index ae0d1a6..49b7f5f 100644
--- a/vm_payload/Android.bp
+++ b/vm_payload/Android.bp
@@ -10,8 +10,6 @@
srcs: ["src/*.rs"],
include_dirs: ["include"],
prefer_rlib: true,
- // Require unsafe blocks for inside unsafe functions.
- flags: ["-Dunsafe_op_in_unsafe_fn"],
rustlibs: [
"android.system.virtualization.payload-rust",
"libandroid_logger",
diff --git a/vmbase/Android.bp b/vmbase/Android.bp
index ac010b9..71b9e76 100644
--- a/vmbase/Android.bp
+++ b/vmbase/Android.bp
@@ -2,11 +2,28 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
+// The hierarchy of Soong modules to produce a vmbase-based binary is
+//
+// 0. rlibs may be used to provide high-level code (see "vmbase_rlib_defaults");
+// 1. rust_ffi_static packages low-level Rust code and any rlib into a static
+// library (see "vmbase_ffi_defaults") that cc_binary supports;
+// 2. cc_library_static may be used for extra C code (see "vmbase_cc_defaults");
+// 3. cc_binary produces an ELF from the (single) Rust-wrapping static library,
+// optional extra C libraries, and linker script (see "vmbase_elf_defaults");
+// 4. raw_binary strips the ELF into an image that can be loaded to memory;
+
+// Used by intermediate rust_library_rlib for vmbase-based binaries.
rust_defaults {
- name: "vmbase_rust_defaults",
+ name: "vmbase_rlib_defaults",
edition: "2021",
+ prefer_rlib: true,
host_supported: false,
enabled: false,
+ no_stdlibs: true,
+ stdlibs: [
+ "libcompiler_builtins.rust_sysroot",
+ "libcore.rust_sysroot",
+ ],
target: {
android_arm64: {
enabled: true,
@@ -14,19 +31,17 @@
},
}
+// Used by the "top-level" rust_ffi_static of vmbase-based binaries.
rust_defaults {
name: "vmbase_ffi_defaults",
- defaults: ["vmbase_rust_defaults"],
- no_stdlibs: true,
- stdlibs: [
- "libcompiler_builtins.rust_sysroot",
- "libcore.rust_sysroot",
- ],
+ defaults: ["vmbase_rlib_defaults"],
}
+// Used by extra cc_library_static linked into the final ELF.
cc_defaults {
name: "vmbase_cc_defaults",
nocrt: true,
+ no_libcrt: true,
system_shared_libs: [],
stl: "none",
installable: false,
@@ -39,8 +54,10 @@
sanitize: {
hwaddress: false,
},
+ native_coverage: false,
}
+// Used by cc_binary when producing the ELF of a vmbase-based binary.
cc_defaults {
name: "vmbase_elf_defaults",
defaults: ["vmbase_cc_defaults"],
@@ -48,18 +65,11 @@
static_libs: [
"libvmbase_entry",
],
- installable: false,
- enabled: false,
- target: {
- android_arm64: {
- enabled: true,
- },
- },
}
rust_library_rlib {
name: "libvmbase",
- defaults: ["vmbase_rust_defaults"],
+ defaults: ["vmbase_rlib_defaults"],
crate_name: "vmbase",
srcs: ["src/lib.rs"],
rustlibs: [
@@ -74,16 +84,15 @@
"libspin_nostd",
"libtinyvec_nostd",
"libvirtio_drivers",
+ "libzerocopy_nostd",
"libzeroize_nostd",
],
- no_stdlibs: true,
whole_static_libs: [
"librust_baremetal",
],
features: [
"cpu_feat_hafdbs",
],
- apex_available: ["com.android.virt"],
}
cc_library_static {
@@ -94,8 +103,6 @@
"exceptions.S",
"exceptions_panic.S",
],
- no_libcrt: true,
- apex_available: ["com.android.virt"],
}
filegroup {
diff --git a/vmbase/README.md b/vmbase/README.md
index 7f621fb..280d7e1 100644
--- a/vmbase/README.md
+++ b/vmbase/README.md
@@ -6,7 +6,7 @@
In particular it provides:
-- An [entry point](entry.S) that initialises the MMU with a hard-coded identity mapping, enables the
+- An [entry point](entry.S) that initializes the MMU with a hard-coded identity mapping, enables the
cache, prepares the image and allocates a stack.
- An [exception vector](exceptions.S) to call your exception handlers.
- A UART driver and `println!` macro for early console logging.
@@ -62,7 +62,7 @@
}
```
-vmbase adds a wrapper around your main function to initialise the console driver first (with the
+vmbase adds a wrapper around your main function to initialize the console driver first (with the
UART at base address `0x3f8`, the first UART allocated by crosvm), and make a PSCI `SYSTEM_OFF` call
to shutdown the VM if your main function ever returns.
@@ -93,7 +93,7 @@
The `println!` macro shouldn't be used in exception handlers, because it relies on a global instance
of the UART driver which might be locked when the exception happens, which would result in deadlock.
-Instead you can use `emergency_write_str` and `eprintln!`, which will re-initialise the UART every
+Instead you can use `emergency_write_str` and `eprintln!`, which will re-initialize the UART every
time to ensure that it can be used. This should still be used with care, as it may interfere with
whatever the rest of the program is doing with the UART.
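
Putting the README guidance together with the exceptions.rs change below, an exception handler built on vmbase looks roughly like this (a sketch only; it simply mirrors the macros used later in this patch):

```rust
use vmbase::{eprintln, power::reboot, read_sysreg};

#[no_mangle]
extern "C" fn sync_exception_current(_elr: u64, _spsr: u64) {
    // eprintln! re-initializes the UART, so it stays usable even if the global
    // console instance was locked when the exception was taken.
    eprintln!("sync_exception_current, esr={:#08x}", read_sysreg!("esr_el1"));
    reboot();
}
```
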
diff --git a/vmbase/entry.S b/vmbase/entry.S
index 9f6993a..9177a4a 100644
--- a/vmbase/entry.S
+++ b/vmbase/entry.S
@@ -63,72 +63,6 @@
.set .Lsctlrval, .L_SCTLR_ELx_M | .L_SCTLR_ELx_C | .L_SCTLR_ELx_SA | .L_SCTLR_EL1_ITD | .L_SCTLR_EL1_SED
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1 | .L_SCTLR_EL1_WXN
-/* SMC function IDs */
-.set .L_SMCCC_VERSION_ID, 0x80000000
-.set .L_SMCCC_TRNG_VERSION_ID, 0x84000050
-.set .L_SMCCC_TRNG_FEATURES_ID, 0x84000051
-.set .L_SMCCC_TRNG_RND64_ID, 0xc4000053
-
-/* SMC function versions */
-.set .L_SMCCC_VERSION_1_1, 0x0101
-.set .L_SMCCC_TRNG_VERSION_1_0, 0x0100
-
-/* Bionic-compatible stack protector */
-.section .data.stack_protector, "aw"
-__bionic_tls:
- .zero 40
-.global __stack_chk_guard
-__stack_chk_guard:
- .quad 0
-
-/**
- * This macro stores a random value into a register.
- * If a TRNG backed is not present or if an error occurs, the value remains unchanged.
- */
-.macro rnd_reg reg:req
- mov x20, x0
- mov x21, x1
- mov x22, x2
- mov x23, x3
-
- /* Verify SMCCC version >=1.1 */
- hvc_call .L_SMCCC_VERSION_ID
- cmp w0, 0
- b.lt 100f
- cmp w0, .L_SMCCC_VERSION_1_1
- b.lt 100f
-
- /* Verify TRNG ABI version 1.x */
- hvc_call .L_SMCCC_TRNG_VERSION_ID
- cmp w0, 0
- b.lt 100f
- cmp w0, .L_SMCCC_TRNG_VERSION_1_0
- b.lt 100f
-
- /* Call TRNG_FEATURES, ensure TRNG_RND is implemented */
- mov_i x1, .L_SMCCC_TRNG_RND64_ID
- hvc_call .L_SMCCC_TRNG_FEATURES_ID
- cmp w0, 0
- b.lt 100f
-
- /* Call TRNG_RND, request 64 bits of entropy */
- mov x1, #64
- hvc_call .L_SMCCC_TRNG_RND64_ID
- cmp x0, 0
- b.lt 100f
-
- mov \reg, x3
- b 101f
-
-100:
- reset_or_hang
-101:
- mov x0, x20
- mov x1, x21
- mov x2, x22
- mov x3, x23
-.endm
-
/**
* This is a generic entry point for an image. It carries out the operations required to prepare the
* loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
@@ -222,18 +156,17 @@
adr x30, vector_table_el1
msr vbar_el1, x30
- /* Set up Bionic-compatible thread-local storage. */
+ /*
+ * Set up Bionic-compatible thread-local storage.
+ *
+ * Note that TPIDR_EL0 can't be configured from rust_entry because the
+ * compiler will dereference it during function entry to access
+ * __stack_chk_guard and Rust doesn't support LLVM's
+ * __attribute__((no_stack_protector)).
+ */
adr_l x30, __bionic_tls
msr tpidr_el0, x30
- /* Randomize stack protector. */
- rnd_reg x29
- adr_l x30, __stack_chk_guard
- str x29, [x30]
-
- /* Write a null byte to the top of the stack guard to act as a string terminator. */
- strb wzr, [x30]
-
/* Call into Rust code. */
bl rust_entry
diff --git a/vmbase/example/Android.bp b/vmbase/example/Android.bp
index dc9a090..ae1a593 100644
--- a/vmbase/example/Android.bp
+++ b/vmbase/example/Android.bp
@@ -7,10 +7,8 @@
defaults: ["vmbase_ffi_defaults"],
crate_name: "vmbase_example",
srcs: ["src/main.rs"],
- edition: "2021",
rustlibs: [
"libaarch64_paging",
- "libbuddy_system_allocator",
"libdiced_open_dice_nostd",
"libfdtpci",
"liblibfdt",
diff --git a/vmbase/example/src/exceptions.rs b/vmbase/example/src/exceptions.rs
index 0522013..5d7768a 100644
--- a/vmbase/example/src/exceptions.rs
+++ b/vmbase/example/src/exceptions.rs
@@ -14,8 +14,7 @@
//! Exception handlers.
-use core::arch::asm;
-use vmbase::{eprintln, power::reboot};
+use vmbase::{eprintln, power::reboot, read_sysreg};
#[no_mangle]
extern "C" fn sync_exception_current(_elr: u64, _spsr: u64) {
@@ -71,9 +70,6 @@
#[inline]
fn print_esr() {
- let mut esr: u64;
- unsafe {
- asm!("mrs {esr}, esr_el1", esr = out(reg) esr);
- }
+ let esr = read_sysreg!("esr_el1");
eprintln!("esr={:#08x}", esr);
}
diff --git a/vmbase/example/src/layout.rs b/vmbase/example/src/layout.rs
index 2e9d27a..fc578bc 100644
--- a/vmbase/example/src/layout.rs
+++ b/vmbase/example/src/layout.rs
@@ -15,80 +15,36 @@
//! Memory layout.
use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
-use core::arch::asm;
use core::ops::Range;
use log::info;
use vmbase::layout;
-use vmbase::STACK_CHK_GUARD;
/// The first 1 GiB of memory are used for MMIO.
pub const DEVICE_REGION: MemoryRegion = MemoryRegion::new(0, 0x40000000);
-fn into_va_range(r: Range<usize>) -> Range<VirtualAddress> {
- VirtualAddress(r.start)..VirtualAddress(r.end)
-}
-
-/// Memory reserved for the DTB.
-pub fn dtb_range() -> Range<VirtualAddress> {
- into_va_range(layout::dtb_range())
-}
-
-/// Executable code.
-pub fn text_range() -> Range<VirtualAddress> {
- into_va_range(layout::text_range())
-}
-
-/// Read-only data.
-pub fn rodata_range() -> Range<VirtualAddress> {
- into_va_range(layout::rodata_range())
-}
-
-/// Initialised writable data.
-pub fn data_range() -> Range<VirtualAddress> {
- into_va_range(layout::data_range())
-}
-
-/// Zero-initialised writable data.
-pub fn bss_range() -> Range<VirtualAddress> {
- into_va_range(layout::bss_range())
-}
-
/// Writable data region for the stack.
pub fn boot_stack_range() -> Range<VirtualAddress> {
const PAGE_SIZE: usize = 4 << 10;
- into_va_range(layout::stack_range(40 * PAGE_SIZE))
-}
-
-/// Writable data region for allocations.
-pub fn scratch_range() -> Range<VirtualAddress> {
- into_va_range(layout::scratch_range())
-}
-
-fn data_load_address() -> VirtualAddress {
- VirtualAddress(layout::data_load_address())
-}
-
-fn binary_end() -> VirtualAddress {
- VirtualAddress(layout::binary_end())
+ layout::stack_range(40 * PAGE_SIZE)
}
pub fn print_addresses() {
- let dtb = dtb_range();
+ let dtb = layout::dtb_range();
info!("dtb: {}..{} ({} bytes)", dtb.start, dtb.end, dtb.end - dtb.start);
- let text = text_range();
+ let text = layout::text_range();
info!("text: {}..{} ({} bytes)", text.start, text.end, text.end - text.start);
- let rodata = rodata_range();
+ let rodata = layout::rodata_range();
info!("rodata: {}..{} ({} bytes)", rodata.start, rodata.end, rodata.end - rodata.start);
- info!("binary end: {}", binary_end());
- let data = data_range();
+ info!("binary end: {}", layout::binary_end());
+ let data = layout::data_range();
info!(
"data: {}..{} ({} bytes, loaded at {})",
data.start,
data.end,
data.end - data.start,
- data_load_address(),
+ layout::data_load_address(),
);
- let bss = bss_range();
+ let bss = layout::bss_range();
info!("bss: {}..{} ({} bytes)", bss.start, bss.end, bss.end - bss.start);
let boot_stack = boot_stack_range();
info!(
@@ -98,18 +54,3 @@
boot_stack.end - boot_stack.start
);
}
-
-/// Bionic-compatible thread-local storage entry, at the given offset from TPIDR_EL0.
-pub fn bionic_tls(off: usize) -> u64 {
- let mut base: usize;
- unsafe {
- asm!("mrs {base}, tpidr_el0", base = out(reg) base);
- let ptr = (base + off) as *const u64;
- *ptr
- }
-}
-
-/// Value of __stack_chk_guard.
-pub fn stack_chk_guard() -> u64 {
- *STACK_CHK_GUARD
-}
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index 1dd8517..ebd981c 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -23,36 +23,53 @@
extern crate alloc;
-use crate::layout::{
- bionic_tls, boot_stack_range, dtb_range, print_addresses, rodata_range, scratch_range,
- stack_chk_guard, text_range, DEVICE_REGION,
-};
+use crate::layout::{boot_stack_range, print_addresses, DEVICE_REGION};
use crate::pci::{check_pci, get_bar_region};
-use aarch64_paging::{idmap::IdMap, paging::Attributes};
+use aarch64_paging::paging::MemoryRegion;
+use aarch64_paging::MapError;
use alloc::{vec, vec::Vec};
-use buddy_system_allocator::LockedHeap;
use fdtpci::PciInfo;
use libfdt::Fdt;
use log::{debug, error, info, trace, warn, LevelFilter};
-use vmbase::{cstr, logger, main};
+use vmbase::{
+ bionic, configure_heap, cstr,
+ layout::{dtb_range, rodata_range, scratch_range, text_range},
+ linker, logger, main,
+ memory::{PageTable, SIZE_64KB},
+};
static INITIALISED_DATA: [u32; 4] = [1, 2, 3, 4];
static mut ZEROED_DATA: [u32; 10] = [0; 10];
static mut MUTABLE_DATA: [u32; 4] = [1, 2, 3, 4];
-const ASID: usize = 1;
-const ROOT_LEVEL: usize = 1;
-
-#[global_allocator]
-static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
-
-static mut HEAP: [u8; 65536] = [0; 65536];
-
main!(main);
+configure_heap!(SIZE_64KB);
+
+fn init_page_table(pci_bar_range: &MemoryRegion) -> Result<(), MapError> {
+ let mut page_table = PageTable::default();
+
+ page_table.map_device(&DEVICE_REGION)?;
+ page_table.map_code(&text_range().into())?;
+ page_table.map_rodata(&rodata_range().into())?;
+ page_table.map_data(&scratch_range().into())?;
+ page_table.map_data(&boot_stack_range().into())?;
+ page_table.map_rodata(&dtb_range().into())?;
+ page_table.map_device(pci_bar_range)?;
+
+ info!("Activating IdMap...");
+ // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
+ // aware of so activating it shouldn't have any visible effect.
+ unsafe {
+ page_table.activate();
+ }
+ info!("Activated.");
+
+ Ok(())
+}
/// Entry point for VM bootloader.
pub fn main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) {
- logger::init(LevelFilter::Debug).unwrap();
+ log::set_max_level(LevelFilter::Debug);
info!("Hello world");
info!("x0={:#018x}, x1={:#018x}, x2={:#018x}, x3={:#018x}", arg0, arg1, arg2, arg3);
@@ -63,8 +80,9 @@
info!("Checking FDT...");
let fdt = dtb_range();
- let fdt =
- unsafe { core::slice::from_raw_parts_mut(fdt.start.0 as *mut u8, fdt.end.0 - fdt.start.0) };
+ let fdt_size = fdt.end.0 - fdt.start.0;
+ // SAFETY: The DTB range is valid, writable memory, and we don't construct any aliases to it.
+ let fdt = unsafe { core::slice::from_raw_parts_mut(fdt.start.0 as *mut u8, fdt_size) };
let fdt = Fdt::from_mut_slice(fdt).unwrap();
info!("FDT passed verification.");
check_fdt(fdt);
@@ -74,78 +92,14 @@
modify_fdt(fdt);
- unsafe {
- HEAP_ALLOCATOR.lock().init(HEAP.as_mut_ptr() as usize, HEAP.len());
- }
-
check_alloc();
- let mut idmap = IdMap::new(ASID, ROOT_LEVEL);
- idmap
- .map_range(
- &DEVICE_REGION,
- Attributes::VALID | Attributes::DEVICE_NGNRE | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &text_range().into(),
- Attributes::VALID | Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY,
- )
- .unwrap();
- idmap
- .map_range(
- &rodata_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::READ_ONLY
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &scratch_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &boot_stack_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &dtb_range().into(),
- Attributes::VALID
- | Attributes::NORMAL
- | Attributes::NON_GLOBAL
- | Attributes::READ_ONLY
- | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
- idmap
- .map_range(
- &get_bar_region(&pci_info),
- Attributes::VALID | Attributes::DEVICE_NGNRE | Attributes::EXECUTE_NEVER,
- )
- .unwrap();
-
- info!("Activating IdMap...");
- trace!("{:?}", idmap);
- idmap.activate();
- info!("Activated.");
+ init_page_table(&get_bar_region(&pci_info)).unwrap();
check_data();
check_dice();
+ // SAFETY: This is the only place where `make_pci_root` is called.
let mut pci_root = unsafe { pci_info.make_pci_root() };
check_pci(&mut pci_root);
@@ -153,43 +107,58 @@
}
fn check_stack_guard() {
- const BIONIC_TLS_STACK_GRD_OFF: usize = 40;
-
info!("Testing stack guard");
- assert_eq!(bionic_tls(BIONIC_TLS_STACK_GRD_OFF), stack_chk_guard());
+ // SAFETY: No concurrency issue should occur when running these tests.
+ let stack_guard = unsafe { bionic::TLS.stack_guard };
+ assert_ne!(stack_guard, 0);
+ // Check that the guard contains a terminating NUL byte, for C functions consuming strings from the stack.
+ assert_eq!(stack_guard.to_ne_bytes().last(), Some(&0));
+ // Check that the TLS and guard are properly accessible from the dedicated register.
+ assert_eq!(stack_guard, bionic::__get_tls().stack_guard);
+ // Check that the LLVM __stack_chk_guard alias is also properly set up.
+ assert_eq!(
+ stack_guard,
+ // SAFETY: No concurrency issue should occur when running these tests.
+ unsafe { linker::__stack_chk_guard },
+ );
}
fn check_data() {
info!("INITIALISED_DATA: {:?}", INITIALISED_DATA.as_ptr());
- unsafe {
- info!("ZEROED_DATA: {:?}", ZEROED_DATA.as_ptr());
- info!("MUTABLE_DATA: {:?}", MUTABLE_DATA.as_ptr());
- info!("HEAP: {:?}", HEAP.as_ptr());
- }
+ // SAFETY: We only print the address of the static mutable variable, not its contents.
+ info!("ZEROED_DATA: {:?}", unsafe { ZEROED_DATA.as_ptr() });
+ // SAFETY: We only print the address of the static mutable variable, not its contents.
+ info!("MUTABLE_DATA: {:?}", unsafe { MUTABLE_DATA.as_ptr() });
assert_eq!(INITIALISED_DATA[0], 1);
assert_eq!(INITIALISED_DATA[1], 2);
assert_eq!(INITIALISED_DATA[2], 3);
assert_eq!(INITIALISED_DATA[3], 4);
- unsafe {
- for element in ZEROED_DATA.iter() {
- assert_eq!(*element, 0);
- }
- ZEROED_DATA[0] = 13;
- assert_eq!(ZEROED_DATA[0], 13);
- ZEROED_DATA[0] = 0;
- assert_eq!(ZEROED_DATA[0], 0);
+ // SAFETY: Nowhere else in the program accesses this static mutable variable, so there is no
+ // chance of concurrent access.
+ let zeroed_data = unsafe { &mut ZEROED_DATA };
+ // SAFETY: Nowhere else in the program accesses this static mutable variable, so there is no
+ // chance of concurrent access.
+ let mutable_data = unsafe { &mut MUTABLE_DATA };
- assert_eq!(MUTABLE_DATA[0], 1);
- assert_eq!(MUTABLE_DATA[1], 2);
- assert_eq!(MUTABLE_DATA[2], 3);
- assert_eq!(MUTABLE_DATA[3], 4);
- MUTABLE_DATA[0] += 41;
- assert_eq!(MUTABLE_DATA[0], 42);
- MUTABLE_DATA[0] -= 41;
- assert_eq!(MUTABLE_DATA[0], 1);
+ for element in zeroed_data.iter() {
+ assert_eq!(*element, 0);
}
+ zeroed_data[0] = 13;
+ assert_eq!(zeroed_data[0], 13);
+ zeroed_data[0] = 0;
+ assert_eq!(zeroed_data[0], 0);
+
+ assert_eq!(mutable_data[0], 1);
+ assert_eq!(mutable_data[1], 2);
+ assert_eq!(mutable_data[2], 3);
+ assert_eq!(mutable_data[3], 4);
+ mutable_data[0] += 41;
+ assert_eq!(mutable_data[0], 42);
+ mutable_data[0] -= 41;
+ assert_eq!(mutable_data[0], 1);
+
info!("Data looks good");
}
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index 384a9c1..b838539 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -20,13 +20,14 @@
use fdtpci::PciInfo;
use log::{debug, info};
use virtio_drivers::{
- device::{blk::VirtIOBlk, console::VirtIOConsole},
+ device::console::VirtIOConsole,
transport::{
- pci::{bus::PciRoot, virtio_device_type, PciTransport},
+ pci::{bus::PciRoot, PciTransport},
DeviceType, Transport,
},
- BufferDirection, Hal, PhysAddr, PAGE_SIZE,
+ BufferDirection, Error, Hal, PhysAddr, PAGE_SIZE,
};
+use vmbase::virtio::pci::{self, PciTransportIterator};
/// The standard sector size of a VirtIO block device, in bytes.
const SECTOR_SIZE_BYTES: usize = 512;
@@ -36,61 +37,82 @@
pub fn check_pci(pci_root: &mut PciRoot) {
let mut checked_virtio_device_count = 0;
- for (device_function, info) in pci_root.enumerate_bus(0) {
- let (status, command) = pci_root.get_status_command(device_function);
- info!("Found {} at {}, status {:?} command {:?}", info, device_function, status, command);
- if let Some(virtio_type) = virtio_device_type(&info) {
- info!(" VirtIO {:?}", virtio_type);
- let mut transport = PciTransport::new::<HalImpl>(pci_root, device_function).unwrap();
- info!(
- "Detected virtio PCI device with device type {:?}, features {:#018x}",
- transport.device_type(),
- transport.read_device_features(),
- );
- if check_virtio_device(transport, virtio_type) {
+ let mut block_device_count = 0;
+ let mut socket_device_count = 0;
+ for mut transport in PciTransportIterator::<HalImpl>::new(pci_root) {
+ info!(
+ "Detected virtio PCI device with device type {:?}, features {:#018x}",
+ transport.device_type(),
+ transport.read_device_features(),
+ );
+ match transport.device_type() {
+ DeviceType::Block => {
+ check_virtio_block_device(transport, block_device_count);
+ block_device_count += 1;
checked_virtio_device_count += 1;
}
+ DeviceType::Console => {
+ check_virtio_console_device(transport);
+ checked_virtio_device_count += 1;
+ }
+ DeviceType::Socket => {
+ check_virtio_socket_device(transport);
+ socket_device_count += 1;
+ checked_virtio_device_count += 1;
+ }
+ _ => {}
}
}
- assert_eq!(checked_virtio_device_count, 4);
+ assert_eq!(checked_virtio_device_count, 6);
+ assert_eq!(block_device_count, 2);
+ assert_eq!(socket_device_count, 1);
}
-/// Checks the given VirtIO device, if we know how to.
-///
-/// Returns true if the device was checked, or false if it was ignored.
-fn check_virtio_device(transport: impl Transport, device_type: DeviceType) -> bool {
- match device_type {
- DeviceType::Block => {
- let mut blk =
- VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
- info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
+/// Checks the given VirtIO block device.
+fn check_virtio_block_device(transport: PciTransport, index: usize) {
+ let mut blk = pci::VirtIOBlk::<HalImpl>::new(transport).expect("failed to create blk driver");
+ info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
+ match index {
+ 0 => {
assert_eq!(blk.capacity(), EXPECTED_SECTOR_COUNT as u64);
let mut data = [0; SECTOR_SIZE_BYTES * EXPECTED_SECTOR_COUNT];
for i in 0..EXPECTED_SECTOR_COUNT {
- blk.read_block(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
+ blk.read_blocks(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
.expect("Failed to read block device.");
}
for (i, chunk) in data.chunks(size_of::<u32>()).enumerate() {
assert_eq!(chunk, &(i as u32).to_le_bytes());
}
info!("Read expected data from block device.");
- true
}
- DeviceType::Console => {
- let mut console = VirtIOConsole::<HalImpl, _>::new(transport)
- .expect("Failed to create VirtIO console driver");
- info!("Found console device: {:?}", console.info());
- for &c in b"Hello VirtIO console\n" {
- console.send(c).expect("Failed to send character to VirtIO console device");
- }
- info!("Wrote to VirtIO console.");
- true
+ 1 => {
+ assert_eq!(blk.capacity(), 0);
+ let mut data = [0; SECTOR_SIZE_BYTES];
+ assert_eq!(blk.read_blocks(0, &mut data), Err(Error::IoError));
}
- _ => false,
+ _ => panic!("Unexpected VirtIO block device index {}.", index),
}
}
+/// Checks the given VirtIO socket device.
+fn check_virtio_socket_device(transport: PciTransport) {
+ let socket = pci::VirtIOSocket::<HalImpl>::new(transport)
+ .expect("Failed to create VirtIO socket driver");
+ info!("Found socket device: guest_cid={}", socket.guest_cid());
+}
+
+/// Checks the given VirtIO console device.
+fn check_virtio_console_device(transport: PciTransport) {
+ let mut console = VirtIOConsole::<HalImpl, PciTransport>::new(transport)
+ .expect("Failed to create VirtIO console driver");
+ info!("Found console device: {:?}", console.info());
+ for &c in b"Hello VirtIO console\n" {
+ console.send(c).expect("Failed to send character to VirtIO console device");
+ }
+ info!("Wrote to VirtIO console.");
+}
+
/// Gets the memory region in which BARs are allocated.
pub fn get_bar_region(pci_info: &PciInfo) -> MemoryRegion {
MemoryRegion::new(pci_info.bar_range.start as usize, pci_info.bar_range.end as usize)
@@ -98,11 +120,21 @@
struct HalImpl;
+/// SAFETY: See the 'Implementation Safety' comments on methods below for how they fulfill the
+/// safety requirements of the unsafe `Hal` trait.
unsafe impl Hal for HalImpl {
+ /// # Implementation Safety
+ ///
+ /// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
+ /// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
+ /// block of memory using `alloc_zeroed`, which is guaranteed to allocate valid, unique and
+ /// zeroed memory. We request an alignment of at least `PAGE_SIZE` from `alloc_zeroed`.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
- let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
- // Safe because the layout has a non-zero size.
+ let layout =
+ Layout::from_size_align(pages.checked_mul(PAGE_SIZE).unwrap(), PAGE_SIZE).unwrap();
+ assert_ne!(layout.size(), 0);
+ // SAFETY: We just checked that the layout has a non-zero size.
let vaddr = unsafe { alloc_zeroed(layout) };
let vaddr =
if let Some(vaddr) = NonNull::new(vaddr) { vaddr } else { handle_alloc_error(layout) };
@@ -113,14 +145,19 @@
unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
- // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
- // the layout is the same as was used then.
+ // SAFETY: The memory was allocated by `dma_alloc` above using the same allocator, and the
+ // layout is the same as was used then.
unsafe {
dealloc(vaddr.as_ptr(), layout);
}
0
}
+ /// # Implementation Safety
+ ///
+ /// The returned pointer must be valid because the `paddr` describes a valid MMIO region, and we
+ /// previously mapped the entire PCI MMIO range. It can't alias any other allocations because
+ /// the PCI MMIO range doesn't overlap with any other memory ranges.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
diff --git a/vmbase/example/tests/test.rs b/vmbase/example/tests/test.rs
index 9088f1a..17ff947 100644
--- a/vmbase/example/tests/test.rs
+++ b/vmbase/example/tests/test.rs
@@ -35,6 +35,8 @@
const VMBASE_EXAMPLE_PATH: &str =
"/data/local/tmp/vmbase_example.integration_test/arm64/vmbase_example.bin";
const TEST_DISK_IMAGE_PATH: &str = "/data/local/tmp/vmbase_example.integration_test/test_disk.img";
+const EMPTY_DISK_IMAGE_PATH: &str =
+ "/data/local/tmp/vmbase_example.integration_test/empty_disk.img";
/// Runs the vmbase_example VM as an unprotected VM via VirtualizationService.
#[test]
@@ -76,24 +78,43 @@
let test_image = ParcelFileDescriptor::new(test_image);
let disk_image = DiskImage { image: Some(test_image), writable: false, partitions: vec![] };
+ // Make file for empty test disk image.
+ let empty_image = File::options()
+ .create(true)
+ .read(true)
+ .write(true)
+ .truncate(true)
+ .open(EMPTY_DISK_IMAGE_PATH)
+ .with_context(|| format!("Failed to open empty disk image {}", EMPTY_DISK_IMAGE_PATH))?;
+ let empty_image = ParcelFileDescriptor::new(empty_image);
+ let empty_disk_image =
+ DiskImage { image: Some(empty_image), writable: false, partitions: vec![] };
+
let config = VirtualMachineConfig::RawConfig(VirtualMachineRawConfig {
name: String::from("VmBaseTest"),
kernel: None,
initrd: None,
params: None,
bootloader: Some(bootloader),
- disks: vec![disk_image],
+ disks: vec![disk_image, empty_disk_image],
protectedVm: false,
memoryMib: 300,
cpuTopology: CpuTopology::ONE_CPU,
platformVersion: "~1.0".to_string(),
- taskProfiles: vec![],
gdbPort: 0, // no gdb
+ ..Default::default()
});
let (handle, console) = android_log_fd()?;
let (mut log_reader, log_writer) = pipe()?;
- let vm = VmInstance::create(service.as_ref(), &config, Some(console), Some(log_writer), None)
- .context("Failed to create VM")?;
+ let vm = VmInstance::create(
+ service.as_ref(),
+ &config,
+ Some(console),
+ /* consoleIn */ None,
+ Some(log_writer),
+ None,
+ )
+ .context("Failed to create VM")?;
vm.start().context("Failed to start VM")?;
info!("Started example VM.");
@@ -122,6 +143,7 @@
// SAFETY: These are new FDs with no previous owner.
let reader = unsafe { File::from_raw_fd(reader_fd) };
+ // SAFETY: These are new FDs with no previous owner.
let writer = unsafe { File::from_raw_fd(writer_fd) };
Ok((reader, writer))
diff --git a/vmbase/sections.ld b/vmbase/sections.ld
index 5232d30..c7ef0ec 100644
--- a/vmbase/sections.ld
+++ b/vmbase/sections.ld
@@ -107,6 +107,9 @@
. = init_stack_pointer;
} >writable_data
+ /* Make our Bionic stack protector compatible with mainline LLVM */
+ __stack_chk_guard = __bionic_tls + 40;
+
/*
* Remove unused sections from the image.
*/
diff --git a/vmbase/src/arch.rs b/vmbase/src/arch.rs
index d7b63b3..d8bb8b2 100644
--- a/vmbase/src/arch.rs
+++ b/vmbase/src/arch.rs
@@ -19,8 +19,8 @@
macro_rules! read_sysreg {
($sysreg:literal) => {{
let mut r: usize;
- // Safe because it reads a system register and does not affect Rust.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Reading a system register does not affect memory.
unsafe {
core::arch::asm!(
concat!("mrs {}, ", $sysreg),
@@ -53,8 +53,8 @@
#[macro_export]
macro_rules! isb {
() => {{
- // Safe because this is just a memory barrier and does not affect Rust.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: memory barriers do not affect Rust's memory model.
unsafe {
core::arch::asm!("isb", options(nomem, nostack, preserves_flags));
}
@@ -65,8 +65,8 @@
#[macro_export]
macro_rules! dsb {
($option:literal) => {{
- // Safe because this is just a memory barrier and does not affect Rust.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: memory barriers do not affect Rust's memory model.
unsafe {
core::arch::asm!(concat!("dsb ", $option), options(nomem, nostack, preserves_flags));
}
@@ -79,9 +79,9 @@
($option:literal, $asid:expr, $addr:expr) => {{
let asid: usize = $asid;
let addr: usize = $addr;
- // Safe because it invalidates TLB and doesn't affect Rust. When the address matches a
- // block entry larger than the page size, all translations for the block are invalidated.
#[allow(unused_unsafe)] // In case the macro is used within an unsafe block.
+ // SAFETY: Invalidating the TLB doesn't affect Rust. When the address matches a
+ // block entry larger than the page size, all translations for the block are invalidated.
unsafe {
core::arch::asm!(
concat!("tlbi ", $option, ", {x}"),
diff --git a/vmbase/src/bionic.rs b/vmbase/src/bionic.rs
index 69da521..f8db1fe 100644
--- a/vmbase/src/bionic.rs
+++ b/vmbase/src/bionic.rs
@@ -22,13 +22,39 @@
use core::str;
use crate::console;
+use crate::cstr;
use crate::eprintln;
-use crate::linker;
+use crate::rand::fill_with_entropy;
+use crate::read_sysreg;
const EOF: c_int = -1;
+const EIO: c_int = 5;
-/// Reference to __stack_chk_guard.
-pub static STACK_CHK_GUARD: &u64 = unsafe { &linker::__stack_chk_guard };
+/// Bionic thread-local storage.
+#[repr(C)]
+pub struct Tls {
+ /// Unused.
+ _unused: [u8; 40],
+ /// Used by the compiler as the stack canary value.
+ pub stack_guard: u64,
+}
+
+/// Bionic TLS.
+///
+/// Provides the TLS used by Bionic code. This is unique as vmbase only supports one thread.
+///
+/// Note that the linker script re-exports __bionic_tls.stack_guard as __stack_chk_guard for
+/// compatibility with non-Bionic LLVM.
+#[link_section = ".data.stack_protector"]
+#[export_name = "__bionic_tls"]
+pub static mut TLS: Tls = Tls { _unused: [0; 40], stack_guard: 0 };
+
+/// Gets a reference to the TLS from the dedicated system register.
+pub fn __get_tls() -> &'static mut Tls {
+ let tpidr = read_sysreg!("tpidr_el0");
+ // SAFETY: The register is currently only written to once, from entry.S, with a valid value.
+ unsafe { &mut *(tpidr as *mut Tls) }
+}
#[no_mangle]
extern "C" fn __stack_chk_fail() -> ! {
@@ -46,27 +72,49 @@
#[no_mangle]
unsafe extern "C" fn __errno() -> *mut c_int {
- &mut ERRNO as *mut _
+ // SAFETY: C functions which call this are only called from the main thread, not from exception
+ // handlers.
+ unsafe { &mut ERRNO as *mut _ }
}
fn set_errno(value: c_int) {
- // SAFETY - vmbase is currently single-threaded.
+ // SAFETY: vmbase is currently single-threaded.
unsafe { ERRNO = value };
}
+fn get_errno() -> c_int {
+ // SAFETY: vmbase is currently single-threaded.
+ unsafe { ERRNO }
+}
+
+#[no_mangle]
+extern "C" fn getentropy(buffer: *mut c_void, length: usize) -> c_int {
+ if length > 256 {
+ // The maximum permitted value for the length argument is 256.
+ set_errno(EIO);
+ return -1;
+ }
+
+ // SAFETY: Just like libc, we need to assume that `buffer` is valid.
+ let buffer = unsafe { slice::from_raw_parts_mut(buffer.cast::<u8>(), length) };
+ fill_with_entropy(buffer).unwrap();
+
+ 0
+}
+
/// Reports a fatal error detected by Bionic.
///
/// # Safety
///
-/// Input strings `prefix` and `format` must be properly NULL-terminated.
+/// Input strings `prefix` and `format` must be valid and properly NUL-terminated.
///
/// # Note
///
/// This Rust function is missing the last argument of its C/C++ counterpart, a va_list.
#[no_mangle]
unsafe extern "C" fn async_safe_fatal_va_list(prefix: *const c_char, format: *const c_char) {
- let prefix = CStr::from_ptr(prefix);
- let format = CStr::from_ptr(format);
+ // SAFETY: The caller guaranteed that both strings were valid and NUL-terminated.
+ let (prefix, format) = unsafe { (CStr::from_ptr(prefix), CStr::from_ptr(format)) };
if let (Ok(prefix), Ok(format)) = (prefix.to_str(), format.to_str()) {
// We don't bother with printf formatting.
@@ -100,7 +148,7 @@
#[no_mangle]
extern "C" fn fputs(c_str: *const c_char, stream: usize) -> c_int {
- // SAFETY - Just like libc, we need to assume that `s` is a valid NULL-terminated string.
+ // SAFETY: Just like libc, we need to assume that `s` is a valid NULL-terminated string.
let c_str = unsafe { CStr::from_ptr(c_str) };
if let (Ok(s), Ok(_)) = (c_str.to_str(), File::try_from(stream)) {
@@ -116,7 +164,7 @@
extern "C" fn fwrite(ptr: *const c_void, size: usize, nmemb: usize, stream: usize) -> usize {
let length = size.saturating_mul(nmemb);
- // SAFETY - Just like libc, we need to assume that `ptr` is valid.
+ // SAFETY: Just like libc, we need to assume that `ptr` is valid.
let bytes = unsafe { slice::from_raw_parts(ptr as *const u8, length) };
if let (Ok(s), Ok(_)) = (str::from_utf8(bytes), File::try_from(stream)) {
@@ -129,142 +177,168 @@
#[no_mangle]
extern "C" fn strerror(n: c_int) -> *mut c_char {
- // Messages taken from errno(1).
- let s = match n {
- 0 => "Success",
- 1 => "Operation not permitted",
- 2 => "No such file or directory",
- 3 => "No such process",
- 4 => "Interrupted system call",
- 5 => "Input/output error",
- 6 => "No such device or address",
- 7 => "Argument list too long",
- 8 => "Exec format error",
- 9 => "Bad file descriptor",
- 10 => "No child processes",
- 11 => "Resource temporarily unavailable",
- 12 => "Cannot allocate memory",
- 13 => "Permission denied",
- 14 => "Bad address",
- 15 => "Block device required",
- 16 => "Device or resource busy",
- 17 => "File exists",
- 18 => "Invalid cross-device link",
- 19 => "No such device",
- 20 => "Not a directory",
- 21 => "Is a directory",
- 22 => "Invalid argument",
- 23 => "Too many open files in system",
- 24 => "Too many open files",
- 25 => "Inappropriate ioctl for device",
- 26 => "Text file busy",
- 27 => "File too large",
- 28 => "No space left on device",
- 29 => "Illegal seek",
- 30 => "Read-only file system",
- 31 => "Too many links",
- 32 => "Broken pipe",
- 33 => "Numerical argument out of domain",
- 34 => "Numerical result out of range",
- 35 => "Resource deadlock avoided",
- 36 => "File name too long",
- 37 => "No locks available",
- 38 => "Function not implemented",
- 39 => "Directory not empty",
- 40 => "Too many levels of symbolic links",
- 42 => "No message of desired type",
- 43 => "Identifier removed",
- 44 => "Channel number out of range",
- 45 => "Level 2 not synchronized",
- 46 => "Level 3 halted",
- 47 => "Level 3 reset",
- 48 => "Link number out of range",
- 49 => "Protocol driver not attached",
- 50 => "No CSI structure available",
- 51 => "Level 2 halted",
- 52 => "Invalid exchange",
- 53 => "Invalid request descriptor",
- 54 => "Exchange full",
- 55 => "No anode",
- 56 => "Invalid request code",
- 57 => "Invalid slot",
- 59 => "Bad font file format",
- 60 => "Device not a stream",
- 61 => "No data available",
- 62 => "Timer expired",
- 63 => "Out of streams resources",
- 64 => "Machine is not on the network",
- 65 => "Package not installed",
- 66 => "Object is remote",
- 67 => "Link has been severed",
- 68 => "Advertise error",
- 69 => "Srmount error",
- 70 => "Communication error on send",
- 71 => "Protocol error",
- 72 => "Multihop attempted",
- 73 => "RFS specific error",
- 74 => "Bad message",
- 75 => "Value too large for defined data type",
- 76 => "Name not unique on network",
- 77 => "File descriptor in bad state",
- 78 => "Remote address changed",
- 79 => "Can not access a needed shared library",
- 80 => "Accessing a corrupted shared library",
- 81 => ".lib section in a.out corrupted",
- 82 => "Attempting to link in too many shared libraries",
- 83 => "Cannot exec a shared library directly",
- 84 => "Invalid or incomplete multibyte or wide character",
- 85 => "Interrupted system call should be restarted",
- 86 => "Streams pipe error",
- 87 => "Too many users",
- 88 => "Socket operation on non-socket",
- 89 => "Destination address required",
- 90 => "Message too long",
- 91 => "Protocol wrong type for socket",
- 92 => "Protocol not available",
- 93 => "Protocol not supported",
- 94 => "Socket type not supported",
- 95 => "Operation not supported",
- 96 => "Protocol family not supported",
- 97 => "Address family not supported by protocol",
- 98 => "Address already in use",
- 99 => "Cannot assign requested address",
- 100 => "Network is down",
- 101 => "Network is unreachable",
- 102 => "Network dropped connection on reset",
- 103 => "Software caused connection abort",
- 104 => "Connection reset by peer",
- 105 => "No buffer space available",
- 106 => "Transport endpoint is already connected",
- 107 => "Transport endpoint is not connected",
- 108 => "Cannot send after transport endpoint shutdown",
- 109 => "Too many references: cannot splice",
- 110 => "Connection timed out",
- 111 => "Connection refused",
- 112 => "Host is down",
- 113 => "No route to host",
- 114 => "Operation already in progress",
- 115 => "Operation now in progress",
- 116 => "Stale file handle",
- 117 => "Structure needs cleaning",
- 118 => "Not a XENIX named type file",
- 119 => "No XENIX semaphores available",
- 120 => "Is a named type file",
- 121 => "Remote I/O error",
- 122 => "Disk quota exceeded",
- 123 => "No medium found",
- 124 => "Wrong medium type",
- 125 => "Operation canceled",
- 126 => "Required key not available",
- 127 => "Key has expired",
- 128 => "Key has been revoked",
- 129 => "Key was rejected by service",
- 130 => "Owner died",
- 131 => "State not recoverable",
- 132 => "Operation not possible due to RF-kill",
- 133 => "Memory page has hardware error",
- _ => "Unknown errno value",
+ cstr_error(n).as_ptr().cast_mut().cast()
+}
+
+#[no_mangle]
+extern "C" fn perror(s: *const c_char) {
+ let prefix = if s.is_null() {
+ None
+ } else {
+ // SAFETY: Just like libc, we need to assume that `s` is a valid NULL-terminated string.
+ let c_str = unsafe { CStr::from_ptr(s) };
+ // TODO(Rust 1.71): if c_str.is_empty() {
+ if c_str.to_bytes().is_empty() {
+ None
+ } else {
+ Some(c_str.to_str().unwrap())
+ }
};
- s.as_ptr().cast_mut().cast()
+ let error = cstr_error(get_errno()).to_str().unwrap();
+
+ if let Some(prefix) = prefix {
+ eprintln!("{prefix}: {error}");
+ } else {
+ eprintln!("{error}");
+ }
+}
+
+fn cstr_error(n: c_int) -> &'static CStr {
+ // Messages taken from errno(1).
+ match n {
+ 0 => cstr!("Success"),
+ 1 => cstr!("Operation not permitted"),
+ 2 => cstr!("No such file or directory"),
+ 3 => cstr!("No such process"),
+ 4 => cstr!("Interrupted system call"),
+ 5 => cstr!("Input/output error"),
+ 6 => cstr!("No such device or address"),
+ 7 => cstr!("Argument list too long"),
+ 8 => cstr!("Exec format error"),
+ 9 => cstr!("Bad file descriptor"),
+ 10 => cstr!("No child processes"),
+ 11 => cstr!("Resource temporarily unavailable"),
+ 12 => cstr!("Cannot allocate memory"),
+ 13 => cstr!("Permission denied"),
+ 14 => cstr!("Bad address"),
+ 15 => cstr!("Block device required"),
+ 16 => cstr!("Device or resource busy"),
+ 17 => cstr!("File exists"),
+ 18 => cstr!("Invalid cross-device link"),
+ 19 => cstr!("No such device"),
+ 20 => cstr!("Not a directory"),
+ 21 => cstr!("Is a directory"),
+ 22 => cstr!("Invalid argument"),
+ 23 => cstr!("Too many open files in system"),
+ 24 => cstr!("Too many open files"),
+ 25 => cstr!("Inappropriate ioctl for device"),
+ 26 => cstr!("Text file busy"),
+ 27 => cstr!("File too large"),
+ 28 => cstr!("No space left on device"),
+ 29 => cstr!("Illegal seek"),
+ 30 => cstr!("Read-only file system"),
+ 31 => cstr!("Too many links"),
+ 32 => cstr!("Broken pipe"),
+ 33 => cstr!("Numerical argument out of domain"),
+ 34 => cstr!("Numerical result out of range"),
+ 35 => cstr!("Resource deadlock avoided"),
+ 36 => cstr!("File name too long"),
+ 37 => cstr!("No locks available"),
+ 38 => cstr!("Function not implemented"),
+ 39 => cstr!("Directory not empty"),
+ 40 => cstr!("Too many levels of symbolic links"),
+ 42 => cstr!("No message of desired type"),
+ 43 => cstr!("Identifier removed"),
+ 44 => cstr!("Channel number out of range"),
+ 45 => cstr!("Level 2 not synchronized"),
+ 46 => cstr!("Level 3 halted"),
+ 47 => cstr!("Level 3 reset"),
+ 48 => cstr!("Link number out of range"),
+ 49 => cstr!("Protocol driver not attached"),
+ 50 => cstr!("No CSI structure available"),
+ 51 => cstr!("Level 2 halted"),
+ 52 => cstr!("Invalid exchange"),
+ 53 => cstr!("Invalid request descriptor"),
+ 54 => cstr!("Exchange full"),
+ 55 => cstr!("No anode"),
+ 56 => cstr!("Invalid request code"),
+ 57 => cstr!("Invalid slot"),
+ 59 => cstr!("Bad font file format"),
+ 60 => cstr!("Device not a stream"),
+ 61 => cstr!("No data available"),
+ 62 => cstr!("Timer expired"),
+ 63 => cstr!("Out of streams resources"),
+ 64 => cstr!("Machine is not on the network"),
+ 65 => cstr!("Package not installed"),
+ 66 => cstr!("Object is remote"),
+ 67 => cstr!("Link has been severed"),
+ 68 => cstr!("Advertise error"),
+ 69 => cstr!("Srmount error"),
+ 70 => cstr!("Communication error on send"),
+ 71 => cstr!("Protocol error"),
+ 72 => cstr!("Multihop attempted"),
+ 73 => cstr!("RFS specific error"),
+ 74 => cstr!("Bad message"),
+ 75 => cstr!("Value too large for defined data type"),
+ 76 => cstr!("Name not unique on network"),
+ 77 => cstr!("File descriptor in bad state"),
+ 78 => cstr!("Remote address changed"),
+ 79 => cstr!("Can not access a needed shared library"),
+ 80 => cstr!("Accessing a corrupted shared library"),
+ 81 => cstr!(".lib section in a.out corrupted"),
+ 82 => cstr!("Attempting to link in too many shared libraries"),
+ 83 => cstr!("Cannot exec a shared library directly"),
+ 84 => cstr!("Invalid or incomplete multibyte or wide character"),
+ 85 => cstr!("Interrupted system call should be restarted"),
+ 86 => cstr!("Streams pipe error"),
+ 87 => cstr!("Too many users"),
+ 88 => cstr!("Socket operation on non-socket"),
+ 89 => cstr!("Destination address required"),
+ 90 => cstr!("Message too long"),
+ 91 => cstr!("Protocol wrong type for socket"),
+ 92 => cstr!("Protocol not available"),
+ 93 => cstr!("Protocol not supported"),
+ 94 => cstr!("Socket type not supported"),
+ 95 => cstr!("Operation not supported"),
+ 96 => cstr!("Protocol family not supported"),
+ 97 => cstr!("Address family not supported by protocol"),
+ 98 => cstr!("Address already in use"),
+ 99 => cstr!("Cannot assign requested address"),
+ 100 => cstr!("Network is down"),
+ 101 => cstr!("Network is unreachable"),
+ 102 => cstr!("Network dropped connection on reset"),
+ 103 => cstr!("Software caused connection abort"),
+ 104 => cstr!("Connection reset by peer"),
+ 105 => cstr!("No buffer space available"),
+ 106 => cstr!("Transport endpoint is already connected"),
+ 107 => cstr!("Transport endpoint is not connected"),
+ 108 => cstr!("Cannot send after transport endpoint shutdown"),
+ 109 => cstr!("Too many references: cannot splice"),
+ 110 => cstr!("Connection timed out"),
+ 111 => cstr!("Connection refused"),
+ 112 => cstr!("Host is down"),
+ 113 => cstr!("No route to host"),
+ 114 => cstr!("Operation already in progress"),
+ 115 => cstr!("Operation now in progress"),
+ 116 => cstr!("Stale file handle"),
+ 117 => cstr!("Structure needs cleaning"),
+ 118 => cstr!("Not a XENIX named type file"),
+ 119 => cstr!("No XENIX semaphores available"),
+ 120 => cstr!("Is a named type file"),
+ 121 => cstr!("Remote I/O error"),
+ 122 => cstr!("Disk quota exceeded"),
+ 123 => cstr!("No medium found"),
+ 124 => cstr!("Wrong medium type"),
+ 125 => cstr!("Operation canceled"),
+ 126 => cstr!("Required key not available"),
+ 127 => cstr!("Key has expired"),
+ 128 => cstr!("Key has been revoked"),
+ 129 => cstr!("Key was rejected by service"),
+ 130 => cstr!("Owner died"),
+ 131 => cstr!("State not recoverable"),
+ 132 => cstr!("Operation not possible due to RF-kill"),
+ 133 => cstr!("Memory page has hardware error"),
+ _ => cstr!("Unknown errno value"),
+ }
}
diff --git a/vmbase/src/console.rs b/vmbase/src/console.rs
index 7c8ddf6..a7d37b4 100644
--- a/vmbase/src/console.rs
+++ b/vmbase/src/console.rs
@@ -25,7 +25,7 @@
/// Initialises a new instance of the UART driver and returns it.
fn create() -> Uart {
- // Safe because BASE_ADDRESS is the base of the MMIO region for a UART and is mapped as device
+ // SAFETY: BASE_ADDRESS is the base of the MMIO region for a UART and is mapped as device
// memory.
unsafe { Uart::new(BASE_ADDRESS) }
}
@@ -51,7 +51,7 @@
write(CONSOLE.lock().as_mut().unwrap(), format_args).unwrap();
}
-/// Reinitialises the UART driver and writes a string to it.
+/// Reinitializes the UART driver and writes a string to it.
///
/// This is intended for use in situations where the UART may be in an unknown state or the global
/// instance may be locked, such as in an exception handler or panic handler.
@@ -60,7 +60,7 @@
let _ = uart.write_str(s);
}
-/// Reinitialises the UART driver and writes a formatted string to it.
+/// Reinitializes the UART driver and writes a formatted string to it.
///
/// This is intended for use in situations where the UART may be in an unknown state or the global
/// instance may be locked, such as in an exception handler or panic handler.
@@ -71,7 +71,7 @@
/// Prints the given formatted string to the console, followed by a newline.
///
-/// Panics if the console has not yet been initialised. May hang if used in an exception context;
+/// Panics if the console has not yet been initialized. May hang if used in an exception context;
/// use `eprintln!` instead.
macro_rules! println {
() => ($crate::console::write_str("\n"));
diff --git a/vmbase/src/entry.rs b/vmbase/src/entry.rs
index 8cdfe77..2ff66cc 100644
--- a/vmbase/src/entry.rs
+++ b/vmbase/src/entry.rs
@@ -14,12 +14,54 @@
//! Rust entry point.
-use crate::{console, power::shutdown};
+use crate::{
+ bionic, console, heap, logger,
+ power::{reboot, shutdown},
+ rand,
+};
+use core::mem::size_of;
+use hyp::{self, get_mmio_guard};
+
+fn try_console_init() -> Result<(), hyp::Error> {
+ console::init();
+
+ if let Some(mmio_guard) = get_mmio_guard() {
+ mmio_guard.enroll()?;
+ mmio_guard.validate_granule()?;
+ mmio_guard.map(console::BASE_ADDRESS)?;
+ }
+
+ Ok(())
+}
/// This is the entry point to the Rust code, called from the binary entry point in `entry.S`.
#[no_mangle]
extern "C" fn rust_entry(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
- console::init();
+ // SAFETY: Only called once, from here, and inaccessible to client code.
+ unsafe { heap::init() };
+
+ if try_console_init().is_err() {
+ // Don't panic (or log) here to avoid accessing the console.
+ reboot()
+ }
+
+ logger::init().expect("Failed to initialize the logger");
+ // The max log level defaults to Off (as in the log crate); clients should call log::set_max_level.
+
+ const SIZE_OF_STACK_GUARD: usize = size_of::<u64>();
+ let mut stack_guard = [0u8; SIZE_OF_STACK_GUARD];
+ // We keep a null byte at the top of the stack guard to act as a string terminator.
+ let random_guard = &mut stack_guard[..(SIZE_OF_STACK_GUARD - 1)];
+
+ rand::init().expect("Failed to initialize a source of entropy");
+ rand::fill_with_entropy(random_guard).expect("Failed to get stack canary entropy");
+ bionic::__get_tls().stack_guard = u64::from_ne_bytes(stack_guard);
+
+ // Note: If rust_entry ever returned (which it can't, being declared `-> !`), the compiler-injected
+ // stack guard comparison would detect a mismatch and call __stack_chk_fail.
+
+ // SAFETY: `main` is provided by the application using the `main!` macro, and we make sure it
+ // has the right type.
unsafe {
main(x0, x1, x2, x3);
}
@@ -33,16 +75,21 @@
/// Marks the main function of the binary.
///
+/// Once main is entered, it can assume that:
+/// - The panic_handler has been configured and panic!() and friends are available;
+/// - The global_allocator has been configured and heap memory is available;
+/// - The logger has been configured and the log::{info, warn, error, ...} macros are available.
+///
/// Example:
///
/// ```rust
-/// use vmbase::{logger, main};
+/// use vmbase::main;
/// use log::{info, LevelFilter};
///
/// main!(my_main);
///
/// fn my_main() {
-/// logger::init(LevelFilter::Info).unwrap();
+/// log::set_max_level(LevelFilter::Info);
/// info!("Hello world");
/// }
/// ```
diff --git a/vmbase/src/exceptions.rs b/vmbase/src/exceptions.rs
new file mode 100644
index 0000000..7833334
--- /dev/null
+++ b/vmbase/src/exceptions.rs
@@ -0,0 +1,139 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Helper functions and structs for exception handlers.
+
+use crate::{
+ console, eprintln,
+ memory::{page_4kb_of, MemoryTrackerError},
+ read_sysreg,
+};
+use aarch64_paging::paging::VirtualAddress;
+use core::fmt;
+
+const UART_PAGE: usize = page_4kb_of(console::BASE_ADDRESS);
+
+/// Represents an error that can occur while handling an exception.
+#[derive(Debug)]
+pub enum HandleExceptionError {
+ /// The page table is unavailable.
+ PageTableUnavailable,
+ /// The page table has not been initialized.
+ PageTableNotInitialized,
+ /// An internal error occurred in the memory tracker.
+ InternalError(MemoryTrackerError),
+ /// An unknown exception occurred.
+ UnknownException,
+}
+
+impl From<MemoryTrackerError> for HandleExceptionError {
+ fn from(other: MemoryTrackerError) -> Self {
+ Self::InternalError(other)
+ }
+}
+
+impl fmt::Display for HandleExceptionError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::PageTableUnavailable => write!(f, "Page table is not available."),
+ Self::PageTableNotInitialized => write!(f, "Page table is not initialized."),
+ Self::InternalError(e) => write!(f, "Error while updating page table: {e}"),
+ Self::UnknownException => write!(f, "An unknown exception occurred, not handled."),
+ }
+ }
+}
+
+/// Represents the possible types of exception syndrome register (ESR) values.
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum Esr {
+ /// Data abort due to translation fault.
+ DataAbortTranslationFault,
+ /// Data abort due to permission fault.
+ DataAbortPermissionFault,
+ /// Data abort due to a synchronous external abort.
+ DataAbortSyncExternalAbort,
+ /// An unknown ESR value.
+ Unknown(usize),
+}
+
+impl Esr {
+ const EXT_DABT_32BIT: usize = 0x96000010;
+ const TRANSL_FAULT_BASE_32BIT: usize = 0x96000004;
+ const TRANSL_FAULT_ISS_MASK_32BIT: usize = !0x143;
+ const PERM_FAULT_BASE_32BIT: usize = 0x9600004C;
+ const PERM_FAULT_ISS_MASK_32BIT: usize = !0x103;
+}
+
+impl From<usize> for Esr {
+ fn from(esr: usize) -> Self {
+ if esr == Self::EXT_DABT_32BIT {
+ Self::DataAbortSyncExternalAbort
+ } else if esr & Self::TRANSL_FAULT_ISS_MASK_32BIT == Self::TRANSL_FAULT_BASE_32BIT {
+ Self::DataAbortTranslationFault
+ } else if esr & Self::PERM_FAULT_ISS_MASK_32BIT == Self::PERM_FAULT_BASE_32BIT {
+ Self::DataAbortPermissionFault
+ } else {
+ Self::Unknown(esr)
+ }
+ }
+}
+
+impl fmt::Display for Esr {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::DataAbortSyncExternalAbort => write!(f, "Synchronous external abort"),
+ Self::DataAbortTranslationFault => write!(f, "Translation fault"),
+ Self::DataAbortPermissionFault => write!(f, "Permission fault"),
+ Self::Unknown(v) => write!(f, "Unknown exception esr={v:#08x}"),
+ }
+ }
+}
+/// A struct representing an Armv8 exception.
+pub struct ArmException {
+ /// The value of the exception syndrome register.
+ pub esr: Esr,
+ /// The faulting virtual address read from the fault address register.
+ pub far: VirtualAddress,
+}
+
+impl fmt::Display for ArmException {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ArmException: esr={}, far={}", self.esr, self.far)
+ }
+}
+
+impl ArmException {
+ /// Reads the values of the EL1 exception syndrome register (`esr_el1`)
+ /// and fault address register (`far_el1`) and returns a new instance of
+ /// `ArmException` with these values.
+ pub fn from_el1_regs() -> Self {
+ let esr: Esr = read_sysreg!("esr_el1").into();
+ let far = read_sysreg!("far_el1");
+ Self { esr, far: VirtualAddress(far) }
+ }
+
+ /// Prints the details of the given object and the exception, unless the exception concerns the UART.
+ pub fn print<T: fmt::Display>(&self, exception_name: &str, obj: T, elr: u64) {
+ // Don't print to the UART if we are handling an exception it could raise.
+ if !self.is_uart_exception() {
+ eprintln!("{exception_name}");
+ eprintln!("{obj}");
+ eprintln!("{}, elr={:#08x}", self, elr);
+ }
+ }
+
+ fn is_uart_exception(&self) -> bool {
+ self.esr == Esr::DataAbortSyncExternalAbort && page_4kb_of(self.far.0) == UART_PAGE
+ }
+}
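A hedged sketch of how a client's synchronous exception handler might use these new helpers; `recover_from_fault` is purely illustrative and stands in for client-specific handling such as lazy MMIO mapping:

```rust
use vmbase::exceptions::{ArmException, Esr, HandleExceptionError};

fn handle_sync_exception(elr: u64) -> Result<(), HandleExceptionError> {
    let exception = ArmException::from_el1_regs();
    match exception.esr {
        // Translation faults may be recoverable, e.g. by mapping the faulting page.
        Esr::DataAbortTranslationFault => recover_from_fault(&exception),
        // Anything else is fatal; print() stays silent for external aborts on the
        // UART page, so logging here cannot recurse into another UART fault.
        _ => {
            exception.print("sync_exception_current", "unhandled", elr);
            Err(HandleExceptionError::UnknownException)
        }
    }
}

// Illustrative placeholder for client-specific fault recovery.
fn recover_from_fault(_exception: &ArmException) -> Result<(), HandleExceptionError> {
    Ok(())
}
```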
diff --git a/pvmfw/src/heap.rs b/vmbase/src/heap.rs
similarity index 83%
rename from pvmfw/src/heap.rs
rename to vmbase/src/heap.rs
index 151049e..c8b76ac 100644
--- a/pvmfw/src/heap.rs
+++ b/vmbase/src/heap.rs
@@ -27,15 +27,31 @@
use buddy_system_allocator::LockedHeap;
-/// 128 KiB
-const HEAP_SIZE: usize = 0x20000;
-static mut HEAP: [u8; HEAP_SIZE] = [0; HEAP_SIZE];
+/// Configures the size of the global allocator.
+#[macro_export]
+macro_rules! configure_heap {
+ ($len:expr) => {
+ static mut __HEAP_ARRAY: [u8; $len] = [0; $len];
+ #[export_name = "HEAP"]
+ // SAFETY: HEAP will only be accessed once as mut, from init().
+ static mut __HEAP: &'static mut [u8] = unsafe { &mut __HEAP_ARRAY };
+ };
+}
+
+extern "Rust" {
+ /// Slice used by the global allocator, configured using configure_heap!().
+ static mut HEAP: &'static mut [u8];
+}
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::new();
-/// SAFETY: Must be called no more than once.
-pub unsafe fn init() {
+/// Initialize the global allocator.
+///
+/// # Safety
+///
+/// Must be called no more than once.
+pub(crate) unsafe fn init() {
// SAFETY: Nothing else accesses this memory, and we hand it over to the heap to manage and
// never touch it again. The heap is locked, so there cannot be any races.
let (start, size) = unsafe { (HEAP.as_mut_ptr() as usize, HEAP.len()) };
@@ -49,12 +65,12 @@
pub fn aligned_boxed_slice(size: usize, align: usize) -> Option<Box<[u8]>> {
let size = NonZeroUsize::new(size)?.get();
let layout = Layout::from_size_align(size, align).ok()?;
- // SAFETY - We verify that `size` and the returned `ptr` are non-null.
+ // SAFETY: We verify that `size` and the returned `ptr` are non-null.
let ptr = unsafe { alloc(layout) };
let ptr = NonNull::new(ptr)?.as_ptr();
let slice_ptr = ptr::slice_from_raw_parts_mut(ptr, size);
- // SAFETY - The memory was allocated using the proper layout by our global_allocator.
+ // SAFETY: The memory was allocated using the proper layout by our global_allocator.
Some(unsafe { Box::from_raw(slice_ptr) })
}
@@ -84,9 +100,9 @@
heap_range.contains(&(ptr.as_ptr() as *const u8)),
"free() called on a pointer that is not part of the HEAP: {ptr:?}"
);
+ // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
+ // usize.
let (ptr, size) = unsafe {
- // SAFETY: ptr is non-null and was allocated by allocate, which prepends a correctly aligned
- // usize.
let ptr = ptr.cast::<usize>().as_ptr().offset(-1);
(ptr, *ptr)
};
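To make the new contract concrete: every vmbase binary now declares its heap size with `configure_heap!` (as `vmbase_example` does elsewhere in this change), after which ordinary heap allocation, including the aligned helper above, is available. A minimal sketch, omitting the usual `#![no_std]`/`main!` boilerplate, with sizes chosen arbitrarily:

```rust
extern crate alloc;

use vmbase::{configure_heap, heap::aligned_boxed_slice, memory::SIZE_64KB};

// Reserves the backing array and exports it as the HEAP slice consumed by heap::init().
configure_heap!(SIZE_64KB);

/// Allocates a 4 KiB-aligned buffer of `size` bytes, e.g. for a DMA-style use case.
fn page_aligned_buffer(size: usize) -> Option<alloc::boxed::Box<[u8]>> {
    aligned_boxed_slice(size, 4096)
}
```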
diff --git a/pvmfw/src/hvc.rs b/vmbase/src/hvc.rs
similarity index 77%
rename from pvmfw/src/hvc.rs
rename to vmbase/src/hvc.rs
index e03c9d3..1197143 100644
--- a/pvmfw/src/hvc.rs
+++ b/vmbase/src/hvc.rs
@@ -21,25 +21,23 @@
hvc64,
};
-// TODO(b/272226230): Move all the trng functions to trng module
const ARM_SMCCC_TRNG_VERSION: u32 = 0x8400_0050;
-#[allow(dead_code)]
const ARM_SMCCC_TRNG_FEATURES: u32 = 0x8400_0051;
#[allow(dead_code)]
const ARM_SMCCC_TRNG_GET_UUID: u32 = 0x8400_0052;
#[allow(dead_code)]
const ARM_SMCCC_TRNG_RND32: u32 = 0x8400_0053;
-const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
+pub const ARM_SMCCC_TRNG_RND64: u32 = 0xc400_0053;
/// Returns the (major, minor) version tuple, as defined by the SMCCC TRNG.
-pub fn trng_version() -> trng::Result<(u16, u16)> {
+pub fn trng_version() -> trng::Result<trng::Version> {
let args = [0u64; 17];
let version = positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_VERSION, args)[0])?;
- Ok(((version >> 16) as u16, version as u16))
+ (version as u32 as i32).try_into()
}
-pub type TrngRng64Entropy = (u64, u64, u64);
+pub type TrngRng64Entropy = [u64; 3];
pub fn trng_rnd64(nbits: u64) -> trng::Result<TrngRng64Entropy> {
let mut args = [0u64; 17];
@@ -48,5 +46,12 @@
let regs = hvc64(ARM_SMCCC_TRNG_RND64, args);
success_or_error_64::<Error>(regs[0])?;
- Ok((regs[1], regs[2], regs[3]))
+ Ok([regs[1], regs[2], regs[3]])
+}
+
+pub fn trng_features(fid: u32) -> trng::Result<u64> {
+ let mut args = [0u64; 17];
+ args[0] = fid as u64;
+
+ positive_or_error_64::<Error>(hvc64(ARM_SMCCC_TRNG_FEATURES, args)[0])
}
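For reference, a hedged sketch of the probe sequence that replaces the deleted `rnd_reg` assembly macro, written only in terms of the helpers in this file; the real logic lives in the new `vmbase::rand` module, which is not part of this hunk:

```rust
fn probe_trng() -> trng::Result<TrngRng64Entropy> {
    // The SMCCC TRNG ABI must report version 1.0 or later.
    let version = trng_version()?;
    if version < (trng::Version { major: 1, minor: 0 }) {
        return Err(trng::Error::NotSupported);
    }
    // TRNG_RND64 must be implemented...
    trng_features(ARM_SMCCC_TRNG_RND64)?;
    // ...and a request for 64 bits of entropy should then succeed.
    trng_rnd64(64)
}
```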
diff --git a/pvmfw/src/hvc/trng.rs b/vmbase/src/hvc/trng.rs
similarity index 68%
rename from pvmfw/src/hvc/trng.rs
rename to vmbase/src/hvc/trng.rs
index 6331d66..efb86f6 100644
--- a/pvmfw/src/hvc/trng.rs
+++ b/vmbase/src/hvc/trng.rs
@@ -16,7 +16,7 @@
use core::result;
/// Standard SMCCC TRNG error values as described in DEN 0098 1.0 REL0.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub enum Error {
/// The call is not supported by the implementation.
NotSupported,
@@ -55,3 +55,40 @@
}
pub type Result<T> = result::Result<T, Error>;
+
+/// A version of the SMCCC TRNG interface.
+#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
+pub struct Version {
+ pub major: u16,
+ pub minor: u16,
+}
+
+impl fmt::Display for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}.{}", self.major, self.minor)
+ }
+}
+
+impl fmt::Debug for Version {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl TryFrom<i32> for Version {
+ type Error = Error;
+
+ fn try_from(value: i32) -> core::result::Result<Self, Error> {
+ if value < 0 {
+ Err((value as i64).into())
+ } else {
+ Ok(Self { major: (value >> 16) as u16, minor: value as u16 })
+ }
+ }
+}
+
+impl From<Version> for u32 {
+ fn from(version: Version) -> Self {
+ (u32::from(version.major) << 16) | u32::from(version.minor)
+ }
+}
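A sketch of a unit test that could sit alongside the new `Version` type (hypothetical, assuming host-side tests are wired up for this crate), using the raw encoding returned by the TRNG_VERSION call:

```rust
#[test]
fn version_encoding_round_trips() {
    // 0x0001_0000 is the raw TRNG_VERSION value for ABI 1.0.
    let v = Version::try_from(0x0001_0000).unwrap();
    assert_eq!((v.major, v.minor), (1, 0));
    assert_eq!(u32::from(v), 0x0001_0000);
    // Negative return values are SMCCC TRNG errors, not versions.
    assert!(Version::try_from(-1).is_err());
}
```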
diff --git a/vmbase/src/layout/mod.rs b/vmbase/src/layout/mod.rs
index 21c113a..f7e8170 100644
--- a/vmbase/src/layout/mod.rs
+++ b/vmbase/src/layout/mod.rs
@@ -17,6 +17,8 @@
pub mod crosvm;
use crate::console::BASE_ADDRESS;
+use crate::linker::__stack_chk_guard;
+use aarch64_paging::paging::VirtualAddress;
use core::ops::Range;
use core::ptr::addr_of;
@@ -27,11 +29,14 @@
#[macro_export]
macro_rules! linker_addr {
($symbol:ident) => {{
- unsafe { addr_of!($crate::linker::$symbol) as usize }
+ // SAFETY: We're just getting the address of an extern static symbol provided by the linker,
+ // not dereferencing it.
+ let addr = unsafe { addr_of!($crate::linker::$symbol) as usize };
+ VirtualAddress(addr)
}};
}
-/// Get the address range between a pair of linker-defined symbols.
+/// Gets the virtual address range between a pair of linker-defined symbols.
#[macro_export]
macro_rules! linker_region {
($begin:ident,$end:ident) => {{
@@ -43,57 +48,65 @@
}
/// Memory reserved for the DTB.
-pub fn dtb_range() -> Range<usize> {
+pub fn dtb_range() -> Range<VirtualAddress> {
linker_region!(dtb_begin, dtb_end)
}
/// Executable code.
-pub fn text_range() -> Range<usize> {
+pub fn text_range() -> Range<VirtualAddress> {
linker_region!(text_begin, text_end)
}
/// Read-only data.
-pub fn rodata_range() -> Range<usize> {
+pub fn rodata_range() -> Range<VirtualAddress> {
linker_region!(rodata_begin, rodata_end)
}
/// Initialised writable data.
-pub fn data_range() -> Range<usize> {
+pub fn data_range() -> Range<VirtualAddress> {
linker_region!(data_begin, data_end)
}
-/// Zero-initialised writable data.
-pub fn bss_range() -> Range<usize> {
+/// Zero-initialized writable data.
+pub fn bss_range() -> Range<VirtualAddress> {
linker_region!(bss_begin, bss_end)
}
/// Writable data region for the stack.
-pub fn stack_range(stack_size: usize) -> Range<usize> {
+pub fn stack_range(stack_size: usize) -> Range<VirtualAddress> {
let end = linker_addr!(init_stack_pointer);
- let start = end.checked_sub(stack_size).unwrap();
+ let start = VirtualAddress(end.0.checked_sub(stack_size).unwrap());
assert!(start >= linker_addr!(stack_limit));
start..end
}
/// All writable sections, excluding the stack.
-pub fn scratch_range() -> Range<usize> {
+pub fn scratch_range() -> Range<VirtualAddress> {
linker_region!(eh_stack_limit, bss_end)
}
/// UART console range.
-pub fn console_uart_range() -> Range<usize> {
+pub fn console_uart_range() -> Range<VirtualAddress> {
const CONSOLE_LEN: usize = 1; // `uart::Uart` only uses one u8 register.
- BASE_ADDRESS..(BASE_ADDRESS + CONSOLE_LEN)
+ VirtualAddress(BASE_ADDRESS)..VirtualAddress(BASE_ADDRESS + CONSOLE_LEN)
}
/// Read-write data (original).
-pub fn data_load_address() -> usize {
+pub fn data_load_address() -> VirtualAddress {
linker_addr!(data_lma)
}
/// End of the binary image.
-pub fn binary_end() -> usize {
+pub fn binary_end() -> VirtualAddress {
linker_addr!(bin_end)
}
+
+/// Value of __stack_chk_guard.
+pub fn stack_chk_guard() -> u64 {
+ // SAFETY: __stack_chk_guard shouldn't have any mutable aliases unless the stack overflows. If
+ // it does, then there could be undefined behaviour all over the program, but we want to at
+ // least have a chance at catching it.
+ unsafe { addr_of!(__stack_chk_guard).read_volatile() }
+}
diff --git a/vmbase/src/lib.rs b/vmbase/src/lib.rs
index 54f3384..431e899 100644
--- a/vmbase/src/lib.rs
+++ b/vmbase/src/lib.rs
@@ -19,21 +19,23 @@
extern crate alloc;
pub mod arch;
-mod bionic;
+pub mod bionic;
pub mod console;
mod entry;
+pub mod exceptions;
pub mod fdt;
+pub mod heap;
+mod hvc;
pub mod layout;
-mod linker;
+pub mod linker;
pub mod logger;
pub mod memory;
pub mod power;
+pub mod rand;
pub mod uart;
pub mod util;
pub mod virtio;
-pub use bionic::STACK_CHK_GUARD;
-
use core::panic::PanicInfo;
use power::reboot;
diff --git a/vmbase/src/logger.rs b/vmbase/src/logger.rs
index c30adad..9130918 100644
--- a/vmbase/src/logger.rs
+++ b/vmbase/src/logger.rs
@@ -20,19 +20,20 @@
use crate::console::println;
use core::sync::atomic::{AtomicBool, Ordering};
-use log::{LevelFilter, Log, Metadata, Record, SetLoggerError};
+use log::{Log, Metadata, Record, SetLoggerError};
struct Logger {
is_enabled: AtomicBool,
}
-static mut LOGGER: Logger = Logger::new();
+
+static LOGGER: Logger = Logger::new();
impl Logger {
const fn new() -> Self {
Self { is_enabled: AtomicBool::new(true) }
}
- fn swap_enabled(&mut self, enabled: bool) -> bool {
+ fn swap_enabled(&self, enabled: bool) -> bool {
self.is_enabled.swap(enabled, Ordering::Relaxed)
}
}
@@ -58,27 +59,19 @@
impl SuppressGuard {
fn new() -> Self {
- // Safe because it modifies an atomic.
- unsafe { Self { old_enabled: LOGGER.swap_enabled(false) } }
+ Self { old_enabled: LOGGER.swap_enabled(false) }
}
}
impl Drop for SuppressGuard {
fn drop(&mut self) {
- // Safe because it modifies an atomic.
- unsafe {
- LOGGER.swap_enabled(self.old_enabled);
- }
+ LOGGER.swap_enabled(self.old_enabled);
}
}
-/// Initialize vmbase logger with a given max logging level.
+/// Initializes the vmbase logger.
-pub fn init(max_level: LevelFilter) -> Result<(), SetLoggerError> {
- // Safe because it only sets the global logger.
- unsafe {
- log::set_logger(&LOGGER)?;
- }
- log::set_max_level(max_level);
+pub(crate) fn init() -> Result<(), SetLoggerError> {
+ log::set_logger(&LOGGER)?;
Ok(())
}
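
Dropping `static mut` works here because the only mutable state is an `AtomicBool`, which can be toggled through a shared reference. A minimal, self-contained sketch of that interior-mutability pattern, independent of the `log` crate:

// Minimal sketch: a shared static whose state is toggled through an atomic,
// so no `unsafe` and no `static mut` are required.
use core::sync::atomic::{AtomicBool, Ordering};

struct Toggle {
    enabled: AtomicBool,
}

impl Toggle {
    const fn new() -> Self {
        Self { enabled: AtomicBool::new(true) }
    }

    fn swap(&self, value: bool) -> bool {
        // Returns the previous value, as `swap_enabled` does for the logger.
        self.enabled.swap(value, Ordering::Relaxed)
    }
}

static TOGGLE: Toggle = Toggle::new();

fn suppress_temporarily() {
    let old = TOGGLE.swap(false);
    // ... do work with the flag cleared ...
    TOGGLE.swap(old);
}
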
diff --git a/vmbase/src/memory/dbm.rs b/vmbase/src/memory/dbm.rs
index d429b30..401022e 100644
--- a/vmbase/src/memory/dbm.rs
+++ b/vmbase/src/memory/dbm.rs
@@ -34,7 +34,7 @@
} else {
tcr &= !TCR_EL1_HA_HD_BITS
};
- // Safe because it writes to a system register and does not affect Rust.
+ // SAFETY: Changing this bit in TCR doesn't affect Rust's view of memory.
unsafe { write_sysreg!("tcr_el1", tcr) }
isb!();
}
diff --git a/vmbase/src/memory/mod.rs b/vmbase/src/memory/mod.rs
index 9f14691..2f72fc4 100644
--- a/vmbase/src/memory/mod.rs
+++ b/vmbase/src/memory/mod.rs
@@ -22,8 +22,13 @@
pub use error::MemoryTrackerError;
pub use page_table::PageTable;
-pub use shared::{alloc_shared, dealloc_shared, MemoryRange, MemoryTracker, MEMORY};
-pub use util::{
- flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, phys_to_virt, virt_to_phys,
- PAGE_SIZE, SIZE_2MB, SIZE_4KB, SIZE_4MB,
+pub use shared::{
+ handle_permission_fault, handle_translation_fault, MemoryRange, MemoryTracker, MEMORY,
};
+pub use util::{
+ flush, flushed_zeroize, min_dcache_line_size, page_4kb_of, PAGE_SIZE, SIZE_128KB, SIZE_2MB,
+ SIZE_4KB, SIZE_4MB, SIZE_64KB,
+};
+
+pub(crate) use shared::{alloc_shared, dealloc_shared};
+pub(crate) use util::{phys_to_virt, virt_to_phys};
diff --git a/vmbase/src/memory/page_table.rs b/vmbase/src/memory/page_table.rs
index 3943b03..e067e96 100644
--- a/vmbase/src/memory/page_table.rs
+++ b/vmbase/src/memory/page_table.rs
@@ -18,7 +18,7 @@
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, MemoryRegion, PteUpdater};
use aarch64_paging::MapError;
-use core::{ops::Range, result};
+use core::result;
/// Software bit used to indicate a device that should be lazily mapped.
pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;
@@ -88,50 +88,44 @@
/// Maps the given range of virtual addresses to the physical addresses as lazily mapped
/// nGnRE device memory.
- pub fn map_device_lazy(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DEVICE_LAZY)
+ pub fn map_device_lazy(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DEVICE_LAZY)
}
/// Maps the given range of virtual addresses to the physical addresses as valid device
/// nGnRE device memory.
- pub fn map_device(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DEVICE)
+ pub fn map_device(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DEVICE)
}
/// Maps the given range of virtual addresses to the physical addresses as non-executable
/// and writable normal memory.
- pub fn map_data(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DATA)
+ pub fn map_data(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DATA)
}
/// Maps the given range of virtual addresses to the physical addresses as non-executable,
/// read-only and writable-clean normal memory.
- pub fn map_data_dbm(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, DATA_DBM)
+ pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, DATA_DBM)
}
/// Maps the given range of virtual addresses to the physical addresses as read-only
/// normal memory.
- pub fn map_code(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, CODE)
+ pub fn map_code(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, CODE)
}
/// Maps the given range of virtual addresses to the physical addresses as non-executable
/// and read-only normal memory.
- pub fn map_rodata(&mut self, range: &Range<usize>) -> Result<()> {
- self.map_range(range, RODATA)
- }
-
- /// Maps the given range of virtual addresses to the physical addresses with the given
- /// attributes.
- fn map_range(&mut self, range: &Range<usize>, attr: Attributes) -> Result<()> {
- self.idmap.map_range(&MemoryRegion::new(range.start, range.end), attr)
+ pub fn map_rodata(&mut self, range: &MemoryRegion) -> Result<()> {
+ self.idmap.map_range(range, RODATA)
}
/// Applies the provided updater function to a number of PTEs corresponding to a given memory
/// range.
- pub fn modify_range(&mut self, range: &Range<usize>, f: &PteUpdater) -> Result<()> {
- self.idmap.modify_range(&MemoryRegion::new(range.start, range.end), f)
+ pub fn modify_range(&mut self, range: &MemoryRegion, f: &PteUpdater) -> Result<()> {
+ self.idmap.modify_range(range, f)
}
}
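
With the private `map_range` helper gone, callers build `aarch64_paging::paging::MemoryRegion` values themselves, either from raw `usize` bounds via `MemoryRegion::new` or from a `Range<VirtualAddress>` via `.into()`. A hedged sketch (addresses are made up and assume the identity mapping vmbase uses):

// Illustrative only; `map_examples` is hypothetical.
use aarch64_paging::paging::{MemoryRegion, VirtualAddress};
use aarch64_paging::MapError;
use vmbase::memory::PageTable;

fn map_examples(page_table: &mut PageTable) -> Result<(), MapError> {
    // From raw usize bounds, e.g. an MMIO window.
    page_table.map_device(&MemoryRegion::new(0x9000_0000, 0x9000_1000))?;
    // From a Range<VirtualAddress>, as the layout helpers now return.
    let scratch = VirtualAddress(0x8000_0000)..VirtualAddress(0x8001_0000);
    page_table.map_data(&scratch.into())?;
    Ok(())
}
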
diff --git a/vmbase/src/memory/shared.rs b/vmbase/src/memory/shared.rs
index 61cbeb0..dfa29e4 100644
--- a/vmbase/src/memory/shared.rs
+++ b/vmbase/src/memory/shared.rs
@@ -19,18 +19,21 @@
use super::page_table::{is_leaf_pte, PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::dsb;
+use crate::exceptions::HandleExceptionError;
use crate::util::RangeExt as _;
-use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange};
+use aarch64_paging::paging::{Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
+use core::cmp::max;
+use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
-use hyp::{get_hypervisor, MMIO_GUARD_GRANULE_SIZE};
+use hyp::{get_mem_sharer, get_mmio_guard, MMIO_GUARD_GRANULE_SIZE};
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
@@ -44,6 +47,11 @@
/// Memory range.
pub type MemoryRange = Range<usize>;
+
+fn get_va_range(range: &MemoryRange) -> VaRange {
+ VaRange::new(range.start, range.end)
+}
+
type Result<T> = result::Result<T, MemoryTrackerError>;
#[derive(Clone, Copy, Debug, Default, PartialEq)]
@@ -69,8 +77,6 @@
payload_range: Option<MemoryRange>,
}
-unsafe impl Send for MemoryTracker {}
-
impl MemoryTracker {
const CAPACITY: usize = 5;
const MMIO_CAPACITY: usize = 5;
@@ -80,7 +86,7 @@
mut page_table: PageTable,
total: MemoryRange,
mmio_range: MemoryRange,
- payload_range: Option<MemoryRange>,
+ payload_range: Option<Range<VirtualAddress>>,
) -> Self {
assert!(
!total.overlaps(&mmio_range),
@@ -93,7 +99,7 @@
set_dbm_enabled(true);
debug!("Activating dynamic page table...");
- // SAFETY - page_table duplicates the static mappings for everything that the Rust code is
+ // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
// aware of so activating it shouldn't have any visible effect.
unsafe { page_table.activate() }
debug!("... Success!");
@@ -104,7 +110,7 @@
regions: ArrayVec::new(),
mmio_regions: ArrayVec::new(),
mmio_range,
- payload_range,
+ payload_range: payload_range.map(|r| r.start.0..r.end.0),
}
}
@@ -130,7 +136,7 @@
pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
self.check(®ion)?;
- self.page_table.map_rodata(range).map_err(|e| {
+ self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
error!("Error during range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -141,7 +147,7 @@
pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
self.check(®ion)?;
- self.page_table.map_data_dbm(range).map_err(|e| {
+ self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
error!("Error during mutable range allocation: {e}");
MemoryTrackerError::FailedToMap
})?;
@@ -171,10 +177,17 @@
return Err(MemoryTrackerError::Full);
}
- self.page_table.map_device_lazy(&range).map_err(|e| {
- error!("Error during MMIO device mapping: {e}");
- MemoryTrackerError::FailedToMap
- })?;
+ if get_mmio_guard().is_some() {
+ self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
+ error!("Error during lazy MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ } else {
+ self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
+ error!("Error during MMIO device mapping: {e}");
+ MemoryTrackerError::FailedToMap
+ })?;
+ }
if self.mmio_regions.try_push(range).is_some() {
return Err(MemoryTrackerError::Full);
@@ -211,10 +224,12 @@
///
/// Note that they are not unmapped from the page table.
pub fn mmio_unmap_all(&mut self) -> Result<()> {
- for range in &self.mmio_regions {
- self.page_table
- .modify_range(range, &mmio_guard_unmap_page)
- .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+ if get_mmio_guard().is_some() {
+ for range in &self.mmio_regions {
+ self.page_table
+ .modify_range(&get_va_range(range), &mmio_guard_unmap_page)
+ .map_err(|_| MemoryTrackerError::FailedToUnmap)?;
+ }
}
Ok(())
}
@@ -256,6 +271,19 @@
Ok(())
}
+ /// Initializes the shared heap to use heap memory directly.
+ ///
+ /// When running on "non-protected" hypervisors that let the host access guest memory
+ /// directly, there is no need to share memory or to allocate buffers from a dedicated
+ /// region, so this function instructs the shared pool to use the global allocator.
+ pub fn init_heap_shared_pool(&mut self) -> Result<()> {
+ // As MemorySharer only issues MEM_SHARE calls when the hypervisor supports them, using
+ // init_dynamic_shared_pool() on a non-protected platform draws from the heap without any
+ // actual memory sharing taking place. As such, the granule may be set to that of the
+ // global_allocator, i.e. a byte.
+ self.init_dynamic_shared_pool(size_of::<u8>())
+ }
+
/// Unshares any memory that may have been shared.
pub fn unshare_all_memory(&mut self) {
drop(SHARED_MEMORY.lock().take());
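
The intended caller-side split implied by `init_heap_shared_pool()` above is: dynamic sharing when a MEM_SHARE interface exists, plain heap otherwise. A hedged sketch of that decision (it assumes `init_dynamic_shared_pool()` is public, as the comment above implies, and the granule value is illustrative):

// Hedged sketch; `init_shared_pool` and GRANULE are hypothetical.
use hyp::get_mem_sharer;
use vmbase::memory::{MemoryTracker, MemoryTrackerError};

const GRANULE: usize = 4096; // hypothetical sharing granule

fn init_shared_pool(tracker: &mut MemoryTracker) -> Result<(), MemoryTrackerError> {
    if get_mem_sharer().is_some() {
        // Protected VM: buffers must be explicitly shared with the host.
        tracker.init_dynamic_shared_pool(GRANULE)
    } else {
        // Non-protected VM: the host can already access guest memory, so the
        // global allocator suffices.
        tracker.init_heap_shared_pool()
    }
}
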
@@ -263,12 +291,14 @@
/// Handles translation fault for blocks flagged for lazy MMIO mapping by enabling the page
/// table entry and MMIO guard mapping the block. Breaks apart a block entry if required.
- pub fn handle_mmio_fault(&mut self, addr: usize) -> Result<()> {
- let page_range = page_4kb_of(addr)..page_4kb_of(addr) + MMIO_GUARD_GRANULE_SIZE;
+ fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
+ let page_start = VirtualAddress(page_4kb_of(addr.0));
+ let page_range: VaRange = (page_start..page_start + MMIO_GUARD_GRANULE_SIZE).into();
+ let mmio_guard = get_mmio_guard().unwrap();
self.page_table
.modify_range(&page_range, &verify_lazy_mapped_block)
.map_err(|_| MemoryTrackerError::InvalidPte)?;
- get_hypervisor().mmio_guard_map(page_range.start)?;
+ mmio_guard.map(page_start.0)?;
// Maps a single device page, breaking up block mappings if necessary.
self.page_table.map_device(&page_range).map_err(|_| MemoryTrackerError::FailedToMap)
}
@@ -284,7 +314,7 @@
// Now flush writable-dirty pages in those regions.
for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
self.page_table
- .modify_range(range, &flush_dirty_range)
+ .modify_range(&get_va_range(range), &flush_dirty_range)
.map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
}
Ok(())
@@ -293,9 +323,9 @@
/// Handles permission fault for read-only blocks by setting writable-dirty state.
/// In general, this should be called from the exception handler when hardware dirty
/// state management is disabled or unavailable.
- pub fn handle_permission_fault(&mut self, addr: usize) -> Result<()> {
+ fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
self.page_table
- .modify_range(&(addr..addr + 1), &mark_dirty_block)
+ .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
.map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
}
}
@@ -310,7 +340,7 @@
/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
-pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
+pub(crate) fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
assert_ne!(layout.size(), 0);
let Some(buffer) = try_shared_alloc(layout) else {
handle_alloc_error(layout);
@@ -326,7 +356,11 @@
if let Some(buffer) = shared_pool.alloc_aligned(layout) {
Some(NonNull::new(buffer as _).unwrap())
} else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
- shared_memory.refill(&mut shared_pool, layout);
+ // Adjusts the layout size to the max of the next power of two and the alignment,
+ // as this is the actual size of the memory allocated in `alloc_aligned()`.
+ let size = max(layout.size().next_power_of_two(), layout.align());
+ let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
+ shared_memory.refill(&mut shared_pool, refill_layout);
shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
} else {
None
@@ -341,7 +375,7 @@
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
-pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
+pub(crate) unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);
trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
@@ -353,7 +387,7 @@
/// Unshares all pages when dropped.
struct MemorySharer {
granule: usize,
- shared_regions: Vec<(usize, Layout)>,
+ frames: Vec<(usize, Layout)>,
}
impl MemorySharer {
@@ -361,42 +395,47 @@
/// `granule` must be a power of 2.
fn new(granule: usize, capacity: usize) -> Self {
assert!(granule.is_power_of_two());
- Self { granule, shared_regions: Vec::with_capacity(capacity) }
+ Self { granule, frames: Vec::with_capacity(capacity) }
}
/// Gets from the global allocator a granule-aligned region that suits `hint` and shares it.
fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
let layout = hint.align_to(self.granule).unwrap().pad_to_align();
assert_ne!(layout.size(), 0);
- // SAFETY - layout has non-zero size.
+ // SAFETY: layout has non-zero size.
let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
handle_alloc_error(layout);
};
let base = shared.as_ptr() as usize;
let end = base.checked_add(layout.size()).unwrap();
- trace!("Sharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
- }
- self.shared_regions.push((base, layout));
+ if let Some(mem_sharer) = get_mem_sharer() {
+ trace!("Sharing memory region {:#x?}", base..end);
+ for vaddr in (base..end).step_by(self.granule) {
+ let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+ mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ }
+ }
+
+ self.frames.push((base, layout));
pool.add_frame(base, end);
}
}
impl Drop for MemorySharer {
fn drop(&mut self) {
- while let Some((base, layout)) = self.shared_regions.pop() {
- let end = base.checked_add(layout.size()).unwrap();
- trace!("Unsharing memory region {:#x?}", base..end);
- for vaddr in (base..end).step_by(self.granule) {
- let vaddr = NonNull::new(vaddr as *mut _).unwrap();
- get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ while let Some((base, layout)) = self.frames.pop() {
+ if let Some(mem_sharer) = get_mem_sharer() {
+ let end = base.checked_add(layout.size()).unwrap();
+ trace!("Unsharing memory region {:#x?}", base..end);
+ for vaddr in (base..end).step_by(self.granule) {
+ let vaddr = NonNull::new(vaddr as *mut _).unwrap();
+ mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
+ }
}
- // SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
+ // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
unsafe { dealloc(base as *mut _, layout) };
}
}
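
Both the refill path in `try_shared_alloc()` and `MemorySharer::refill()` round the requested layout up: first to a power of two no smaller than the alignment, then to whole sharing granules. A small self-contained sketch of that arithmetic, with illustrative numbers:

// Self-contained sketch of the size rounding applied above; the numbers are
// illustrative. Layout::align_to(..).pad_to_align() is the same core API used
// by MemorySharer::refill.
use core::alloc::Layout;
use core::cmp::max;

fn refill_layout(request: Layout, granule: usize) -> Layout {
    // Matches the adjustment in try_shared_alloc(): the buddy allocator hands
    // out power-of-two blocks no smaller than the alignment.
    let size = max(request.size().next_power_of_two(), request.align());
    let rounded = Layout::from_size_align(size, request.align()).unwrap();
    // Matches refill(): pad out to whole sharing granules.
    rounded.align_to(granule).unwrap().pad_to_align()
}

fn example() {
    let request = Layout::from_size_align(24, 8).unwrap();
    let layout = refill_layout(request, 4096);
    // 24 bytes -> 32 bytes (next power of two), then padded to one 4 KiB granule.
    assert_eq!(layout.size(), 4096);
}
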
@@ -448,9 +487,25 @@
// Since mmio_guard_map takes IPAs, if pvmfw moves to non-ID address mapping, page_base
// should be converted to IPA. However, since 0x0 is a valid MMIO address, we don't use
// virt_to_phys here, and just pass page_base instead.
- get_hypervisor().mmio_guard_unmap(page_base).map_err(|e| {
+ get_mmio_guard().unwrap().unmap(page_base).map_err(|e| {
error!("Error MMIO guard unmapping: {e}");
})?;
}
Ok(())
}
+
+/// Handles a translation fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_mmio_fault(far)?)
+}
+
+/// Handles a permission fault with the given fault address register (FAR).
+#[inline]
+pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
+ let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
+ let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
+ Ok(memory.handle_permission_fault(far)?)
+}
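
These wrappers let the architecture-specific exception code stay ignorant of `MemoryTracker` internals. A hedged sketch of the calling side (the handler name and fault classification are illustrative; the real decoding lives in `vmbase::exceptions`):

// Illustrative sketch of how a synchronous-exception handler might delegate to
// the helpers above. Fault classification from ESR_EL1 is elided.
use aarch64_paging::paging::VirtualAddress;
use vmbase::exceptions::HandleExceptionError;
use vmbase::memory::{handle_permission_fault, handle_translation_fault};

enum FaultKind {
    Translation,
    Permission,
}

fn handle_data_abort(kind: FaultKind, far: usize) -> Result<(), HandleExceptionError> {
    let far = VirtualAddress(far);
    match kind {
        FaultKind::Translation => handle_translation_fault(far),
        FaultKind::Permission => handle_permission_fault(far),
    }
}
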
diff --git a/vmbase/src/memory/util.rs b/vmbase/src/memory/util.rs
index 04d42cd..2b75414 100644
--- a/vmbase/src/memory/util.rs
+++ b/vmbase/src/memory/util.rs
@@ -22,6 +22,10 @@
/// The size of a 4KB memory in bytes.
pub const SIZE_4KB: usize = 4 << 10;
+/// The size of a 64KB memory in bytes.
+pub const SIZE_64KB: usize = 64 << 10;
+/// The size of a 128KB memory in bytes.
+pub const SIZE_128KB: usize = 128 << 10;
/// The size of a 2MB memory in bytes.
pub const SIZE_2MB: usize = 2 << 20;
/// The size of a 4MB memory in bytes.
@@ -51,7 +55,7 @@
let start = unchecked_align_down(start, line_size);
for line in (start..end).step_by(line_size) {
- // SAFETY - Clearing cache lines shouldn't have Rust-visible side effects.
+ // SAFETY: Clearing cache lines shouldn't have Rust-visible side effects.
unsafe {
asm!(
"dc cvau, {x}",
@@ -84,7 +88,7 @@
///
/// As we use identity mapping for everything, this is just a cast, but it's useful to use it to be
/// explicit about where we are converting from virtual to physical address.
-pub fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
+pub(crate) fn virt_to_phys(vaddr: NonNull<u8>) -> usize {
vaddr.as_ptr() as _
}
@@ -92,6 +96,6 @@
/// physical address.
///
/// Panics if `paddr` is 0.
-pub fn phys_to_virt(paddr: usize) -> NonNull<u8> {
+pub(crate) fn phys_to_virt(paddr: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
diff --git a/vmbase/src/rand.rs b/vmbase/src/rand.rs
new file mode 100644
index 0000000..b31bd4b
--- /dev/null
+++ b/vmbase/src/rand.rs
@@ -0,0 +1,155 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Functions and drivers for obtaining true entropy.
+
+use crate::hvc;
+use core::fmt;
+use core::mem::size_of;
+use smccc::{self, Hvc};
+use zerocopy::AsBytes as _;
+
+type Entropy = [u8; size_of::<u64>() * 3];
+
+/// Error type for rand operations.
+pub enum Error {
+ /// No source of entropy found.
+ NoEntropySource,
+ /// Error during architectural SMCCC call.
+ Smccc(smccc::arch::Error),
+ /// Error during SMCCC TRNG call.
+ Trng(hvc::trng::Error),
+ /// Unsupported SMCCC version.
+ UnsupportedSmcccVersion(smccc::arch::Version),
+ /// Unsupported SMCCC TRNG version.
+ UnsupportedTrngVersion(hvc::trng::Version),
+}
+
+impl From<smccc::arch::Error> for Error {
+ fn from(e: smccc::arch::Error) -> Self {
+ Self::Smccc(e)
+ }
+}
+
+impl From<hvc::trng::Error> for Error {
+ fn from(e: hvc::trng::Error) -> Self {
+ Self::Trng(e)
+ }
+}
+
+/// Result type for rand operations.
+pub type Result<T> = core::result::Result<T, Error>;
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::NoEntropySource => write!(f, "No source of entropy available"),
+ Self::Smccc(e) => write!(f, "Architectural SMCCC error: {e}"),
+ Self::Trng(e) => write!(f, "SMCCC TRNG error: {e}"),
+ Self::UnsupportedSmcccVersion(v) => write!(f, "Unsupported SMCCC version {v}"),
+ Self::UnsupportedTrngVersion(v) => write!(f, "Unsupported SMCCC TRNG version {v}"),
+ }
+ }
+}
+
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{self}")
+ }
+}
+
+/// Configures the source of entropy.
+pub(crate) fn init() -> Result<()> {
+ // SMCCC TRNG requires SMCCC v1.1.
+ match smccc::arch::version::<Hvc>()? {
+ smccc::arch::Version { major: 1, minor } if minor >= 1 => (),
+ version => return Err(Error::UnsupportedSmcccVersion(version)),
+ }
+
+ // TRNG_RND requires SMCCC TRNG v1.0.
+ match hvc::trng_version()? {
+ hvc::trng::Version { major: 1, minor: _ } => (),
+ version => return Err(Error::UnsupportedTrngVersion(version)),
+ }
+
+ // TRNG_RND64 doesn't define any special capabilities so ignore the successful result.
+ let _ = hvc::trng_features(hvc::ARM_SMCCC_TRNG_RND64).map_err(|e| {
+ if e == hvc::trng::Error::NotSupported {
+ // SMCCC TRNG is currently our only source of entropy.
+ Error::NoEntropySource
+ } else {
+ e.into()
+ }
+ })?;
+
+ Ok(())
+}
+
+/// Fills a slice of bytes with true entropy.
+pub fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
+ const MAX_BYTES_PER_CALL: usize = size_of::<Entropy>();
+
+ for chunk in s.chunks_mut(MAX_BYTES_PER_CALL) {
+ let entropy = repeat_trng_rnd(chunk.len())?;
+ chunk.clone_from_slice(&entropy[..chunk.len()]);
+ }
+
+ Ok(())
+}
+
+/// Returns an array where the first `n_bytes` bytes hold entropy.
+///
+/// The rest of the array should be ignored.
+fn repeat_trng_rnd(n_bytes: usize) -> Result<Entropy> {
+ loop {
+ if let Some(entropy) = rnd64(n_bytes)? {
+ return Ok(entropy);
+ }
+ }
+}
+
+/// Returns an array where the first `n_bytes` bytes hold entropy, if available.
+///
+/// The rest of the array should be ignored.
+fn rnd64(n_bytes: usize) -> Result<Option<Entropy>> {
+ let bits = usize::try_from(u8::BITS).unwrap();
+ let result = hvc::trng_rnd64((n_bytes * bits).try_into().unwrap());
+ let entropy = if matches!(result, Err(hvc::trng::Error::NoEntropy)) {
+ None
+ } else {
+ let r = result?;
+ // From the SMCCC TRNG:
+ //
+ // A MAX_BITS-bits wide value (Entropy) is returned across X1 to X3.
+ // The requested conditioned entropy is returned in Entropy[N-1:0].
+ //
+ // X1 Entropy[191:128]
+ // X2 Entropy[127:64]
+ // X3 Entropy[63:0]
+ //
+ // The bits in Entropy[MAX_BITS-1:N] are 0.
+ let reordered = [r[2].to_le(), r[1].to_le(), r[0].to_le()];
+
+ Some(reordered.as_bytes().try_into().unwrap())
+ };
+
+ Ok(entropy)
+}
+
+/// Generates a fixed-size array initialized with true-random bytes.
+pub fn random_array<const N: usize>() -> Result<[u8; N]> {
+ let mut arr = [0; N];
+ fill_with_entropy(&mut arr)?;
+ Ok(arr)
+}
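
A short usage sketch for the new entropy API; only `fill_with_entropy` and `random_array` come from this change, the calling context is hypothetical and assumes `rand::init()` already ran during vmbase start-up (it is crate-internal):

// Illustrative usage of the rand module above.
use vmbase::rand;

fn make_nonce() -> rand::Result<[u8; 16]> {
    // random_array() repeatedly issues TRNG_RND64 HVCs until the array is
    // full, drawing at most 24 bytes (three 64-bit registers) per call.
    rand::random_array::<16>()
}

fn fill_buffer(buf: &mut [u8]) -> rand::Result<()> {
    rand::fill_with_entropy(buf)
}
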
diff --git a/vmbase/src/uart.rs b/vmbase/src/uart.rs
index 0fc2494..09d747f 100644
--- a/vmbase/src/uart.rs
+++ b/vmbase/src/uart.rs
@@ -38,8 +38,8 @@
/// Writes a single byte to the UART.
pub fn write_byte(&self, byte: u8) {
- // Safe because we know that the base address points to the control registers of an UART
- // device which is appropriately mapped.
+ // SAFETY: We know that the base address points to the control registers of a UART device
+ // which is appropriately mapped.
unsafe {
write_volatile(self.base_address, byte);
}
@@ -55,5 +55,5 @@
}
}
-// Safe because it just contains a pointer to device memory, which can be accessed from any context.
+// SAFETY: `Uart` just contains a pointer to device memory, which can be accessed from any context.
unsafe impl Send for Uart {}
diff --git a/vmbase/src/virtio/hal.rs b/vmbase/src/virtio/hal.rs
index 36f9e56..0d3f445 100644
--- a/vmbase/src/virtio/hal.rs
+++ b/vmbase/src/virtio/hal.rs
@@ -32,10 +32,8 @@
/// HAL implementation for the virtio_drivers crate.
pub struct HalImpl;
-/// # Safety
-///
-/// See the 'Implementation Safety' comments on methods below for how they fulfill the safety
-/// requirements of the unsafe `Hal` trait.
+/// SAFETY: See the 'Implementation Safety' comments on methods below for how they fulfill the
+/// safety requirements of the unsafe `Hal` trait.
unsafe impl Hal for HalImpl {
/// # Implementation Safety
///
@@ -48,14 +46,14 @@
let layout = dma_layout(pages);
let vaddr =
alloc_shared(layout).expect("Failed to allocate and share VirtIO DMA range with host");
- // SAFETY - vaddr points to a region allocated for the caller so is safe to access.
+ // SAFETY: vaddr points to a region allocated for the caller so is safe to access.
unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, layout.size()) };
let paddr = virt_to_phys(vaddr);
(paddr, vaddr)
}
unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
- // SAFETY - Memory was allocated by `dma_alloc` using `alloc_shared` with the same layout.
+ // SAFETY: Memory was allocated by `dma_alloc` using `alloc_shared` with the same layout.
unsafe { dealloc_shared(vaddr, dma_layout(pages)) }
.expect("Failed to unshare VirtIO DMA range with host");
0
@@ -68,7 +66,7 @@
/// range. It can't alias any other allocations because we previously validated in
/// `map_mmio_range` that the PCI MMIO range didn't overlap with any other memory ranges.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
- let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
+ let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialized");
let bar_range = {
let start = pci_info.bar_range.start.try_into().unwrap();
let end = pci_info.bar_range.end.try_into().unwrap();
@@ -96,7 +94,7 @@
if direction == BufferDirection::DriverToDevice {
let src = buffer.cast::<u8>().as_ptr().cast_const();
trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) initialized from {src:?}");
- // SAFETY - Both regions are valid, properly aligned, and don't overlap.
+ // SAFETY: Both regions are valid, properly aligned, and don't overlap.
unsafe { copy_nonoverlapping(src, bounce.as_ptr(), size) };
}
@@ -109,11 +107,11 @@
if direction == BufferDirection::DeviceToDriver {
let dest = buffer.cast::<u8>().as_ptr();
trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) copied back to {dest:?}");
- // SAFETY - Both regions are valid, properly aligned, and don't overlap.
+ // SAFETY: Both regions are valid, properly aligned, and don't overlap.
unsafe { copy_nonoverlapping(bounce.as_ptr(), dest, size) };
}
- // SAFETY - Memory was allocated by `share` using `alloc_shared` with the same layout.
+ // SAFETY: Memory was allocated by `share` using `alloc_shared` with the same layout.
unsafe { dealloc_shared(bounce, bb_layout(size)) }
.expect("Failed to unshare and deallocate VirtIO bounce buffer");
}
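
The copies above are deliberately one-directional: data is staged into the bounce buffer only when the device will read it, and copied back only once the device has written it. A self-contained illustration of that pattern with local stand-in types:

// Self-contained illustration; `Direction` stands in for
// virtio_drivers::BufferDirection.
#[derive(PartialEq)]
enum Direction {
    DriverToDevice,
    DeviceToDriver,
}

// Fill the bounce buffer before the device reads it...
fn copy_on_share(src: &[u8], bounce: &mut [u8], dir: Direction) {
    if dir == Direction::DriverToDevice {
        bounce[..src.len()].copy_from_slice(src);
    }
}

// ...and copy it back only after the device has written to it.
fn copy_on_unshare(bounce: &[u8], dest: &mut [u8], dir: Direction) {
    if dir == Direction::DeviceToDriver {
        dest.copy_from_slice(&bounce[..dest.len()]);
    }
}
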
diff --git a/vmbase/src/virtio/mod.rs b/vmbase/src/virtio/mod.rs
index df916bc..fbe41e3 100644
--- a/vmbase/src/virtio/mod.rs
+++ b/vmbase/src/virtio/mod.rs
@@ -16,3 +16,5 @@
mod hal;
pub mod pci;
+
+pub use hal::HalImpl;
diff --git a/vmbase/src/virtio/pci.rs b/vmbase/src/virtio/pci.rs
index cbb4d26..1d05c18 100644
--- a/vmbase/src/virtio/pci.rs
+++ b/vmbase/src/virtio/pci.rs
@@ -14,19 +14,20 @@
//! Functions to scan the PCI bus for VirtIO devices.
-use super::hal::HalImpl;
use crate::memory::{MemoryTracker, MemoryTrackerError};
use alloc::boxed::Box;
use core::fmt;
+use core::marker::PhantomData;
use fdtpci::PciInfo;
use log::debug;
use once_cell::race::OnceBox;
use virtio_drivers::{
- device::blk,
+ device::{blk, socket},
transport::pci::{
bus::{BusDeviceIterator, PciRoot},
virtio_device_type, PciTransport,
},
+ Hal,
};
pub(super) static PCI_INFO: OnceBox<PciInfo> = OnceBox::new();
@@ -63,7 +64,7 @@
/// 3. Creates and returns a `PciRoot`.
///
/// This must only be called once; it will panic if it is called a second time.
-pub fn initialise(pci_info: PciInfo, memory: &mut MemoryTracker) -> Result<PciRoot, PciError> {
+pub fn initialize(pci_info: PciInfo, memory: &mut MemoryTracker) -> Result<PciRoot, PciError> {
PCI_INFO.set(Box::new(pci_info.clone())).map_err(|_| PciError::DuplicateInitialization)?;
memory.map_mmio_range(pci_info.cam_range.clone()).map_err(PciError::CamMapFailed)?;
@@ -76,23 +77,29 @@
}
/// Virtio Block device.
-pub type VirtIOBlk = blk::VirtIOBlk<HalImpl, PciTransport>;
+pub type VirtIOBlk<T> = blk::VirtIOBlk<T, PciTransport>;
+
+/// Virtio Socket device.
+///
+/// Spec: https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html 5.10
+pub type VirtIOSocket<T> = socket::VirtIOSocket<T, PciTransport>;
/// An iterator that iterates over the PCI transport for each device.
-pub struct PciTransportIterator<'a> {
+pub struct PciTransportIterator<'a, T: Hal> {
pci_root: &'a mut PciRoot,
bus: BusDeviceIterator,
+ _hal: PhantomData<T>,
}
-impl<'a> PciTransportIterator<'a> {
+impl<'a, T: Hal> PciTransportIterator<'a, T> {
/// Creates a new iterator.
pub fn new(pci_root: &'a mut PciRoot) -> Self {
let bus = pci_root.enumerate_bus(0);
- Self { pci_root, bus }
+ Self { pci_root, bus, _hal: PhantomData }
}
}
-impl<'a> Iterator for PciTransportIterator<'a> {
+impl<'a, T: Hal> Iterator for PciTransportIterator<'a, T> {
type Item = PciTransport;
fn next(&mut self) -> Option<Self::Item> {
@@ -109,7 +116,7 @@
};
debug!(" VirtIO {:?}", virtio_type);
- return PciTransport::new::<HalImpl>(self.pci_root, device_function).ok();
+ return PciTransport::new::<T>(self.pci_root, device_function).ok();
}
}
}
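
Making the iterator and device aliases generic over `Hal` lets clients supply their own HAL while vmbase keeps exporting `HalImpl` for the common case. A hedged usage sketch (the enclosing function is hypothetical):

// Illustrative sketch of instantiating the generic types above with the
// exported HalImpl; `find_block_device` is hypothetical.
use virtio_drivers::transport::pci::bus::PciRoot;
use vmbase::virtio::pci::{PciTransportIterator, VirtIOBlk};
use vmbase::virtio::HalImpl;

fn find_block_device(pci_root: &mut PciRoot) -> Option<VirtIOBlk<HalImpl>> {
    for transport in PciTransportIterator::<HalImpl>::new(pci_root) {
        if let Ok(blk) = VirtIOBlk::<HalImpl>::new(transport) {
            return Some(blk);
        }
    }
    None
}
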
diff --git a/vmclient/src/lib.rs b/vmclient/src/lib.rs
index 8f25b99..7c0383b 100644
--- a/vmclient/src/lib.rs
+++ b/vmclient/src/lib.rs
@@ -67,7 +67,7 @@
// file descriptors (expected by SharedChild).
let (raw1, raw2) = pipe2(OFlag::O_CLOEXEC)?;
- // SAFETY - Taking ownership of brand new FDs.
+ // SAFETY: Taking ownership of brand new FDs.
unsafe { Ok((OwnedFd::from_raw_fd(raw1), OwnedFd::from_raw_fd(raw2))) }
}
@@ -80,7 +80,7 @@
let (raw1, raw2) =
socketpair(AddressFamily::Unix, SockType::Stream, None, SockFlag::SOCK_CLOEXEC)?;
- // SAFETY - Taking ownership of brand new FDs.
+ // SAFETY: Taking ownership of brand new FDs.
unsafe { Ok((OwnedFd::from_raw_fd(raw1), OwnedFd::from_raw_fd(raw2))) }
}
@@ -175,14 +175,17 @@
pub fn create(
service: &dyn IVirtualizationService,
config: &VirtualMachineConfig,
- console: Option<File>,
+ console_out: Option<File>,
+ console_in: Option<File>,
log: Option<File>,
callback: Option<Box<dyn VmCallback + Send + Sync>>,
) -> BinderResult<Self> {
- let console = console.map(ParcelFileDescriptor::new);
+ let console_out = console_out.map(ParcelFileDescriptor::new);
+ let console_in = console_in.map(ParcelFileDescriptor::new);
let log = log.map(ParcelFileDescriptor::new);
- let vm = service.createVm(config, console.as_ref(), log.as_ref())?;
+ let vm =
+ service.createVm(config, console_out.as_ref(), console_in.as_ref(), log.as_ref())?;
let cid = vm.getCid()?;
diff --git a/zipfuse/src/inode.rs b/zipfuse/src/inode.rs
index ea63422..3175a30 100644
--- a/zipfuse/src/inode.rs
+++ b/zipfuse/src/inode.rs
@@ -31,10 +31,11 @@
const INVALID: Inode = 0;
const ROOT: Inode = 1;
-const DEFAULT_DIR_MODE: u32 = libc::S_IRUSR | libc::S_IXUSR;
+const DEFAULT_DIR_MODE: u32 = libc::S_IRUSR | libc::S_IXUSR | libc::S_IRGRP | libc::S_IXGRP;
-// b/264668376 some files in APK don't have unix permissions specified. Default to 400
-// otherwise those files won't be readable even by the owner.
+// b/264668376 some files in APK don't have unix permissions specified. Default to 440;
+// otherwise those files won't be readable even by the owner.
-const DEFAULT_FILE_MODE: u32 = libc::S_IRUSR;
+const DEFAULT_FILE_MODE: u32 = libc::S_IRUSR | libc::S_IRGRP;
+const EXECUTABLE_FILE_MODE: u32 = DEFAULT_FILE_MODE | libc::S_IXUSR | libc::S_IXGRP;
/// `InodeData` represents an inode which has metadata about a file or a directory
#[derive(Debug)]
@@ -191,7 +192,7 @@
// additional binaries that they might want to execute.
// An example of such binary is measure_io one used in the authfs performance tests.
// More context available at b/265261525 and b/270955654.
- file_mode |= libc::S_IXUSR;
+ file_mode = EXECUTABLE_FILE_MODE;
}
while let Some(name) = iter.next() {
@@ -210,7 +211,7 @@
parent = found;
// Update the mode if this is a directory leaf.
if !is_file && is_leaf {
- let mut inode = table.get_mut(parent).unwrap();
+ let inode = table.get_mut(parent).unwrap();
inode.mode = file.unix_mode().unwrap_or(DEFAULT_DIR_MODE);
}
continue;
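
The new defaults only widen the old owner-only bits by group read, plus group execute for directories and executables. For reference, a small sketch spelling the constants out in octal (assumes the libc crate):

// Reference-only sketch of the new defaults in octal.
fn default_modes() {
    let dir = libc::S_IRUSR | libc::S_IXUSR | libc::S_IRGRP | libc::S_IXGRP;
    let file = libc::S_IRUSR | libc::S_IRGRP;
    let executable = file | libc::S_IXUSR | libc::S_IXGRP;
    assert_eq!(dir, 0o550); // directories: owner and group may read and search
    assert_eq!(file, 0o440); // plain files: owner and group may read
    assert_eq!(executable, 0o550); // executables additionally get the execute bits
}
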
diff --git a/zipfuse/src/main.rs b/zipfuse/src/main.rs
index 20d6fd6..e8be42c 100644
--- a/zipfuse/src/main.rs
+++ b/zipfuse/src/main.rs
@@ -31,7 +31,7 @@
use std::fs::{File, OpenOptions};
use std::io;
use std::io::Read;
-use std::mem::size_of;
+use std::mem::{size_of, MaybeUninit};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::path::PathBuf;
@@ -192,7 +192,8 @@
#[allow(clippy::useless_conversion)]
fn stat_from(&self, inode: Inode) -> io::Result<libc::stat64> {
let inode_data = self.find_inode(inode)?;
- let mut st = unsafe { std::mem::MaybeUninit::<libc::stat64>::zeroed().assume_init() };
+ // SAFETY: All fields of stat64 are valid for zero byte patterns.
+ let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
st.st_dev = 0;
st.st_nlink = if let Some(directory) = inode_data.get_directory() {
(2 + directory.len() as libc::nlink_t).into()