Merge "pvmfw: Add system register read/write macros"
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 14452a3..3217ee1 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -4,9 +4,6 @@
"name": "MicrodroidHostTestCases"
},
{
- "name": "ComposHostTestCases"
- },
- {
"name": "MicrodroidTestApp"
},
{
@@ -36,6 +33,9 @@
"name": "ComposBenchmarkApp"
},
{
+ "name": "ComposHostTestCases"
+ },
+ {
"name": "AVFHostTestCases"
}
],
@@ -49,9 +49,6 @@
"path": "packages/modules/Virtualization/apkdmverity"
},
{
- "path": "packages/modules/Virtualization/avmd"
- },
- {
"path": "packages/modules/Virtualization/encryptedstore"
},
{
diff --git a/apex/product_packages.mk b/apex/product_packages.mk
index 4293c80..ef84551 100644
--- a/apex/product_packages.mk
+++ b/apex/product_packages.mk
@@ -19,6 +19,9 @@
# To include the APEX in your build, insert this in your device.mk:
# $(call inherit-product, packages/modules/Virtualization/apex/product_packages.mk)
+# If a device supports AVF, it implies that the device uses non-flattened APEXes.
+$(call inherit-product, $(SRC_TARGET_DIR)/product/updatable_apex.mk)
+
PRODUCT_PACKAGES += \
com.android.compos \
diff --git a/authfs/fd_server/Android.bp b/authfs/fd_server/Android.bp
index 5097408..db1fd44 100644
--- a/authfs/fd_server/Android.bp
+++ b/authfs/fd_server/Android.bp
@@ -12,6 +12,7 @@
"libauthfs_fsverity_metadata",
"libbinder_rs",
"libclap",
+ "libfsverity_rs",
"liblibc",
"liblog_rust",
"libnix",
@@ -31,6 +32,7 @@
"libauthfs_fsverity_metadata",
"libbinder_rs",
"libclap",
+ "libfsverity_rs",
"liblibc",
"liblog_rust",
"libnix",
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index 01b8209..ada3ffb 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -31,7 +31,6 @@
use std::path::{Component, Path, PathBuf, MAIN_SEPARATOR};
use std::sync::{Arc, RwLock};
-use crate::fsverity;
use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
BnVirtFdService, FsStat::FsStat, IVirtFdService, MAX_REQUESTING_DATA,
};
diff --git a/authfs/fd_server/src/fsverity.rs b/authfs/fd_server/src/fsverity.rs
deleted file mode 100644
index 576f9dd..0000000
--- a/authfs/fd_server/src/fsverity.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use nix::ioctl_readwrite;
-use std::io;
-
-// Constants/values from uapi/linux/fsverity.h
-const FS_VERITY_METADATA_TYPE_MERKLE_TREE: u64 = 1;
-const FS_VERITY_METADATA_TYPE_SIGNATURE: u64 = 3;
-const FS_IOCTL_MAGIC: u8 = b'f';
-const FS_IOCTL_READ_VERITY_METADATA: u8 = 135;
-
-#[repr(C)]
-pub struct fsverity_read_metadata_arg {
- metadata_type: u64,
- offset: u64,
- length: u64,
- buf_ptr: u64,
- __reserved: u64,
-}
-
-ioctl_readwrite!(
- read_verity_metadata,
- FS_IOCTL_MAGIC,
- FS_IOCTL_READ_VERITY_METADATA,
- fsverity_read_metadata_arg
-);
-
-fn read_metadata(fd: i32, metadata_type: u64, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
- let mut arg = fsverity_read_metadata_arg {
- metadata_type,
- offset,
- length: buf.len() as u64,
- buf_ptr: buf.as_mut_ptr() as u64,
- __reserved: 0,
- };
- Ok(unsafe { read_verity_metadata(fd, &mut arg) }? as usize)
-}
-
-/// Read the raw Merkle tree from the fd, if it exists. The API semantics is similar to a regular
-/// pread(2), and may not return full requested buffer.
-pub fn read_merkle_tree(fd: i32, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
- read_metadata(fd, FS_VERITY_METADATA_TYPE_MERKLE_TREE, offset, buf)
-}
-
-/// Read the fs-verity signature from the fd (if exists). The returned signature should be complete.
-pub fn read_signature(fd: i32, buf: &mut [u8]) -> io::Result<usize> {
- read_metadata(fd, FS_VERITY_METADATA_TYPE_SIGNATURE, 0 /* offset */, buf)
-}
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index f91ebec..47983cb 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -23,7 +23,6 @@
//! client can then request the content of file 9 by offset and size.
mod aidl;
-mod fsverity;
use anyhow::{bail, Result};
use clap::Parser;
diff --git a/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java b/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java
index 357edea..7c85797 100644
--- a/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java
+++ b/authfs/tests/common/src/java/com/android/fs/common/AuthFsTestRule.java
@@ -88,7 +88,7 @@
private static CommandRunner sAndroid;
private static CommandRunner sMicrodroid;
- private final ExecutorService mThreadPool = Executors.newCachedThreadPool();
+ private ExecutorService mThreadPool;
public static void setUpAndroid(TestInformation testInfo) throws Exception {
assertNotNull(testInfo.getDevice());
@@ -242,6 +242,7 @@
}
public void setUpTest() throws Exception {
+ mThreadPool = Executors.newCachedThreadPool();
if (sAndroid != null) {
sAndroid.run("mkdir -p " + TEST_OUTPUT_DIR);
}
@@ -264,5 +265,10 @@
archiveLogThenDelete(this, getDevice(), vmRecentLog, "vm_recent.log-" + testName);
sAndroid.run("rm -rf " + TEST_OUTPUT_DIR);
+
+ if (mThreadPool != null) {
+ mThreadPool.shutdownNow();
+ mThreadPool = null;
+ }
}
}
diff --git a/avmd/Android.bp b/avmd/Android.bp
deleted file mode 100644
index e5e0553..0000000
--- a/avmd/Android.bp
+++ /dev/null
@@ -1,61 +0,0 @@
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-rust_defaults {
- name: "libavmd_defaults",
- crate_name: "avmd",
- host_supported: true,
- srcs: ["src/lib.rs"],
- prefer_rlib: true,
- rustlibs: [
- "libhex",
- "libserde",
- "libapkverify",
- ],
-}
-
-rust_library {
- name: "libavmd",
- defaults: ["libavmd_defaults"],
-}
-
-rust_defaults {
- name: "avmdtool.defaults",
- srcs: ["src/main.rs"],
- host_supported: true,
- prefer_rlib: true,
- rustlibs: [
- "libanyhow",
- "libapexutil_rust",
- "libapkverify",
- "libavmd",
- "libclap",
- "libserde",
- "libserde_cbor",
- "libvbmeta_rust",
- ],
-}
-
-rust_binary {
- name: "avmdtool",
- defaults: ["avmdtool.defaults"],
-}
-
-rust_test {
- name: "avmdtool.test",
- defaults: ["avmdtool.defaults"],
- test_suites: ["general-tests"],
-}
-
-rust_test {
- name: "avmdtool_tests",
- srcs: ["tests/*_test.rs"],
- test_suites: ["general-tests"],
- rustlibs: [
- "libtempfile",
- ],
- compile_multilib: "first",
- data_bins: ["avmdtool"],
- data: ["tests/data/*"],
-}
diff --git a/avmd/README.md b/avmd/README.md
deleted file mode 100644
index ae813a0..0000000
--- a/avmd/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# The AVMD image format
----
-
-The AVMD image format is used to descibe the verified code that a VM will
-load. This repository contains tools and libraries for working with the AVMD
-image format.
-
-# What is it?
-
-When a VM boots, it loads and verifies a set of images that control execution
-within the VM. Therefore, describing what executes in a VM means describing
-what is loaded. The AVMD image format is designed, for this purpose, to
-describe the closure of images that can be loaded and how they should be
-verified.
-
-# Caveats
-
-The AVMD image format will only allow Android supported signing formats. The
-supported formats are currently limited to [AVB][] and [APK][].
-
-[AVB]: https://android.googlesource.com/platform/external/avb/+/master/README.md
-[APK]: https://source.android.com/security/apksigning#schemes
-
-Verification of the images as they are loaded is the responsibility of the VM.
-The VM is required to only load the images described and to verify them against
-the included parameters. If the VM does not follow this requirement, the
-description of the VM may not be accurate and must not be trusted. Validating
-that the VM behaves as expected requires audit of all boot stages of the VM.
-
-# Using avmdtool
-
-The `.avmd` file can be created as follows
-
-```bash
-avmdtool create /tmp/out.avmd \
- --vbmeta pvmfw preload u-boot.bin \
- --vbmeta uboot env_vbmeta disk1/vbmeta.imb \
- --vbmeta uboot vbmeta micordoid/vbmeta.img \
- --apk microdroid payload compos.apk \
- --apk microdroid extra_apk extra_apk.apk \
- --apex-payload microdroid art_apex art.apex
-```
-
-You can read the `.avmd` file with
-
-```bash
-avmdtool dump /tmp/out.avmd
-```
diff --git a/avmd/TEST_MAPPING b/avmd/TEST_MAPPING
deleted file mode 100644
index 892eb2c..0000000
--- a/avmd/TEST_MAPPING
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "avf-presubmit": [
- {
- "name": "avmdtool.test"
- },
- {
- "name": "avmdtool_tests"
- }
- ]
-}
diff --git a/avmd/src/avmd.rs b/avmd/src/avmd.rs
deleted file mode 100644
index cb02f39..0000000
--- a/avmd/src/avmd.rs
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2022, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-extern crate alloc;
-
-use alloc::{
- string::{String, ToString},
- vec::Vec,
-};
-use apkverify::SignatureAlgorithmID;
-use core::fmt;
-use serde::{Deserialize, Serialize};
-
-/// An Avmd struct contains
-/// - A header with version information that allows rollback when needed.
-/// - A list of descriptors that describe different images.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct Avmd {
- header: Header,
- descriptors: Vec<Descriptor>,
-}
-
-impl fmt::Display for Avmd {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- writeln!(f, "Descriptors:")?;
- for descriptor in &self.descriptors {
- write!(f, "{}", descriptor)?;
- }
- Ok(())
- }
-}
-
-impl Avmd {
- /// Creates an instance of Avmd with a given list of descriptors.
- pub fn new(descriptors: Vec<Descriptor>) -> Avmd {
- Avmd { header: Header::default(), descriptors }
- }
-}
-
-static AVMD_MAGIC: u32 = 0x444d5641;
-static AVMD_VERSION_MAJOR: u16 = 1;
-static AVMD_VERSION_MINOR: u16 = 0;
-
-/// Header information for AVMD.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-struct Header {
- magic: u32,
- version_major: u16,
- version_minor: u16,
-}
-
-impl Default for Header {
- fn default() -> Self {
- Header {
- magic: AVMD_MAGIC,
- version_major: AVMD_VERSION_MAJOR,
- version_minor: AVMD_VERSION_MINOR,
- }
- }
-}
-
-/// AVMD descriptor.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub enum Descriptor {
- /// Descriptor type for the VBMeta images.
- VbMeta(VbMetaDescriptor),
- /// Descriptor type for APK.
- Apk(ApkDescriptor),
-}
-
-impl fmt::Display for Descriptor {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match self {
- Descriptor::VbMeta(descriptor) => write!(f, "{}", descriptor),
- Descriptor::Apk(descriptor) => write!(f, "{}", descriptor),
- }
- }
-}
-
-/// VbMeta descriptor.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct VbMetaDescriptor {
- /// The identifier of this resource.
- #[serde(flatten)]
- pub resource: ResourceIdentifier,
- /// The SHA-512 [VBMeta digest][] calculated from the top-level VBMeta image.
- ///
- /// [VBMeta digest]: https://android.googlesource.com/platform/external/avb/+/master/README.md#the-vbmeta-digest
- pub vbmeta_digest: Vec<u8>,
-}
-
-impl fmt::Display for VbMetaDescriptor {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- writeln!(f, " VBMeta descriptor:")?;
- writeln!(f, " namespace: {}", self.resource.namespace)?;
- writeln!(f, " name: {}", self.resource.name)?;
- writeln!(f, " vbmeta digest: {}", hex::encode(&self.vbmeta_digest))?;
- Ok(())
- }
-}
-
-/// APK descriptor.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct ApkDescriptor {
- /// The identifier of this resource.
- #[serde(flatten)]
- pub resource: ResourceIdentifier,
- /// The ID of the algoithm used to sign the APK.
- /// It should be one of the algorithms in the [list][].
- ///
- /// [list]: https://source.android.com/security/apksigning/v2#signature-algorithm-ids
- pub signature_algorithm_id: SignatureAlgorithmID,
- /// Digest of the APK's v3 signing block. TODO: fix
- pub apk_digest: Vec<u8>,
-}
-
-impl fmt::Display for ApkDescriptor {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- writeln!(f, " APK descriptor:")?;
- writeln!(f, " namespace: {}", self.resource.namespace)?;
- writeln!(f, " name: {}", self.resource.name)?;
- writeln!(f, " Signing algorithm ID: {:#04x}", self.signature_algorithm_id.to_u32())?;
- writeln!(f, " APK digest: {}", hex::encode(&self.apk_digest))?;
- Ok(())
- }
-}
-
-/// Resource identifier regroups information to identify resources.
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct ResourceIdentifier {
- /// Namespace of the resource.
- namespace: String,
- /// Name of the resource.
- name: String,
-}
-
-impl ResourceIdentifier {
- /// Creates an instance of ResourceIdentifier with the given
- /// namespace and name.
- pub fn new(namespace: &str, name: &str) -> ResourceIdentifier {
- ResourceIdentifier { namespace: namespace.to_string(), name: name.to_string() }
- }
-}
diff --git a/avmd/src/lib.rs b/avmd/src/lib.rs
deleted file mode 100644
index 7a06e6a..0000000
--- a/avmd/src/lib.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2022, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Library for handling AVMD blobs.
-
-#![no_std]
-
-mod avmd;
-
-pub use avmd::{ApkDescriptor, Avmd, Descriptor, ResourceIdentifier, VbMetaDescriptor};
diff --git a/avmd/src/main.rs b/avmd/src/main.rs
deleted file mode 100644
index 8d7cb57..0000000
--- a/avmd/src/main.rs
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2022, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Tool for handling AVMD blobs.
-
-use anyhow::{anyhow, bail, Result};
-use apexutil::get_payload_vbmeta_image_hash;
-use apkverify::get_apk_digest;
-use avmd::{ApkDescriptor, Avmd, Descriptor, ResourceIdentifier, VbMetaDescriptor};
-use clap::{
- builder::ValueParser,
- parser::{Indices, ValuesRef},
- Arg, ArgAction, ArgMatches, Command,
-};
-use serde::ser::Serialize;
-use std::{fs::File, path::PathBuf};
-use vbmeta::VbMetaImage;
-
-fn get_vbmeta_image_hash(file: &str) -> Result<Vec<u8>> {
- let img = VbMetaImage::verify_path(file)?;
- Ok(img.hash().ok_or_else(|| anyhow!("No hash as VBMeta image isn't signed"))?.to_vec())
-}
-
-/// Iterate over a set of argument values, that could be empty or come in
-/// (<index>, <namespace>, <name>, <file>) tuple.
-struct NamespaceNameFileIterator<'a> {
- indices: Option<Indices<'a>>,
- values: Option<ValuesRef<'a, String>>,
-}
-
-impl<'a> NamespaceNameFileIterator<'a> {
- fn new(args: &'a ArgMatches, name: &'a str) -> Self {
- NamespaceNameFileIterator { indices: args.indices_of(name), values: args.get_many(name) }
- }
-}
-
-impl<'a> Iterator for NamespaceNameFileIterator<'a> {
- type Item = (usize, &'a str, &'a str, &'a str);
-
- fn next(&mut self) -> Option<Self::Item> {
- match (self.indices.as_mut(), self.values.as_mut()) {
- (Some(indices), Some(values)) => {
- match (indices.nth(2), values.next(), values.next(), values.next()) {
- (Some(index), Some(namespace), Some(name), Some(file)) => {
- Some((index, namespace, name, file))
- }
- _ => None,
- }
- }
- _ => None,
- }
- }
-}
-
-fn create(args: &ArgMatches) -> Result<()> {
- // Store descriptors in the order they were given in the arguments
- // TODO: instead, group them by namespace?
- let mut descriptors = std::collections::BTreeMap::new();
- for (i, namespace, name, file) in NamespaceNameFileIterator::new(args, "vbmeta") {
- descriptors.insert(
- i,
- Descriptor::VbMeta(VbMetaDescriptor {
- resource: ResourceIdentifier::new(namespace, name),
- vbmeta_digest: get_vbmeta_image_hash(file)?,
- }),
- );
- }
- for (i, namespace, name, file) in NamespaceNameFileIterator::new(args, "apk") {
- let file = File::open(file)?;
- let (signature_algorithm_id, apk_digest) = get_apk_digest(file, /*verify=*/ true)?;
- descriptors.insert(
- i,
- Descriptor::Apk(ApkDescriptor {
- resource: ResourceIdentifier::new(namespace, name),
- signature_algorithm_id,
- apk_digest: apk_digest.to_vec(),
- }),
- );
- }
- for (i, namespace, name, file) in NamespaceNameFileIterator::new(args, "apex-payload") {
- descriptors.insert(
- i,
- Descriptor::VbMeta(VbMetaDescriptor {
- resource: ResourceIdentifier::new(namespace, name),
- vbmeta_digest: get_payload_vbmeta_image_hash(file)?,
- }),
- );
- }
- let avmd = Avmd::new(descriptors.into_values().collect());
- let mut bytes = Vec::new();
- avmd.serialize(
- &mut serde_cbor::Serializer::new(&mut serde_cbor::ser::IoWrite::new(&mut bytes))
- .packed_format()
- .legacy_enums(),
- )?;
- std::fs::write(args.get_one::<PathBuf>("file").unwrap(), &bytes)?;
- Ok(())
-}
-
-fn dump(args: &ArgMatches) -> Result<()> {
- let file = std::fs::read(args.get_one::<PathBuf>("file").unwrap())?;
- let avmd: Avmd = serde_cbor::from_slice(&file)?;
- println!("{}", avmd);
- Ok(())
-}
-
-fn clap_command() -> Command {
- let namespace_name_file = ["namespace", "name", "file"];
-
- Command::new("avmdtool")
- .subcommand_required(true)
- .arg_required_else_help(true)
- .subcommand(
- Command::new("create")
- .arg_required_else_help(true)
- .arg(Arg::new("file").value_parser(ValueParser::path_buf()).required(true))
- .arg(
- Arg::new("vbmeta")
- .long("vbmeta")
- .value_names(namespace_name_file)
- .num_args(3)
- .action(ArgAction::Append),
- )
- .arg(
- Arg::new("apk")
- .long("apk")
- .value_names(namespace_name_file)
- .num_args(3)
- .action(ArgAction::Append),
- )
- .arg(
- Arg::new("apex-payload")
- .long("apex-payload")
- .value_names(namespace_name_file)
- .num_args(3)
- .action(ArgAction::Append),
- ),
- )
- .subcommand(
- Command::new("dump")
- .arg_required_else_help(true)
- .arg(Arg::new("file").value_parser(ValueParser::path_buf()).required(true)),
- )
-}
-
-fn main() -> Result<()> {
- let args = clap_command().get_matches();
- match args.subcommand() {
- Some(("create", sub_args)) => create(sub_args)?,
- Some(("dump", sub_args)) => dump(sub_args)?,
- _ => bail!("Invalid arguments"),
- }
- Ok(())
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn verify_command() {
- // Check that the command parsing has been configured in a valid way.
- clap_command().debug_assert();
- }
-}
diff --git a/avmd/tests/avmdtool_test.rs b/avmd/tests/avmdtool_test.rs
deleted file mode 100644
index 4647f06..0000000
--- a/avmd/tests/avmdtool_test.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2022, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Tests for avmdtool.
-
-use std::fs;
-use std::process::Command;
-use tempfile::TempDir;
-
-#[test]
-fn test_dump() {
- let filename = "tests/data/test.avmd";
- assert!(
- fs::metadata(filename).is_ok(),
- "File '{}' does not exist. You can re-create it with:
- avmdtool create {} \\
- --apex-payload microdroid vbmeta tests/data/test.apex \\
- --apk microdroid_manager apk \\
- tests/data/v3-only-with-rsa-pkcs1-sha256-4096.apk \\
- --apk microdroid_manager extra-apk tests/data/v3-only-with-stamp.apk",
- filename,
- filename
- );
- let output = Command::new("./avmdtool").args(["dump", filename]).output().unwrap();
- assert!(output.status.success());
- assert_eq!(output.stdout, fs::read("tests/data/test.avmd.dump").unwrap());
-}
-
-#[test]
-fn test_create() {
- let test_dir = TempDir::new().unwrap();
- let test_file_path = test_dir.path().join("tmp_test.amvd");
- let output = Command::new("./avmdtool")
- .args([
- "create",
- test_file_path.to_str().unwrap(),
- "--apex-payload",
- "microdroid",
- "vbmeta",
- "tests/data/test.apex",
- "--apk",
- "microdroid_manager",
- "apk",
- "tests/data/v3-only-with-rsa-pkcs1-sha256-4096.apk",
- "--apk",
- "microdroid_manager",
- "extra-apk",
- "tests/data/v3-only-with-stamp.apk",
- ])
- .output()
- .unwrap();
- assert!(output.status.success());
- assert_eq!(fs::read(test_file_path).unwrap(), fs::read("tests/data/test.avmd").unwrap());
-}
diff --git a/avmd/tests/data/test.apex b/avmd/tests/data/test.apex
deleted file mode 100644
index fd79365..0000000
--- a/avmd/tests/data/test.apex
+++ /dev/null
Binary files differ
diff --git a/avmd/tests/data/test.avmd b/avmd/tests/data/test.avmd
deleted file mode 100644
index e567125..0000000
--- a/avmd/tests/data/test.avmd
+++ /dev/null
Binary files differ
diff --git a/avmd/tests/data/test.avmd.dump b/avmd/tests/data/test.avmd.dump
deleted file mode 100644
index a63a151..0000000
--- a/avmd/tests/data/test.avmd.dump
+++ /dev/null
@@ -1,16 +0,0 @@
-Descriptors:
- VBMeta descriptor:
- namespace: microdroid
- name: vbmeta
- vbmeta digest: 296e32a76544de9da01713e471403ab4667705ad527bb4f1fac0cf61e7ce122d
- APK descriptor:
- namespace: microdroid_manager
- name: apk
- Signing algorithm ID: 0x103
- APK digest: 0df2426ea33aedaf495d88e5be0c6a1663ff0a81c5ed12d5b2929ae4b4300f2f
- APK descriptor:
- namespace: microdroid_manager
- name: extra-apk
- Signing algorithm ID: 0x201
- APK digest: 626bb647c0089717a7ffa52fd8e845f9403d5e27f7a5a8752e47b3345fb82f5c
-
diff --git a/avmd/tests/data/v3-only-with-rsa-pkcs1-sha256-4096.apk b/avmd/tests/data/v3-only-with-rsa-pkcs1-sha256-4096.apk
deleted file mode 100644
index 0c9391c..0000000
--- a/avmd/tests/data/v3-only-with-rsa-pkcs1-sha256-4096.apk
+++ /dev/null
Binary files differ
diff --git a/avmd/tests/data/v3-only-with-stamp.apk b/avmd/tests/data/v3-only-with-stamp.apk
deleted file mode 100644
index 5f65214..0000000
--- a/avmd/tests/data/v3-only-with-stamp.apk
+++ /dev/null
Binary files differ
diff --git a/compos/aidl/com/android/compos/ICompOsService.aidl b/compos/aidl/com/android/compos/ICompOsService.aidl
index df8c91e..497c35e 100644
--- a/compos/aidl/com/android/compos/ICompOsService.aidl
+++ b/compos/aidl/com/android/compos/ICompOsService.aidl
@@ -87,7 +87,7 @@
/**
* Returns the attestation certificate chain of the current VM. The result is in the form of a
* CBOR encoded Boot Certificate Chain (BCC) as defined in
- * hardware/interfaces/security/dice/aidl/android/hardware/security/dice/Bcc.aidl.
+ * hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/ProtectedData.aidl
*/
byte[] getAttestationChain();
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index 35947d7..05bc093 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -12,6 +12,7 @@
"compos_aidl_interface-rust",
"libanyhow",
"libbinder_rs",
+ "libglob",
"liblazy_static",
"liblog_rust",
"libnested_virt",
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index 92c9a3c..bf4c678 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -19,7 +19,7 @@
use crate::timeouts::TIMEOUTS;
use crate::{
get_vm_config_path, BUILD_MANIFEST_APK_PATH, BUILD_MANIFEST_SYSTEM_EXT_APK_PATH,
- COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT,
+ COMPOS_APEX_ROOT, COMPOS_VSOCK_PORT,
};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
CpuTopology::CpuTopology,
@@ -27,12 +27,13 @@
VirtualMachineAppConfig::{DebugLevel::DebugLevel, Payload::Payload, VirtualMachineAppConfig},
VirtualMachineConfig::VirtualMachineConfig,
};
-use anyhow::{bail, Context, Result};
+use anyhow::{anyhow, bail, Context, Result};
use binder::{ParcelFileDescriptor, Strong};
use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
+use glob::glob;
use log::{info, warn};
use rustutils::system_properties;
-use std::fs::{self, File};
+use std::fs::File;
use std::path::{Path, PathBuf};
use vmclient::{DeathReason, ErrorCode, VmInstance, VmWaitError};
@@ -79,7 +80,6 @@
let instance_fd = ParcelFileDescriptor::new(instance_image);
let apex_dir = Path::new(COMPOS_APEX_ROOT);
- let data_dir = Path::new(COMPOS_DATA_ROOT);
let config_apk = locate_config_apk(apex_dir)?;
let apk_fd = File::open(config_apk).context("Failed to open config APK file")?;
@@ -109,18 +109,6 @@
let debug_level = if parameters.debug_mode { DebugLevel::FULL } else { DebugLevel::NONE };
- let (console_fd, log_fd) = if debug_level == DebugLevel::NONE {
- (None, None)
- } else {
- // Console output and the system log output from the VM are redirected to file.
- let console_fd = File::create(data_dir.join("vm_console.log"))
- .context("Failed to create console log file")?;
- let log_fd = File::create(data_dir.join("vm.log"))
- .context("Failed to create system log file")?;
- info!("Running in debug level {:?}", debug_level);
- (Some(console_fd), Some(log_fd))
- };
-
let cpu_topology = match parameters.cpu_topology {
VmCpuTopology::OneCpu => CpuTopology::ONE_CPU,
VmCpuTopology::MatchHost => CpuTopology::MATCH_HOST,
@@ -142,6 +130,8 @@
gdbPort: 0, // Don't start gdb-server
});
+ // Let logs go to logcat.
+ let (console_fd, log_fd) = (None, None);
let callback = Box::new(Callback {});
let instance = VmInstance::create(service, &config, console_fd, log_fd, Some(callback))
.context("Failed to create VM")?;
@@ -194,15 +184,19 @@
// Our config APK will be in a directory under app, but the name of the directory is at the
// discretion of the build system. So just look in each sub-directory until we find it.
// (In practice there will be exactly one directory, so this shouldn't take long.)
- let app_dir = apex_dir.join("app");
- for dir in fs::read_dir(app_dir).context("Reading app dir")? {
- let apk_file = dir?.path().join("CompOSPayloadApp.apk");
- if apk_file.is_file() {
- return Ok(apk_file);
- }
+ let app_glob = apex_dir.join("app").join("**").join("CompOSPayloadApp*.apk");
+ let mut entries: Vec<PathBuf> =
+ glob(app_glob.to_str().ok_or_else(|| anyhow!("Invalid path: {}", app_glob.display()))?)
+ .context("failed to glob")?
+ .filter_map(|e| e.ok())
+ .collect();
+ if entries.len() > 1 {
+ bail!("Found more than one apk matching {}", app_glob.display());
}
-
- bail!("Failed to locate CompOSPayloadApp.apk")
+ match entries.pop() {
+ Some(path) => Ok(path),
+ None => Err(anyhow!("No apks match {}", app_glob.display())),
+ }
}
fn prepare_idsig(
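Note on the `locate_config_apk` change above: the manual directory walk is replaced by a single glob over the APEX's `app` directory, expecting exactly one match. A minimal standalone sketch of that pattern, assuming only the `glob` and `anyhow` crates (the paths below are illustrative, not the actual CompOS constants):

```rust
use anyhow::{anyhow, bail, Context, Result};
use glob::glob;
use std::path::{Path, PathBuf};

/// Returns the single file matching `pattern` under `root`, or an error for zero or many matches.
fn find_single_match(root: &Path, pattern: &str) -> Result<PathBuf> {
    let full = root.join(pattern);
    let pattern_str = full.to_str().ok_or_else(|| anyhow!("Invalid path: {}", full.display()))?;
    // glob() yields Result<PathBuf, GlobError>; unreadable entries are silently skipped here.
    let mut matches: Vec<PathBuf> =
        glob(pattern_str).context("failed to glob")?.filter_map(|e| e.ok()).collect();
    if matches.len() > 1 {
        bail!("Found more than one file matching {}", full.display());
    }
    matches.pop().ok_or_else(|| anyhow!("No file matches {}", full.display()))
}

fn main() -> Result<()> {
    // Illustrative root; the real code globs under the CompOS APEX's app directory.
    let apk =
        find_single_match(Path::new("/apex/com.android.compos"), "app/**/CompOSPayloadApp*.apk")?;
    println!("{}", apk.display());
    Ok(())
}
```

The glob also tolerates the build system appending a suffix to the APK name (`CompOSPayloadApp*.apk`), which the previous exact-name lookup did not.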
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index cee4b01..b0294dd 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -16,10 +16,13 @@
"libbinder_rs",
"libcompos_common",
"libcomposd_native_rust",
+ "libfsverity_rs",
"libminijail_rust",
"libnix",
"liblibc",
"liblog_rust",
+ "libodsign_proto_rust",
+ "libprotobuf",
"librustutils",
"libshared_child",
"libvmclient",
diff --git a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
index 569bba5..a3ce553 100644
--- a/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
+++ b/compos/composd/aidl/android/system/composd/ICompilationTaskCallback.aidl
@@ -25,6 +25,8 @@
CompilationFailed,
/** We ran compilation in the VM, but it reported a problem. */
UnexpectedCompilationResult,
+ /** We failed to fully enable fs-verity on the output artifacts. */
+ FailedToEnableFsverity,
}
/**
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index 2db13c7..2ce12f8 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -19,16 +19,16 @@
use crate::instance_starter::{CompOsInstance, InstanceStarter};
use android_system_virtualizationservice::aidl::android::system::virtualizationservice;
-use anyhow::{bail, Result};
+use anyhow::{anyhow, bail, Context, Result};
use binder::Strong;
use compos_common::compos_client::{VmCpuTopology, VmParameters};
use compos_common::{CURRENT_INSTANCE_DIR, TEST_INSTANCE_DIR};
+use log::info;
+use rustutils::system_properties;
+use std::str::FromStr;
use std::sync::{Arc, Mutex, Weak};
use virtualizationservice::IVirtualizationService::IVirtualizationService;
-// Enough memory to complete odrefresh in the VM.
-const VM_MEMORY_MIB: i32 = 1280;
-
pub struct InstanceManager {
service: Strong<dyn IVirtualizationService>,
state: Mutex<State>,
@@ -81,12 +81,33 @@
// number of dex2oat threads.
let cpu_topology = VmCpuTopology::MatchHost;
let task_profiles = vec!["SCHED_SP_COMPUTE".to_string()];
- Ok(VmParameters {
- cpu_topology,
- task_profiles,
- memory_mib: Some(VM_MEMORY_MIB),
- ..Default::default()
- })
+ let memory_mib = Some(compos_memory_mib()?);
+ Ok(VmParameters { cpu_topology, task_profiles, memory_mib, ..Default::default() })
+}
+
+fn compos_memory_mib() -> Result<i32> {
+ // Enough memory to complete odrefresh in the VM, for older versions of ART that don't set the
+ // property explicitly.
+ const DEFAULT_MEMORY_MIB: u32 = 400;
+
+ let art_requested_mib =
+ read_property("composd.vm.art.memory_mib.config")?.unwrap_or(DEFAULT_MEMORY_MIB);
+
+ let vm_adjustment_mib = read_property("composd.vm.vendor.memory_mib.config")?.unwrap_or(0);
+
+ info!(
+ "Compilation VM memory: ART requests {art_requested_mib} MiB, \
+ VM adjust is {vm_adjustment_mib}"
+ );
+ art_requested_mib
+ .checked_add_signed(vm_adjustment_mib)
+ .and_then(|x| x.try_into().ok())
+ .context("Invalid vm memory adjustment")
+}
+
+fn read_property<T: FromStr>(name: &str) -> Result<Option<T>> {
+ let str = system_properties::read(name).with_context(|| format!("Failed to read {name}"))?;
+ str.map(|s| s.parse().map_err(|_| anyhow!("Invalid {name}: {s}"))).transpose()
}
// Ensures we only run one instance at a time.
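The fixed 1280 MiB is replaced above by a value computed from two system properties: the ART-requested size plus a signed vendor adjustment, combined with `checked_add_signed` so a negative adjustment cannot underflow. A self-contained sketch of just that arithmetic (hard-coded inputs stand in for the property reads in the real code):

```rust
use anyhow::{Context, Result};

/// Combines the base request (MiB) with a signed adjustment, rejecting under/overflow.
fn compos_memory_mib(art_requested_mib: u32, vm_adjustment_mib: i32) -> Result<i32> {
    art_requested_mib
        .checked_add_signed(vm_adjustment_mib) // None if the sum would fall outside u32's range
        .and_then(|x| i32::try_from(x).ok()) // the VM config field is an i32
        .context("Invalid vm memory adjustment")
}

fn main() -> Result<()> {
    assert_eq!(compos_memory_mib(400, 0)?, 400); // default when ART sets no property
    assert_eq!(compos_memory_mib(400, -100)?, 300); // vendor trims the VM
    assert!(compos_memory_mib(100, -200).is_err()); // would underflow
    Ok(())
}
```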
diff --git a/compos/composd/src/odrefresh_task.rs b/compos/composd/src/odrefresh_task.rs
index 3a699ab..a98f50d 100644
--- a/compos/composd/src/odrefresh_task.rs
+++ b/compos/composd/src/odrefresh_task.rs
@@ -28,11 +28,16 @@
CompilationMode::CompilationMode, ICompOsService, OdrefreshArgs::OdrefreshArgs,
};
use compos_common::odrefresh::{
- is_system_property_interesting, ExitCode, ODREFRESH_OUTPUT_ROOT_DIR,
+ is_system_property_interesting, ExitCode, CURRENT_ARTIFACTS_SUBDIR, ODREFRESH_OUTPUT_ROOT_DIR,
+ PENDING_ARTIFACTS_SUBDIR,
};
+use compos_common::BUILD_MANIFEST_SYSTEM_EXT_APK_PATH;
use log::{error, info, warn};
+use odsign_proto::odsign_info::OdsignInfo;
+use protobuf::Message;
use rustutils::system_properties;
-use std::fs::{remove_dir_all, OpenOptions};
+use std::fs::{remove_dir_all, File, OpenOptions};
+use std::os::fd::AsFd;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, OwnedFd};
use std::path::Path;
@@ -103,8 +108,21 @@
let result = match exit_code {
Ok(ExitCode::CompilationSuccess) => {
- info!("CompilationSuccess");
- callback.onSuccess()
+ if compilation_mode == CompilationMode::TEST_COMPILE {
+ info!("Compilation success");
+ callback.onSuccess()
+ } else {
+ // compos.info is generated only during NORMAL_COMPILE
+ if let Err(e) = enable_fsverity_to_all() {
+ let message =
+ format!("Unexpected failure when enabling fs-verity: {:?}", e);
+ error!("{}", message);
+ callback.onFailure(FailureReason::FailedToEnableFsverity, &message)
+ } else {
+ info!("Compilation success, fs-verity enabled");
+ callback.onSuccess()
+ }
+ }
}
Ok(exit_code) => {
let message = format!("Unexpected odrefresh result: {:?}", exit_code);
@@ -161,13 +179,20 @@
let output_dir_raw_fd = output_dir_fd.as_raw_fd();
let staging_dir_raw_fd = staging_dir_fd.as_raw_fd();
- // Get the /system_ext FD differently because it may not exist.
- let (system_ext_dir_raw_fd, ro_dir_fds) =
- if let Ok(system_ext_dir_fd) = open_dir(Path::new("/system_ext")) {
- (system_ext_dir_fd.as_raw_fd(), vec![system_dir_fd, system_ext_dir_fd])
- } else {
- (-1, vec![system_dir_fd])
- };
+ // When the VM starts, it starts with or without mounting the extra build manifest APK from
+ // /system_ext. Later, on request (here), we need to pass the directory FD of /system_ext, but
+ // only if the VM is configured to need it.
+ //
+ // It is possible to plumb the information from ComposClient to here, but that is extra
+ // complexity, and it feels slightly odd to encode the VM's state in the task itself, as this is
+ // a request to the VM.
+ let need_system_ext = Path::new(BUILD_MANIFEST_SYSTEM_EXT_APK_PATH).exists();
+ let (system_ext_dir_raw_fd, ro_dir_fds) = if need_system_ext {
+ let system_ext_dir_fd = open_dir(Path::new("/system_ext"))?;
+ (system_ext_dir_fd.as_raw_fd(), vec![system_dir_fd, system_ext_dir_fd])
+ } else {
+ (-1, vec![system_dir_fd])
+ };
// Spawn a fd_server to serve the FDs.
let fd_server_config = FdServerConfig {
@@ -197,6 +222,31 @@
ExitCode::from_i32(exit_code.into())
}
+/// Enables fs-verity on the output artifacts listed in compos.info in the pending directory. Any
+/// error before completion simply aborts, leaving previously enabled files as they are.
+fn enable_fsverity_to_all() -> Result<()> {
+ let odrefresh_current_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(CURRENT_ARTIFACTS_SUBDIR);
+ let pending_dir = Path::new(ODREFRESH_OUTPUT_ROOT_DIR).join(PENDING_ARTIFACTS_SUBDIR);
+ let mut reader =
+ File::open(&pending_dir.join("compos.info")).context("Failed to open compos.info")?;
+ let compos_info = OdsignInfo::parse_from_reader(&mut reader).context("Failed to parse")?;
+
+ for path_str in compos_info.file_hashes.keys() {
+ // Need to rebase the directory onto compos-pending first
+ if let Ok(relpath) = Path::new(path_str).strip_prefix(&odrefresh_current_dir) {
+ let path = pending_dir.join(relpath);
+ let file = File::open(&path).with_context(|| format!("Failed to open {:?}", path))?;
+ // We don't expect an error, but if one happens, don't bother handling it here. For
+ // simplicity, just let odsign do the regular check.
+ fsverity::enable(file.as_fd())
+ .with_context(|| format!("Failed to enable fs-verity to {:?}", path))?;
+ } else {
+ warn!("Skip due to unexpected path: {}", path_str);
+ }
+ }
+ Ok(())
+}
+
/// Returns an `OwnedFD` of the directory.
fn open_dir(path: &Path) -> Result<OwnedFd> {
Ok(OwnedFd::from(
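In `enable_fsverity_to_all` above, each path listed in compos.info refers to the `current` artifacts directory, so it is rebased onto the `pending` directory before fs-verity is enabled. The rebasing step in isolation, using only std and illustrative paths (not the real output directories):

```rust
use std::path::{Path, PathBuf};

/// Rebases `path` from `old_root` onto `new_root`; None if `path` is not under `old_root`.
fn rebase(path: &Path, old_root: &Path, new_root: &Path) -> Option<PathBuf> {
    path.strip_prefix(old_root).ok().map(|rel| new_root.join(rel))
}

fn main() {
    let current = Path::new("/output/current");
    let pending = Path::new("/output/pending");
    let rebased = rebase(Path::new("/output/current/arm64/boot.oat"), current, pending);
    assert_eq!(rebased.as_deref(), Some(Path::new("/output/pending/arm64/boot.oat")));
    // Paths outside the current directory are skipped with a warning in the real code.
    assert_eq!(rebase(Path::new("/elsewhere/file"), current, pending), None);
}
```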
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
index 479ae7f..933ac7a 100644
--- a/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationJobService.java
@@ -234,6 +234,10 @@
result = IsolatedCompilationMetrics.RESULT_UNEXPECTED_COMPILATION_RESULT;
break;
+ case ICompilationTaskCallback.FailureReason.FailedToEnableFsverity:
+ result = IsolatedCompilationMetrics.RESULT_FAILED_TO_ENABLE_FSVERITY;
+ break;
+
default:
result = IsolatedCompilationMetrics.RESULT_UNKNOWN_FAILURE;
break;
diff --git a/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java b/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java
index e333198..f7799a4 100644
--- a/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java
+++ b/compos/service/java/com/android/server/compos/IsolatedCompilationMetrics.java
@@ -36,9 +36,17 @@
// TODO(b/218525257): Move the definition of these enums to atoms.proto
@Retention(RetentionPolicy.SOURCE)
- @IntDef({RESULT_UNKNOWN, RESULT_SUCCESS, RESULT_UNKNOWN_FAILURE, RESULT_FAILED_TO_START,
- RESULT_JOB_CANCELED, RESULT_COMPILATION_FAILED, RESULT_UNEXPECTED_COMPILATION_RESULT,
- RESULT_COMPOSD_DIED})
+ @IntDef({
+ RESULT_UNKNOWN,
+ RESULT_SUCCESS,
+ RESULT_UNKNOWN_FAILURE,
+ RESULT_FAILED_TO_START,
+ RESULT_JOB_CANCELED,
+ RESULT_COMPILATION_FAILED,
+ RESULT_UNEXPECTED_COMPILATION_RESULT,
+ RESULT_COMPOSD_DIED,
+ RESULT_FAILED_TO_ENABLE_FSVERITY
+ })
public @interface CompilationResult {}
// Keep this in sync with Result enum in IsolatedCompilationEnded in
@@ -59,6 +67,9 @@
.ISOLATED_COMPILATION_ENDED__COMPILATION_RESULT__RESULT_UNEXPECTED_COMPILATION_RESULT;
public static final int RESULT_COMPOSD_DIED =
ArtStatsLog.ISOLATED_COMPILATION_ENDED__COMPILATION_RESULT__RESULT_COMPOSD_DIED;
+ public static final int RESULT_FAILED_TO_ENABLE_FSVERITY =
+ ArtStatsLog
+ .ISOLATED_COMPILATION_ENDED__COMPILATION_RESULT__RESULT_FAILED_TO_ENABLE_FSVERITY;
@Retention(RetentionPolicy.SOURCE)
@IntDef({SCHEDULING_RESULT_UNKNOWN, SCHEDULING_SUCCESS, SCHEDULING_FAILURE})
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index 4e3d0a8..8a1b41a 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -27,7 +27,6 @@
import com.android.microdroid.test.host.CommandRunner;
import com.android.microdroid.test.host.MicrodroidHostTestCaseBase;
-import com.android.tradefed.device.DeviceNotAvailableException;
import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.result.FileInputStreamSource;
import com.android.tradefed.result.LogDataType;
@@ -95,8 +94,6 @@
public void tearDown() throws Exception {
killVmAndReconnectAdb();
- archiveVmLogsThenDelete("teardown");
-
CommandRunner android = new CommandRunner(getDevice());
// Clear up any CompOS instance files we created
@@ -113,19 +110,6 @@
}
}
- private void archiveVmLogsThenDelete(String suffix) throws DeviceNotAvailableException {
- archiveLogThenDelete(
- mTestLogs,
- getDevice(),
- COMPOS_APEXDATA_DIR + "/vm_console.log",
- "vm_console.log-" + suffix + "-" + mTestName.getMethodName());
- archiveLogThenDelete(
- mTestLogs,
- getDevice(),
- COMPOS_APEXDATA_DIR + "/vm.log",
- "vm.log-" + suffix + "-" + mTestName.getMethodName());
- }
-
@Test
public void testOdrefreshSpeed() throws Exception {
getDevice().setProperty(SYSTEM_SERVER_COMPILER_FILTER_PROP_NAME, "speed");
@@ -170,10 +154,6 @@
}
killVmAndReconnectAdb();
- // These logs are potentially useful, capture them before they are overwritten by
- // compos_verify.
- archiveVmLogsThenDelete("compile");
-
// Expect the BCC extracted from the BCC to be well-formed.
assertVmBccIsValid();
diff --git a/demo/README.md b/demo/README.md
index c5c87d8..fa4e38a 100644
--- a/demo/README.md
+++ b/demo/README.md
@@ -8,13 +8,18 @@
## Installing
+You can install the app like this:
```
-adb install -t out/dist/MicrodroidDemoApp.apk
-adb shell pm grant com.android.microdroid.demo android.permission.MANAGE_VIRTUAL_MACHINE
+adb install -t -g out/dist/MicrodroidDemoApp.apk
```
-Don't run the app before granting the permission. Or you will have to uninstall
-the app, and then re-install it.
+(`-t` allows it to be installed even though it is marked as a test app; `-g` grants
+the necessary permission.)
+
+You can also explicitly grant or revoke the permission, e.g.
+```
+adb shell pm grant com.android.microdroid.demo android.permission.MANAGE_VIRTUAL_MACHINE
+```
## Running
diff --git a/docs/debug/tracing.md b/docs/debug/tracing.md
new file mode 100644
index 0000000..facd9d0
--- /dev/null
+++ b/docs/debug/tracing.md
@@ -0,0 +1,194 @@
+# Hypervisor & guest tracing
+
+## Hypervisor tracing
+
+Starting with the android14-5.15 kernel, it is possible to get traces from the hypervisor.
+
+### User space interface
+
+The user space hypervisor tracing interface is located either at /sys/kernel/tracing/hyp or at
+/sys/kernel/debug/tracing/hyp. On Android phones it will usually be /sys/kernel/tracing/hyp,
+while on QEMU it will be /sys/kernel/debug/tracing/hyp.
+
+The user space interface is very similar to the ftrace user space interface; however, there are some
+differences, e.g.:
+
+* Only the boot clock is supported, and there is no way for user space to change the tracing_clock.
+* Hypervisor tracing periodically polls the data from the hypervisor; this is different from a
+ regular ftrace instance, which pushes events into the ring buffer.
+* Resetting ring buffers (by clearing the trace file) is only supported when there are no active
+ readers. If the trace file is cleared while there are active readers, then the ring buffers will
+ be cleared after the last reader disconnects.
+* Changing the size of the ring buffer while the tracing session is active is also not supported.
+
+Note: the list above is not exhaustive.
+
+### Perfetto integration
+
+[Perfetto](https://perfetto.dev/docs/) is an open-source stack for performance instrumentation and
+trace analysis widely used in Android. Perfetto supports capturing and visualizing hypervisor
+traces.
+
+#### Capturing hypervisor traces on Android
+
+Consider first familiarizing yourself with Perfetto documentation for recording traces on Android:
+https://perfetto.dev/docs/quickstart/android-tracing.
+
+The [record_android_trace](
+https://cs.android.com/android/platform/superproject/+/master:external/perfetto/tools/record_android_trace)
+script supports a shortcut to capture all hypervisor events that are known to Perfetto:
+
+```shell
+external/perfetto/tools/record_android_trace hyp -t 15s -b 32mb -o /tmp/hyp.pftrace
+```
+
+Alternatively, you can use a full trace config to capture hypervisor events. Example usage:
+
+```shell
+cat<<EOF>config.pbtx
+duration_ms: 10000
+
+buffers: {
+ size_kb: 8960
+ fill_policy: DISCARD
+}
+
+data_sources: {
+ config {
+ name: "linux.ftrace"
+ ftrace_config {
+ instance_name: "hyp"
+ ftrace_events: "hyp/hyp_enter"
+ ftrace_events: "hyp/hyp_exit"
+ }
+ }
+}
+EOF
+
+./record_android_trace -c config.pbtx -o trace_file.perfetto-trace
+```
+
+If you have an Android tree checked out, the record_android_trace helper script is located at
+${REPO_ROOT}/external/perfetto/tools/record_android_trace. Otherwise, you can download the script
+by following the steps outlined in the [Perfetto docs](
+https://perfetto.dev/docs/quickstart/android-tracing#recording-a-trace-through-the-cmdline)
+
+#### Capturing hypervisor traces on QEMU
+
+Perfetto supports capturing traces on Linux: https://perfetto.dev/docs/quickstart/linux-tracing.
+However, since the pKVM hypervisor is only supported on arm64, you will need to cross-compile Perfetto
+binaries for linux-arm64 (unless you have an arm64 workstation).
+
+1. Checkout Perfetto repository: https://perfetto.dev/docs/contributing/getting-started
+2. Follow https://perfetto.dev/docs/contributing/build-instructions#cross-compiling-for-linux-arm-64
+ to compile Perfetto binaries for arm64 architecture.
+3. Copy the tracebox binary to QEMU
+4. Run the `tracebox` binary on QEMU to capture traces; its interface is very similar to that of
+`record_android_trace`. E.g. to capture all hypervisor events run:
+```shell
+tracebox -t 15s -b 32mb hyp
+```
+
+### Analysing traces using SQL
+
+On top of visualisation, Perfetto also provides a SQL interface to analyse traces. More
+documentation is available at https://perfetto.dev/docs/quickstart/trace-analysis and
+https://perfetto.dev/docs/analysis/trace-processor.
+
+Hypervisor events can be queried via the `pkvm_hypervisor_events` SQL view. You can load that view by
+calling `SELECT IMPORT("pkvm.hypervisor");`, e.g.:
+
+```sql
+SELECT IMPORT("pkvm.hypervisor");
+SELECT * FROM pkvm_hypervisor_events limit 5;
+```
+
+Below are some SQL queries that might be useful when analysing hypervisor traces.
+
+**What is the longest time a CPU spent in the hypervisor, grouped by the reason for entering the hypervisor**
+```sql
+SELECT IMPORT("pkvm.hypervisor");
+
+SELECT
+ cpu,
+ reason,
+ ts,
+ dur
+FROM pkvm_hypervisor_events
+JOIN (
+ SELECT
+ MAX(dur) as dur2,
+ cpu as cpu2,
+ reason as reason2
+ FROM pkvm_hypervisor_events
+ GROUP BY 2, 3) AS sc
+ON
+ cpu = sc.cpu2
+ AND dur = sc.dur2
+ AND (reason = sc.reason2 OR (reason IS NULL AND sc.reason2 IS NULL))
+ORDER BY dur desc;
+```
+
+**What are the 10 longest times a CPU spent in the hypervisor because of host_mem_abort**
+```sql
+SELECT
+ hyp.dur as dur,
+ hyp.ts as ts,
+ EXTRACT_ARG(slices.arg_set_id, 'esr') as esr,
+ EXTRACT_ARG(slices.arg_set_id, 'addr') as addr
+FROM pkvm_hypervisor_events as hyp
+JOIN slices
+ON hyp.slice_id = slices.id
+WHERE hyp.reason = 'host_mem_abort'
+ORDER BY dur desc
+LIMIT 10;
+```
+
+## Microdroid VM tracing
+
+IMPORTANT: Tracing is only supported for debuggable Microdroid VMs.
+
+### Capturing trace in Microdroid
+
+Starting with Android U, Microdroid contains Perfetto tracing binaries, which makes it possible to
+capture traces inside a Microdroid VM using the Perfetto stack. The commands used to capture traces on
+Android should work for a Microdroid VM as well, with the difference that Perfetto's tracing binaries
+are not enabled in Microdroid by default, so you need to start them manually by setting the
+`persist.traced.enable` system property to `1`.
+
+Here is a quick example of how to trace a Microdroid VM:
+
+1. First start your VM. For this example we are going to use
+`adb shell /apex/com.android.virt/bin/vm run-microdroid`.
+
+2. Set up an adb connection with the running VM:
+```shell
+adb shell forward tcp:9876 vsock:${CID}:5555
+adb connect localhost:9876
+adb -s localhost:9876 root
+```
+Here `${CID}` corresponds to the running Microdroid VM that you want to establish an adb connection
+with. The list of running VMs can be obtained by running `adb shell /apex/com.android.virt/bin/vm list`.
+Alternatively, you can use the `vm_shell` utility to connect to a running VM, i.e. `vm_shell connect`.
+
+3. Start Perfetto daemons and capture trace
+```shell
+adb -s localhost:9876 shell setprop persist.traced.enable 1
+${ANDROID_BUILD_TOP}/external/perfetto/tools/record_android_trace \
+ -s localhost:9876 \
+ -o /tmp/microdroid-trace-file.pftrace \
+ -t 10s \
+ -b 32mb \
+ sched/sched_switch task/task_newtask sched/sched_process_exit
+```
+
+If you don't have an Android repo checked out, you can download the record_android_trace script by
+following these [instructions](
+https://perfetto.dev/docs/quickstart/android-tracing#recording-a-trace-through-the-cmdline)
+
+More documentation on Perfetto's tracing on Android is available here:
+https://perfetto.dev/docs/quickstart/android-tracing
+
+### Capturing Microdroid boot trace
+
+TODO(b/271412868): Stay tuned, more docs are coming soon!
diff --git a/encryptedstore/src/main.rs b/encryptedstore/src/main.rs
index 7a41f13..86fa6da 100644
--- a/encryptedstore/src/main.rs
+++ b/encryptedstore/src/main.rs
@@ -125,9 +125,13 @@
fn format_ext4(device: &Path) -> Result<()> {
let mkfs_options = [
- "-j", // Create appropriate sized journal
- "-O metadata_csum", // Metadata checksum for filesystem integrity
- "-b 4096", // block size in the filesystem
+ "-j", // Create appropriate sized journal
+ /* metadata_csum: enabled for filesystem integrity
+ * extents: Not enabling extents reduces the coverage of metadata checksumming.
+ * 64bit: larger fields afforded by this feature enable full-strength checksumming.
+ */
+ "-O metadata_csum, extents, 64bit",
+ "-b 4096", // block size in the filesystem
];
let mut cmd = Command::new(MK2FS_BIN);
let status = cmd
diff --git a/javalib/jni/android_system_virtualmachine_VirtualizationService.cpp b/javalib/jni/android_system_virtualmachine_VirtualizationService.cpp
index bd80880..fbd1fd5 100644
--- a/javalib/jni/android_system_virtualmachine_VirtualizationService.cpp
+++ b/javalib/jni/android_system_virtualmachine_VirtualizationService.cpp
@@ -29,7 +29,7 @@
using namespace android::base;
static constexpr const char VIRTMGR_PATH[] = "/apex/com.android.virt/bin/virtmgr";
-static constexpr size_t VIRTMGR_THREADS = 16;
+static constexpr size_t VIRTMGR_THREADS = 2;
extern "C" JNIEXPORT jint JNICALL
Java_android_system_virtualmachine_VirtualizationService_nativeSpawn(
@@ -83,7 +83,6 @@
ARpcSession_setFileDescriptorTransportMode(session.get(),
ARpcSession_FileDescriptorTransportMode::Unix);
ARpcSession_setMaxIncomingThreads(session.get(), VIRTMGR_THREADS);
- ARpcSession_setMaxOutgoingThreads(session.get(), VIRTMGR_THREADS);
// SAFETY - ARpcSession_setupUnixDomainBootstrapClient does not take ownership of clientFd.
auto client = ARpcSession_setupUnixDomainBootstrapClient(session.get(), clientFd);
return AIBinder_toJavaBinder(env, client);
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachine.java b/javalib/src/android/system/virtualmachine/VirtualMachine.java
index 5f39b1c..7713faf 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachine.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachine.java
@@ -459,7 +459,7 @@
}
}
- IVirtualizationService service = vm.mVirtualizationService.connect();
+ IVirtualizationService service = vm.mVirtualizationService.getBinder();
try {
service.initializeWritablePartition(
@@ -785,7 +785,7 @@
throw new VirtualMachineException("Failed to create APK signature file", e);
}
- IVirtualizationService service = mVirtualizationService.connect();
+ IVirtualizationService service = mVirtualizationService.getBinder();
try {
if (mVmOutputCaptured) {
diff --git a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
index 668d7dc..93e65db 100644
--- a/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
+++ b/javalib/src/android/system/virtualmachine/VirtualMachineConfig.java
@@ -31,11 +31,13 @@
import android.content.Context;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
+import android.os.Build;
import android.os.ParcelFileDescriptor;
import android.os.PersistableBundle;
import android.sysprop.HypervisorProperties;
import android.system.virtualizationservice.VirtualMachineAppConfig;
import android.system.virtualizationservice.VirtualMachinePayloadConfig;
+import android.util.Log;
import java.io.File;
import java.io.FileInputStream;
@@ -47,6 +49,7 @@
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.Objects;
+import java.util.zip.ZipFile;
/**
* Represents a configuration of a virtual machine. A configuration consists of hardware
@@ -57,6 +60,7 @@
*/
@SystemApi
public final class VirtualMachineConfig {
+ private static final String TAG = "VirtualMachineConfig";
private static final String[] EMPTY_STRING_ARRAY = {};
// These define the schema of the config file persisted on disk.
@@ -296,8 +300,7 @@
/**
* Returns the absolute path of the APK which should contain the binary payload that will
- * execute within the VM. Returns null if no specific path has been set, so the primary APK will
- * be used.
+ * execute within the VM. Returns null if no specific path has been set.
*
* @hide
*/
@@ -445,18 +448,7 @@
throws VirtualMachineException {
VirtualMachineAppConfig vsConfig = new VirtualMachineAppConfig();
- String apkPath = mApkPath;
- if (apkPath == null) {
- try {
- ApplicationInfo appInfo =
- packageManager.getApplicationInfo(
- mPackageName, PackageManager.ApplicationInfoFlags.of(0));
- // This really is the path to the APK, not a directory.
- apkPath = appInfo.sourceDir;
- } catch (PackageManager.NameNotFoundException e) {
- throw new VirtualMachineException("Package not found", e);
- }
- }
+ String apkPath = (mApkPath != null) ? mApkPath : findPayloadApk(packageManager);
try {
vsConfig.apk = ParcelFileDescriptor.open(new File(apkPath), MODE_READ_ONLY);
@@ -495,6 +487,45 @@
return vsConfig;
}
+ private String findPayloadApk(PackageManager packageManager) throws VirtualMachineException {
+ ApplicationInfo appInfo;
+ try {
+ appInfo =
+ packageManager.getApplicationInfo(
+ mPackageName, PackageManager.ApplicationInfoFlags.of(0));
+ } catch (PackageManager.NameNotFoundException e) {
+ throw new VirtualMachineException("Package not found", e);
+ }
+
+ String[] splitApkPaths = appInfo.splitSourceDirs;
+ String[] abis = Build.SUPPORTED_64_BIT_ABIS;
+
+ // If there are split APKs, and we know the payload binary name, see if we can find a
+ // split APK containing the binary.
+ if (mPayloadBinaryName != null && splitApkPaths != null && abis.length != 0) {
+ String[] libraryNames = new String[abis.length];
+ for (int i = 0; i < abis.length; i++) {
+ libraryNames[i] = "lib/" + abis[i] + "/" + mPayloadBinaryName;
+ }
+
+ for (String path : splitApkPaths) {
+ try (ZipFile zip = new ZipFile(path)) {
+ for (String name : libraryNames) {
+ if (zip.getEntry(name) != null) {
+ Log.i(TAG, "Found payload in " + path);
+ return path;
+ }
+ }
+ } catch (IOException e) {
+ Log.w(TAG, "Failed to scan split APK: " + path, e);
+ }
+ }
+ }
+
+ // This really is the path to the APK, not a directory.
+ return appInfo.sourceDir;
+ }
+
private int bytesToMebiBytes(long mMemoryBytes) {
long oneMebi = 1024 * 1024;
// We can't express requests for more than 2 exabytes, but then they're not going to succeed
@@ -596,7 +627,8 @@
/**
* Sets the absolute path of the APK containing the binary payload that will execute within
- * the VM. If not set explicitly, defaults to the primary APK of the context.
+ * the VM. If not set explicitly, defaults to the split APK containing the payload, if there
+ * is one, and otherwise the primary APK of the context.
*
* @hide
*/
diff --git a/javalib/src/android/system/virtualmachine/VirtualizationService.java b/javalib/src/android/system/virtualmachine/VirtualizationService.java
index c3f2ba3..1cf97b5 100644
--- a/javalib/src/android/system/virtualmachine/VirtualizationService.java
+++ b/javalib/src/android/system/virtualmachine/VirtualizationService.java
@@ -41,6 +41,9 @@
*/
private final ParcelFileDescriptor mClientFd;
+ /* Persistent connection to IVirtualizationService. */
+ private final IVirtualizationService mBinder;
+
private static native int nativeSpawn();
private native IBinder nativeConnect(int clientFd);
@@ -57,15 +60,18 @@
throw new VirtualMachineException("Could not spawn VirtualizationService");
}
mClientFd = ParcelFileDescriptor.adoptFd(clientFd);
- }
- /* Connects to the VirtualizationService AIDL service. */
- public IVirtualizationService connect() throws VirtualMachineException {
IBinder binder = nativeConnect(mClientFd.getFd());
if (binder == null) {
throw new VirtualMachineException("Could not connect to VirtualizationService");
}
- return IVirtualizationService.Stub.asInterface(binder);
+ mBinder = IVirtualizationService.Stub.asInterface(binder);
+ }
+
+ /* Returns the IVirtualizationService binder. */
+ @NonNull
+ IVirtualizationService getBinder() {
+ return mBinder;
}
/*
diff --git a/libs/apkverify/Android.bp b/libs/apkverify/Android.bp
index e556842..83dbff6 100644
--- a/libs/apkverify/Android.bp
+++ b/libs/apkverify/Android.bp
@@ -45,7 +45,9 @@
edition: "2021",
test_suites: ["general-tests"],
rustlibs: [
+ "libandroid_logger",
"libapkverify",
+ "liblog_rust",
"libzip",
],
data: ["tests/data/*"],
diff --git a/libs/apkverify/src/algorithms.rs b/libs/apkverify/src/algorithms.rs
index 442b47c..c05ab38 100644
--- a/libs/apkverify/src/algorithms.rs
+++ b/libs/apkverify/src/algorithms.rs
@@ -204,9 +204,10 @@
}
/// Hash algorithms.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, ToPrimitive)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, ToPrimitive, Default)]
#[repr(u32)]
pub enum HashAlgorithm {
+ #[default]
/// SHA-256
SHA256 = 1,
}
@@ -217,9 +218,3 @@
Self::from_u32(val).context(format!("Unsupported hash algorithm: {}", val))
}
}
-
-impl Default for HashAlgorithm {
- fn default() -> Self {
- HashAlgorithm::SHA256
- }
-}
diff --git a/libs/apkverify/src/v3.rs b/libs/apkverify/src/v3.rs
index e1b728d..6082422 100644
--- a/libs/apkverify/src/v3.rs
+++ b/libs/apkverify/src/v3.rs
@@ -24,7 +24,7 @@
use openssl::x509::X509;
use std::fs::File;
use std::io::{Read, Seek};
-use std::ops::Range;
+use std::ops::RangeInclusive;
use std::path::Path;
use crate::algorithms::SignatureAlgorithmID;
@@ -33,11 +33,9 @@
pub const APK_SIGNATURE_SCHEME_V3_BLOCK_ID: u32 = 0xf05368c0;
-// TODO(b/190343842): get "ro.build.version.sdk"
-const SDK_INT: u32 = 31;
-
type Signers = LengthPrefixed<Vec<LengthPrefixed<Signer>>>;
+#[derive(Debug)]
pub(crate) struct Signer {
signed_data: LengthPrefixed<Bytes>, // not verified yet
min_sdk: u32,
@@ -47,8 +45,8 @@
}
impl Signer {
- fn sdk_range(&self) -> Range<u32> {
- self.min_sdk..self.max_sdk
+ fn sdk_range(&self) -> RangeInclusive<u32> {
+ self.min_sdk..=self.max_sdk
}
}
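The change from `Range` to `RangeInclusive` matters because a half-open range excludes `max_sdk` itself; a minimal sketch (using the 24..=32 bounds exercised by the new test below):

```rust
// With min_sdk=24 and max_sdk=32, only the inclusive range accepts SDK 32.
let (min_sdk, max_sdk) = (24u32, 32u32);
assert!(!(min_sdk..max_sdk).contains(&max_sdk)); // old behaviour: max_sdk rejected
assert!((min_sdk..=max_sdk).contains(&max_sdk)); // new behaviour: max_sdk accepted
assert!(!(min_sdk..=max_sdk).contains(&33)); // later SDKs are still rejected
```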
@@ -62,8 +60,8 @@
}
impl SignedData {
- fn sdk_range(&self) -> Range<u32> {
- self.min_sdk..self.max_sdk
+ fn sdk_range(&self) -> RangeInclusive<u32> {
+ self.min_sdk..=self.max_sdk
}
fn find_digest_by_algorithm(&self, algorithm_id: SignatureAlgorithmID) -> Result<&Digest> {
@@ -92,32 +90,30 @@
/// Verifies APK Signature Scheme v3 signatures of the provided APK and returns the public key
/// associated with the signer in DER format.
-pub fn verify<P: AsRef<Path>>(apk_path: P) -> Result<Box<[u8]>> {
+pub fn verify<P: AsRef<Path>>(apk_path: P, current_sdk: u32) -> Result<Box<[u8]>> {
let apk = File::open(apk_path.as_ref())?;
- let (signer, mut sections) = extract_signer_and_apk_sections(apk)?;
+ let (signer, mut sections) = extract_signer_and_apk_sections(apk, current_sdk)?;
signer.verify(&mut sections)
}
/// Gets the public key (in DER format) that was used to sign the given APK/APEX file
-pub fn get_public_key_der<P: AsRef<Path>>(apk_path: P) -> Result<Box<[u8]>> {
+pub fn get_public_key_der<P: AsRef<Path>>(apk_path: P, current_sdk: u32) -> Result<Box<[u8]>> {
let apk = File::open(apk_path.as_ref())?;
- let (signer, _) = extract_signer_and_apk_sections(apk)?;
+ let (signer, _) = extract_signer_and_apk_sections(apk, current_sdk)?;
Ok(signer.public_key.public_key_to_der()?.into_boxed_slice())
}
pub(crate) fn extract_signer_and_apk_sections<R: Read + Seek>(
apk: R,
+ current_sdk: u32,
) -> Result<(Signer, ApkSections<R>)> {
let mut sections = ApkSections::new(apk)?;
let mut block = sections.find_signature(APK_SIGNATURE_SCHEME_V3_BLOCK_ID).context(
"Fallback to v2 when v3 block not found is not yet implemented.", // b/197052981
)?;
- let mut supported = block
- .read::<Signers>()?
- .into_inner()
- .into_iter()
- .filter(|s| s.sdk_range().contains(&SDK_INT))
- .collect::<Vec<_>>();
+ let signers = block.read::<Signers>()?.into_inner();
+ let mut supported =
+ signers.into_iter().filter(|s| s.sdk_range().contains(¤t_sdk)).collect::<Vec<_>>();
ensure!(
supported.len() == 1,
"APK Signature Scheme V3 only supports one signer: {} signers found.",
diff --git a/libs/apkverify/src/v4.rs b/libs/apkverify/src/v4.rs
index 94abf99..045f4af 100644
--- a/libs/apkverify/src/v4.rs
+++ b/libs/apkverify/src/v4.rs
@@ -37,9 +37,10 @@
/// [apk_digest]: https://source.android.com/docs/security/apksigning/v4#apk-digest
pub fn get_apk_digest<R: Read + Seek>(
apk: R,
+ current_sdk: u32,
verify: bool,
) -> Result<(SignatureAlgorithmID, Box<[u8]>)> {
- let (signer, mut sections) = extract_signer_and_apk_sections(apk)?;
+ let (signer, mut sections) = extract_signer_and_apk_sections(apk, current_sdk)?;
let strongest_algorithm_id = signer
.strongest_signature()?
.signature_algorithm_id
@@ -104,9 +105,10 @@
}
/// Version of the idsig file format
-#[derive(Debug, PartialEq, Eq, FromPrimitive, ToPrimitive)]
+#[derive(Debug, PartialEq, Eq, FromPrimitive, ToPrimitive, Default)]
#[repr(u32)]
pub enum Version {
+ #[default]
/// Version 2, the only supported version.
V2 = 2,
}
@@ -117,12 +119,6 @@
}
}
-impl Default for Version {
- fn default() -> Self {
- Version::V2
- }
-}
-
impl V4Signature<fs::File> {
/// Creates a `V4Signature` struct from the given idsig path.
pub fn from_idsig_path<P: AsRef<Path>>(idsig_path: P) -> Result<Self> {
@@ -153,6 +149,7 @@
/// function OOMing.
pub fn create(
mut apk: &mut R,
+ current_sdk: u32,
block_size: usize,
salt: &[u8],
algorithm: HashAlgorithm,
@@ -180,7 +177,8 @@
ret.hashing_info.log2_blocksize = log2(block_size);
apk.seek(SeekFrom::Start(start))?;
- let (signature_algorithm_id, apk_digest) = get_apk_digest(apk, /*verify=*/ false)?;
+ let (signature_algorithm_id, apk_digest) =
+ get_apk_digest(apk, current_sdk, /*verify=*/ false)?;
ret.signing_info.signature_algorithm_id = signature_algorithm_id;
ret.signing_info.apk_digest = apk_digest;
// TODO(jiyong): add a signature to the signing_info struct
@@ -367,8 +365,9 @@
#[test]
fn digest_from_apk() {
let mut input = Cursor::new(include_bytes!("../tests/data/v4-digest-v3-Sha256withEC.apk"));
+ let current_sdk = 31;
let mut created =
- V4Signature::create(&mut input, 4096, &[], HashAlgorithm::SHA256).unwrap();
+ V4Signature::create(&mut input, current_sdk, 4096, &[], HashAlgorithm::SHA256).unwrap();
let mut golden = V4Signature::from_idsig_path(format!("{}.idsig", TEST_APK_PATH)).unwrap();
diff --git a/libs/apkverify/tests/apkverify_test.rs b/libs/apkverify/tests/apkverify_test.rs
index baf7c42..52e1da4 100644
--- a/libs/apkverify/tests/apkverify_test.rs
+++ b/libs/apkverify/tests/apkverify_test.rs
@@ -17,16 +17,30 @@
use apkverify::{
get_apk_digest, get_public_key_der, testing::assert_contains, verify, SignatureAlgorithmID,
};
+use log::info;
use std::{fs, matches, path::Path};
const KEY_NAMES_DSA: &[&str] = &["1024", "2048", "3072"];
const KEY_NAMES_ECDSA: &[&str] = &["p256", "p384", "p521"];
const KEY_NAMES_RSA: &[&str] = &["1024", "2048", "3072", "4096", "8192", "16384"];
+const SDK_INT: u32 = 31;
+
+/// Make sure any logging from the code under test ends up in logcat.
+fn setup() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("apkverify_test")
+ .with_min_level(log::Level::Info),
+ );
+ info!("Test starting");
+}
+
#[test]
fn test_verify_truncated_cd() {
+ setup();
use zip::result::ZipError;
- let res = verify("tests/data/v2-only-truncated-cd.apk");
+ let res = verify("tests/data/v2-only-truncated-cd.apk", SDK_INT);
// TODO(b/190343842): consider making a helper for err assertion
assert!(matches!(
res.unwrap_err().root_cause().downcast_ref::<ZipError>().unwrap(),
@@ -36,13 +50,15 @@
#[test]
fn apex_signed_with_v3_rsa_pkcs1_sha512_is_valid() {
+ setup();
validate_apk("tests/data/test.apex", SignatureAlgorithmID::RsaPkcs1V15WithSha512);
}
#[test]
fn apks_signed_with_v3_dsa_sha256_are_not_supported() {
+ setup();
for key_name in KEY_NAMES_DSA.iter() {
- let res = verify(format!("tests/data/v3-only-with-dsa-sha256-{}.apk", key_name));
+ let res = verify(format!("tests/data/v3-only-with-dsa-sha256-{}.apk", key_name), SDK_INT);
assert!(res.is_err(), "DSA algorithm is not supported for verification. See b/197052981.");
assert_contains(&res.unwrap_err().to_string(), "No supported APK signatures found");
}
@@ -50,6 +66,7 @@
#[test]
fn apks_signed_with_v3_ecdsa_sha256_are_valid() {
+ setup();
for key_name in KEY_NAMES_ECDSA.iter() {
validate_apk(
format!("tests/data/v3-only-with-ecdsa-sha256-{}.apk", key_name),
@@ -60,6 +77,7 @@
#[test]
fn apks_signed_with_v3_ecdsa_sha512_are_valid() {
+ setup();
for key_name in KEY_NAMES_ECDSA.iter() {
validate_apk(
format!("tests/data/v3-only-with-ecdsa-sha512-{}.apk", key_name),
@@ -70,6 +88,7 @@
#[test]
fn apks_signed_with_v3_rsa_pkcs1_sha256_are_valid() {
+ setup();
for key_name in KEY_NAMES_RSA.iter() {
validate_apk(
format!("tests/data/v3-only-with-rsa-pkcs1-sha256-{}.apk", key_name),
@@ -80,6 +99,7 @@
#[test]
fn apks_signed_with_v3_rsa_pkcs1_sha512_are_valid() {
+ setup();
for key_name in KEY_NAMES_RSA.iter() {
validate_apk(
format!("tests/data/v3-only-with-rsa-pkcs1-sha512-{}.apk", key_name),
@@ -89,13 +109,35 @@
}
#[test]
+fn test_verify_v3_sig_min_max_sdk() {
+ setup();
+ // The Signer for this APK has min_sdk=24, max_sdk=32.
+ let path = "tests/data/v31-rsa-2048_2-tgt-33-1-tgt-28.apk";
+
+ let res = verify(path, 23);
+ assert!(res.is_err());
+ assert_contains(&res.unwrap_err().to_string(), "0 signers found");
+
+ let res = verify(path, 24);
+ assert!(res.is_ok());
+
+ let res = verify(path, 32);
+ assert!(res.is_ok());
+
+ let res = verify(path, 33);
+ assert!(res.is_err());
+ assert_contains(&res.unwrap_err().to_string(), "0 signers found");
+}
+
+#[test]
fn test_verify_v3_sig_does_not_verify() {
+ setup();
let path_list = [
"tests/data/v3-only-with-ecdsa-sha512-p521-sig-does-not-verify.apk",
"tests/data/v3-only-with-rsa-pkcs1-sha256-3072-sig-does-not-verify.apk",
];
for path in path_list.iter() {
- let res = verify(path);
+ let res = verify(path, SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "Signature is invalid");
}
@@ -103,22 +145,28 @@
#[test]
fn test_verify_v3_digest_mismatch() {
- let res = verify("tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk");
+ setup();
+ let res = verify("tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk", SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "Digest mismatch");
}
#[test]
fn test_verify_v3_wrong_apk_sig_block_magic() {
- let res = verify("tests/data/v3-only-with-ecdsa-sha512-p384-wrong-apk-sig-block-magic.apk");
+ setup();
+ let res =
+ verify("tests/data/v3-only-with-ecdsa-sha512-p384-wrong-apk-sig-block-magic.apk", SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "No APK Signing Block");
}
#[test]
fn test_verify_v3_apk_sig_block_size_mismatch() {
- let res =
- verify("tests/data/v3-only-with-rsa-pkcs1-sha512-4096-apk-sig-block-size-mismatch.apk");
+ setup();
+ let res = verify(
+ "tests/data/v3-only-with-rsa-pkcs1-sha512-4096-apk-sig-block-size-mismatch.apk",
+ SDK_INT,
+ );
assert!(res.is_err());
assert_contains(
&res.unwrap_err().to_string(),
@@ -128,35 +176,40 @@
#[test]
fn test_verify_v3_cert_and_public_key_mismatch() {
- let res = verify("tests/data/v3-only-cert-and-public-key-mismatch.apk");
+ setup();
+ let res = verify("tests/data/v3-only-cert-and-public-key-mismatch.apk", SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "Public key mismatch");
}
#[test]
fn test_verify_v3_empty() {
- let res = verify("tests/data/v3-only-empty.apk");
+ setup();
+ let res = verify("tests/data/v3-only-empty.apk", SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "APK too small for APK Signing Block");
}
#[test]
fn test_verify_v3_no_certs_in_sig() {
- let res = verify("tests/data/v3-only-no-certs-in-sig.apk");
+ setup();
+ let res = verify("tests/data/v3-only-no-certs-in-sig.apk", SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "No certificates listed");
}
#[test]
fn test_verify_v3_no_supported_sig_algs() {
- let res = verify("tests/data/v3-only-no-supported-sig-algs.apk");
+ setup();
+ let res = verify("tests/data/v3-only-no-supported-sig-algs.apk", SDK_INT);
assert!(res.is_err());
assert_contains(&res.unwrap_err().to_string(), "No supported APK signatures found");
}
#[test]
fn test_verify_v3_signatures_and_digests_block_mismatch() {
- let res = verify("tests/data/v3-only-signatures-and-digests-block-mismatch.apk");
+ setup();
+ let res = verify("tests/data/v3-only-signatures-and-digests-block-mismatch.apk", SDK_INT);
assert!(res.is_err());
assert_contains(
&res.unwrap_err().to_string(),
@@ -166,6 +219,7 @@
#[test]
fn apk_signed_with_v3_unknown_additional_attr_is_valid() {
+ setup();
validate_apk(
"tests/data/v3-only-unknown-additional-attr.apk",
SignatureAlgorithmID::RsaPkcs1V15WithSha256,
@@ -174,6 +228,7 @@
#[test]
fn apk_signed_with_v3_unknown_pair_in_apk_sig_block_is_valid() {
+ setup();
validate_apk(
"tests/data/v3-only-unknown-pair-in-apk-sig-block.apk",
SignatureAlgorithmID::RsaPkcs1V15WithSha256,
@@ -182,6 +237,7 @@
#[test]
fn apk_signed_with_v3_ignorable_unsupported_sig_algs_is_valid() {
+ setup();
validate_apk(
"tests/data/v3-only-with-ignorable-unsupported-sig-algs.apk",
SignatureAlgorithmID::RsaPkcs1V15WithSha256,
@@ -190,6 +246,7 @@
#[test]
fn apk_signed_with_v3_stamp_is_valid() {
+ setup();
validate_apk("tests/data/v3-only-with-stamp.apk", SignatureAlgorithmID::EcdsaWithSha256);
}
@@ -203,14 +260,14 @@
/// * public key extracted from apk without verification
/// * expected public key from the corresponding .der file
fn validate_apk_public_key<P: AsRef<Path>>(apk_path: P) {
- let public_key_from_verification = verify(&apk_path);
+ let public_key_from_verification = verify(&apk_path, SDK_INT);
let public_key_from_verification =
public_key_from_verification.expect("Error in verification result");
let expected_public_key_path = format!("{}.der", apk_path.as_ref().to_str().unwrap());
assert_bytes_eq_to_data_in_file(&public_key_from_verification, expected_public_key_path);
- let public_key_from_apk = get_public_key_der(&apk_path);
+ let public_key_from_apk = get_public_key_der(&apk_path, SDK_INT);
let public_key_from_apk =
public_key_from_apk.expect("Error when extracting public key from apk");
assert_eq!(
@@ -226,15 +283,17 @@
fn validate_apk_digest<P: AsRef<Path>>(apk_path: P, expected_algorithm_id: SignatureAlgorithmID) {
let apk = fs::File::open(&apk_path).expect("Unabled to open apk file");
- let (verified_algorithm_id, verified_digest) = get_apk_digest(&apk, /*verify=*/ true)
- .expect("Error when extracting apk digest with verification.");
+ let (verified_algorithm_id, verified_digest) =
+ get_apk_digest(&apk, SDK_INT, /*verify=*/ true)
+ .expect("Error when extracting apk digest with verification.");
assert_eq!(expected_algorithm_id, verified_algorithm_id);
let expected_digest_path = format!("{}.apk_digest", apk_path.as_ref().to_str().unwrap());
assert_bytes_eq_to_data_in_file(&verified_digest, expected_digest_path);
- let (unverified_algorithm_id, unverified_digest) = get_apk_digest(&apk, /*verify=*/ false)
- .expect("Error when extracting apk digest without verification.");
+ let (unverified_algorithm_id, unverified_digest) =
+ get_apk_digest(&apk, SDK_INT, /*verify=*/ false)
+ .expect("Error when extracting apk digest without verification.");
assert_eq!(expected_algorithm_id, unverified_algorithm_id);
assert_eq!(verified_digest, unverified_digest);
}
diff --git a/libs/apkverify/tests/data/v31-rsa-2048_2-tgt-33-1-tgt-28.apk b/libs/apkverify/tests/data/v31-rsa-2048_2-tgt-33-1-tgt-28.apk
new file mode 100644
index 0000000..aeaec33
--- /dev/null
+++ b/libs/apkverify/tests/data/v31-rsa-2048_2-tgt-33-1-tgt-28.apk
Binary files differ
diff --git a/libs/devicemapper/src/lib.rs b/libs/devicemapper/src/lib.rs
index 4cf4e99..fec0114 100644
--- a/libs/devicemapper/src/lib.rs
+++ b/libs/devicemapper/src/lib.rs
@@ -227,8 +227,8 @@
let context = Context::new(0);
let now = SystemTime::now().duration_since(UNIX_EPOCH)?;
let ts = Timestamp::from_unix(context, now.as_secs(), now.subsec_nanos());
- let uuid = Uuid::new_v1(ts, node_id)?;
- Ok(String::from(uuid.to_hyphenated().encode_lower(&mut Uuid::encode_buffer())))
+ let uuid = Uuid::new_v1(ts, node_id.try_into()?);
+ Ok(String::from(uuid.hyphenated().encode_lower(&mut Uuid::encode_buffer())))
}
#[cfg(test)]
diff --git a/libs/fdtpci/src/lib.rs b/libs/fdtpci/src/lib.rs
index e32e16d..96d98d6 100644
--- a/libs/fdtpci/src/lib.rs
+++ b/libs/fdtpci/src/lib.rs
@@ -197,24 +197,32 @@
Ok(memory_address..memory_address + memory_size)
}
+/// Encodes memory flags of a PCI range
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-struct PciMemoryFlags(u32);
+pub struct PciMemoryFlags(pub u32);
impl PciMemoryFlags {
+ /// Returns whether this PCI range is prefetchable
pub fn prefetchable(self) -> bool {
self.0 & 0x80000000 != 0
}
+ /// Returns the type of this PCI range
pub fn range_type(self) -> PciRangeType {
PciRangeType::from((self.0 & 0x3000000) >> 24)
}
}
+/// Type of a PCI range
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-enum PciRangeType {
+pub enum PciRangeType {
+ /// Range represents the PCI configuration space
ConfigurationSpace,
+ /// Range is on IO space
IoSpace,
+ /// Range is on 32-bit MMIO space
Memory32,
+ /// Range is on 64-bit MMIO space
Memory64,
}
diff --git a/libs/libfdt/src/iterators.rs b/libs/libfdt/src/iterators.rs
index a7ea0ee..05fdb4a 100644
--- a/libs/libfdt/src/iterators.rs
+++ b/libs/libfdt/src/iterators.rs
@@ -85,6 +85,31 @@
}
}
+// Converts two cells into bytes of the same size
+fn two_cells_to_bytes(cells: [u32; 2]) -> [u8; 2 * size_of::<u32>()] {
+ // SAFETY: the sizes of the two arrays are the same
+ unsafe { core::mem::transmute::<[u32; 2], [u8; 2 * size_of::<u32>()]>(cells) }
+}
+
+impl Reg<u64> {
+ const NUM_CELLS: usize = 2;
+ /// Converts addr and (optional) size to the format that is consumable by libfdt.
+ pub fn to_cells(
+ &self,
+ ) -> ([u8; Self::NUM_CELLS * size_of::<u32>()], Option<[u8; Self::NUM_CELLS * size_of::<u32>()]>)
+ {
+ let addr =
+ two_cells_to_bytes([((self.addr >> 32) as u32).to_be(), (self.addr as u32).to_be()]);
+ let size = if self.size.is_some() {
+ let size = self.size.unwrap();
+ Some(two_cells_to_bytes([((size >> 32) as u32).to_be(), (size as u32).to_be()]))
+ } else {
+ None
+ };
+ (addr, size)
+ }
+}
+
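As a rough, self-contained illustration (not taken from the patch) of the byte layout `Reg::<u64>::to_cells` produces, splitting a 64-bit address into two big-endian u32 cells and viewing them as bytes gives:

```rust
// The assumed address 0x1_8000_0000 splits into the cells <0x1 0x80000000>;
// the result is independent of host endianness because of to_be().
let addr: u64 = 0x0000_0001_8000_0000;
let cells = [((addr >> 32) as u32).to_be(), (addr as u32).to_be()];
// SAFETY: [u32; 2] and [u8; 8] have the same size and no invalid bit patterns.
let bytes: [u8; 8] = unsafe { core::mem::transmute(cells) };
assert_eq!(bytes, [0x00, 0x00, 0x00, 0x01, 0x80, 0x00, 0x00, 0x00]);
```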
/// Iterator over the address ranges defined by the /memory/ node.
#[derive(Debug)]
pub struct MemRegIterator<'a> {
@@ -122,7 +147,7 @@
}
/// An address range from the 'ranges' property of a DT node.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
pub struct AddressRange<A, P, S> {
/// The physical address of the range within the child bus's address space.
pub addr: A,
@@ -202,3 +227,25 @@
})
}
}
+
+impl AddressRange<(u32, u64), u64, u64> {
+ const SIZE_CELLS: usize = 7;
+ /// Converts to the format that is consumable by libfdt
+ pub fn to_cells(&self) -> [u8; Self::SIZE_CELLS * size_of::<u32>()] {
+ let buf = [
+ self.addr.0.to_be(),
+ ((self.addr.1 >> 32) as u32).to_be(),
+ (self.addr.1 as u32).to_be(),
+ ((self.parent_addr >> 32) as u32).to_be(),
+ (self.parent_addr as u32).to_be(),
+ ((self.size >> 32) as u32).to_be(),
+ (self.size as u32).to_be(),
+ ];
+ // SAFETY: the sizes of the two arrays are the same
+ unsafe {
+ core::mem::transmute::<[u32; Self::SIZE_CELLS], [u8; Self::SIZE_CELLS * size_of::<u32>()]>(
+ buf,
+ )
+ }
+ }
+}
diff --git a/libs/libfdt/src/lib.rs b/libs/libfdt/src/lib.rs
index 1d295eb..7ddf680 100644
--- a/libs/libfdt/src/lib.rs
+++ b/libs/libfdt/src/lib.rs
@@ -21,6 +21,7 @@
pub use iterators::{AddressRange, CellIterator, MemRegIterator, RangesIterator, Reg, RegIterator};
+use core::cmp::max;
use core::ffi::{c_int, c_void, CStr};
use core::fmt;
use core::mem;
@@ -196,6 +197,10 @@
}
impl<'a> FdtNode<'a> {
+ /// Create immutable node from a mutable node at the same offset
+ pub fn from_mut(other: &'a FdtNodeMut) -> Self {
+ FdtNode { fdt: other.fdt, offset: other.offset }
+ }
/// Find parent node.
pub fn parent(&self) -> Result<Self> {
// SAFETY - Accesses (read-only) are constrained to the DT totalsize.
@@ -285,13 +290,31 @@
/// Retrieve the value of a given property.
pub fn getprop(&self, name: &CStr) -> Result<Option<&'a [u8]>> {
+ if let Some((prop, len)) = Self::getprop_internal(self.fdt, self.offset, name)? {
+ let offset = (prop as usize)
+ .checked_sub(self.fdt.as_ptr() as usize)
+ .ok_or(FdtError::Internal)?;
+
+ Ok(Some(self.fdt.buffer.get(offset..(offset + len)).ok_or(FdtError::Internal)?))
+ } else {
+ Ok(None) // property was not found
+ }
+ }
+
+ /// Return the pointer and size of the property named `name` in the node at offset `offset` of
+ /// the device tree `fdt`. The returned pointer is guaranteed to be non-null; on failure an
+ /// error is returned instead.
+ fn getprop_internal(
+ fdt: &'a Fdt,
+ offset: c_int,
+ name: &CStr,
+ ) -> Result<Option<(*const c_void, usize)>> {
let mut len: i32 = 0;
// SAFETY - Accesses are constrained to the DT totalsize (validated by ctor) and the
// function respects the passed number of characters.
let prop = unsafe {
libfdt_bindgen::fdt_getprop_namelen(
- self.fdt.as_ptr(),
- self.offset,
+ fdt.as_ptr(),
+ offset,
name.as_ptr(),
// *_namelen functions don't include the trailing nul terminator in 'len'.
name.to_bytes().len().try_into().map_err(|_| FdtError::BadPath)?,
@@ -308,11 +331,7 @@
// We expected an error code in len but still received a valid value?!
return Err(FdtError::Internal);
}
-
- let offset =
- (prop as usize).checked_sub(self.fdt.as_ptr() as usize).ok_or(FdtError::Internal)?;
-
- Ok(Some(self.fdt.buffer.get(offset..(offset + len)).ok_or(FdtError::Internal)?))
+ Ok(Some((prop.cast::<c_void>(), len)))
}
/// Get reference to the containing device tree.
@@ -405,6 +424,23 @@
fdt_err_expect_zero(ret)
}
+ /// Replace the value of the given property with the given value; the new value must have the
+ /// same length as the current value.
+ pub fn setprop_inplace(&mut self, name: &CStr, value: &[u8]) -> Result<()> {
+ // SAFETY - fdt size is not altered
+ let ret = unsafe {
+ libfdt_bindgen::fdt_setprop_inplace(
+ self.fdt.as_mut_ptr(),
+ self.offset,
+ name.as_ptr(),
+ value.as_ptr().cast::<c_void>(),
+ value.len().try_into().map_err(|_| FdtError::BadValue)?,
+ )
+ };
+
+ fdt_err_expect_zero(ret)
+ }
+
/// Create or change a flag-like empty property.
pub fn setprop_empty(&mut self, name: &CStr) -> Result<()> {
self.setprop(name, &[])
@@ -423,6 +459,31 @@
fdt_err_expect_zero(ret)
}
+ /// Reduce the size of the given property to new_size
+ pub fn trimprop(&mut self, name: &CStr, new_size: usize) -> Result<()> {
+ let (prop, len) =
+ FdtNode::getprop_internal(self.fdt, self.offset, name)?.ok_or(FdtError::NotFound)?;
+ if len == new_size {
+ return Ok(());
+ }
+ if new_size > len {
+ return Err(FdtError::NoSpace);
+ }
+
+ // SAFETY - new_size is smaller than the old size
+ let ret = unsafe {
+ libfdt_bindgen::fdt_setprop(
+ self.fdt.as_mut_ptr(),
+ self.offset,
+ name.as_ptr(),
+ prop.cast::<c_void>(),
+ new_size.try_into().map_err(|_| FdtError::BadValue)?,
+ )
+ };
+
+ fdt_err_expect_zero(ret)
+ }
+
/// Get reference to the containing device tree.
pub fn fdt(&mut self) -> &mut Fdt {
self.fdt
@@ -444,6 +505,51 @@
Ok(FdtNode { fdt: &*self.fdt, offset: fdt_err(ret)? })
}
+
+ /// Return the next node after this one whose compatible string matches the given one.
+ pub fn next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
+ // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_node_offset_by_compatible(
+ self.fdt.as_ptr(),
+ self.offset,
+ compatible.as_ptr(),
+ )
+ };
+
+ Ok(fdt_err_or_option(ret)?.map(|offset| Self { fdt: self.fdt, offset }))
+ }
+
+ /// Replace this node and its subtree with nop tags, effectively removing it from the tree, and
+ /// then return the next node whose compatible string matches the given one.
+ // Side note: without this, filtering out excess compatible nodes from the DT is impossible.
+ // The reason is that libfdt ensures that the node from where the search for the next
+ // compatible node is started is always a valid one -- except for the special case of offset =
+ // -1 which is to find the first compatible node. So, we can't delete a node and then find the
+ // next compatible node from it.
+ //
+ // We can't go in the opposite direction either. If we call next_compatible to find the next
+ // node and then delete the current node, the Rust borrow checker kicks in. The next node holds
+ // a mutable reference to the DT, so we can't use the current node (which also holds a mutable
+ // reference to the DT).
+ pub fn delete_and_next_compatible(self, compatible: &CStr) -> Result<Option<Self>> {
+ // SAFETY - Accesses (read-only) are constrained to the DT totalsize.
+ let ret = unsafe {
+ libfdt_bindgen::fdt_node_offset_by_compatible(
+ self.fdt.as_ptr(),
+ self.offset,
+ compatible.as_ptr(),
+ )
+ };
+ let next_offset = fdt_err_or_option(ret)?;
+
+ // SAFETY - fdt_nop_node alters only the bytes in the blob which contain the node and its
+ // properties and subnodes, and will not alter or move any other part of the tree.
+ let ret = unsafe { libfdt_bindgen::fdt_nop_node(self.fdt.as_mut_ptr(), self.offset) };
+ fdt_err_expect_zero(ret)?;
+
+ Ok(next_offset.map(|offset| Self { fdt: self.fdt, offset }))
+ }
}
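A minimal usage sketch of the delete-then-advance pattern described above (hypothetical: it assumes `first` is a `FdtNodeMut` already positioned on the first matching node and `compat` is a `&CStr`):

```rust
// Keep the first node matching `compat` and nop out every later match.
let mut next = first.next_compatible(compat)?;
while let Some(extra) = next {
    next = extra.delete_and_next_compatible(compat)?;
}
```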
/// Iterator over nodes sharing a same compatible string.
@@ -518,6 +624,21 @@
mem::transmute::<&mut [u8], &mut Self>(fdt)
}
+ /// Update this FDT from a slice containing another FDT
+ pub fn copy_from_slice(&mut self, new_fdt: &[u8]) -> Result<()> {
+ if self.buffer.len() < new_fdt.len() {
+ Err(FdtError::NoSpace)
+ } else {
+ let totalsize = self.totalsize();
+ self.buffer[..new_fdt.len()].clone_from_slice(new_fdt);
+ // Zeroize the remaining part. We zeroize up to the size of the original DT because
+ // zeroizing the entire buffer (max 2MB) is not necessary and may increase the VM boot
+ // time.
+ self.buffer[new_fdt.len()..max(new_fdt.len(), totalsize)].fill(0_u8);
+ Ok(())
+ }
+ }
+
/// Make the whole slice containing the DT available to libfdt.
pub fn unpack(&mut self) -> Result<()> {
// SAFETY - "Opens" the DT in-place (supported use-case) by updating its header and
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index bc4db6c..de06d01 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -51,6 +51,7 @@
deps: [
"init_second_stage",
"microdroid_build_prop",
+ "microdroid_init_debug_policy",
"microdroid_init_rc",
"microdroid_ueventd_rc",
"microdroid_launcher",
@@ -69,11 +70,8 @@
"libartpalette-system",
"apexd.microdroid",
- "atrace",
"debuggerd",
"linker",
- "tombstoned.microdroid",
- "tombstone_transmit.microdroid",
"cgroups.json",
"task_profiles.json",
"public.libraries.android.txt",
@@ -88,6 +86,12 @@
"libvm_payload", // used by payload to interact with microdroid manager
"prng_seeder_microdroid",
+
+ // Binaries required to capture traces in Microdroid.
+ "atrace",
+ "traced",
+ "traced_probes",
+ "perfetto",
] + microdroid_shell_and_utilities,
multilib: {
common: {
@@ -106,13 +110,26 @@
"authfs",
"authfs_service",
"encryptedstore",
- "microdroid_crashdump_kernel",
"microdroid_kexec",
"microdroid_manager",
"zipfuse",
],
},
},
+ arch: {
+ // b/273792258: These could be in multilib.lib64 except that
+ // microdroid_crashdump_kernel doesn't exist for riscv64 yet
+ arm64: {
+ deps: [
+ "microdroid_crashdump_kernel",
+ ],
+ },
+ x86_64: {
+ deps: [
+ "microdroid_crashdump_kernel",
+ ],
+ },
+ },
linker_config_src: "linker.config.json",
base_dir: "system",
dirs: microdroid_rootdirs,
diff --git a/microdroid/README.md b/microdroid/README.md
index 41278a5..f70965a 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -138,6 +138,7 @@
TEST_ROOT=/data/local/tmp/virt
adb shell /apex/com.android.virt/bin/vm run-app \
--log $TEST_ROOT/log.txt \
+--console $TEST_ROOT/console.txt \
PATH_TO_YOUR_APP \
$TEST_ROOT/MyApp.apk.idsig \
$TEST_ROOT/instance.img \
@@ -145,9 +146,9 @@
```
The last command lets you know the CID assigned to the VM. The console output
-from the VM is stored to `$TEST_ROOT/log.txt` file for debugging purpose. If you
-omit the `--log $TEST_ROOT/log.txt` option, it will be emitted to the current
-console.
+from the VM is stored to `$TEST_ROOT/console.txt` and logcat is stored to
+`$TEST_ROOT/log.txt` for debugging purposes. If you omit the `--log` or
+`--console` option, the corresponding output is emitted to the current console.
Stopping the VM can be done as follows:
@@ -159,12 +160,50 @@
invoked with the `--daemonize` flag. If the flag was not used, press Ctrl+C on
the console where the `vm run-app` command was invoked.
-## ADB
+## Debuggable microdroid
-On userdebug builds, you can have an adb connection to microdroid. To do so,
-first, delete `$TEST_ROOT/instance.img`; this is because changing debug settings
-requires a new instance. Then add the `--debug=full` flag to the
-`/apex/com.android.virt/bin/vm run-app` command, and then
+### Debugging features
+Microdroid supports the following debugging features:
+
+- VM log
+- console output
+- kernel output
+- logcat output
+- [ramdump](../docs/debug/ramdump.md)
+- crashdump
+- [adb](#adb)
+- [gdb](#debugging-the-payload-on-microdroid)
+
+### Enabling debugging features
+There are two ways to enable the debugging features:
+
+#### Option 1) Running microdroid on AVF debug policy configured device
+
+microdroid can be started with debugging features enabled by debug policies
+from the host. The host bootloader may provide debug policies to the host OS's
+device tree for VMs.
+
+For a protected VM, such a device tree will be available in microdroid, so
+microdroid can check which debugging features are enabled.
+
+Here is the list of device tree properties for debugging features; a minimal
+reading sketch in Rust follows the list.
+
+- `/avf/guest/common/log`: `<1>` to enable kernel log and logcat. Ignored
+ otherwise.
+- `/avf/guest/common/ramdump`: `<1>` to enable ramdump. Ignored otherwise.
+- `/avf/guest/microdroid/adb`: `<1>` to enable `adb`. Ignored otherwise.
+
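A minimal reading sketch (mirroring what `init_debug_policy` does elsewhere in this change; the path in the usage comment is illustrative):

```rust
use std::{fs::File, io::Read};

// Returns true only if the 4-byte cell is explicitly <1> (DT cells are big-endian).
fn debug_policy_enabled(path: &str) -> bool {
    let mut buf = [0u8; 4];
    File::open(path)
        .and_then(|mut f| f.read_exact(&mut buf))
        .map(|_| u32::from_be_bytes(buf) == 1)
        .unwrap_or(false)
}

// Example: check whether adb is enabled by the debug policy.
// let adb = debug_policy_enabled("/sys/firmware/devicetree/base/avf/guest/microdroid/adb");
```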
+#### Option 2) Launching microdroid with a debug level
+
+microdroid can be started with debugging features. To do so, first, delete
+`$TEST_ROOT/instance.img`; this is because changing debug settings requires a
+new instance. Then add the `--debug=full` flag to the
+`/apex/com.android.virt/bin/vm run-app` command. This will enable all debugging
+features.
+
+### ADB
+
+If the `adb` connection is enabled, run the following command.
```sh
vm_shell
@@ -175,13 +214,18 @@
Once you have an adb connection with `vm_shell`, `localhost:8000` will be the
serial of microdroid.
-## Debugging the payload on microdroid
+### Debugging the payload on microdroid
Like a normal adb device, you can debug native processes using `lldbclient.py`
script, either by running a new process, or attaching to an existing process.
Use `vm_shell` tool above, and then run `lldbclient.py`.
```sh
+adb -s localhost:8000 shell 'mount -o remount,exec /data'
development/scripts/lldbclient.py -s localhost:8000 --chroot . --user '' \
(-p PID | -n NAME | -r ...)
```
+
+**Note:** We need to pass `--chroot .` to skip device verification, because
+microdroid doesn't match the host's lunch target. We also need to pass
+`--user ''` as there is no `su` binary in microdroid.
diff --git a/microdroid/init.rc b/microdroid/init.rc
index 70c22d4..29f8970 100644
--- a/microdroid/init.rc
+++ b/microdroid/init.rc
@@ -21,13 +21,9 @@
write /linkerconfig/ld.config.txt \#
chmod 644 /linkerconfig/ld.config.txt
-# If VM is debuggable, send logs to outside ot the VM via the serial console.
-# If non-debuggable, logs are internally consumed at /dev/null
-on early-init && property:ro.boot.microdroid.debuggable=1
- setprop ro.log.file_logger.path /dev/hvc2
-
-on early-init && property:ro.boot.microdroid.debuggable=0
- setprop ro.log.file_logger.path /dev/null
+ # Applies debug policy to decide whether to enable adb, adb root, and logcat.
+ # We use exec_start on a service (rather than exec'ing the binary directly) so that
+ # stdio_to_kmsg can be specified.
+ exec_start init_debug_policy
on init
mkdir /mnt/apk 0755 system system
@@ -39,7 +35,10 @@
restorecon /mnt/extra-apk
# Wait for apexd to finish activating APEXes before starting more processes.
- wait_for_prop apexd.status activated
+ # Microdroid starts apexd in VM mode in which apexd doesn't wait for init after setting
+ # apexd.status to activated, but immediately transitions to ready. Therefore, it's not safe to
+ # wait for the activated status; by the time this line is reached it may already be ready.
+ wait_for_prop apexd.status ready
perform_apex_config
# Notify to microdroid_manager that perform_apex_config is done.
@@ -47,8 +46,6 @@
# payloads are not designed to run with bootstrap bionic
setprop apex_config.done true
- setprop ro.debuggable ${ro.boot.microdroid.debuggable:-0}
-
on property:microdroid_manager.init_done=1
# Stop ueventd to save memory
stop ueventd
@@ -57,7 +54,7 @@
# Mount tracefs (with GID=AID_READTRACEFS)
mount tracefs tracefs /sys/kernel/tracing gid=3012
-on init && property:ro.boot.adb.enabled=1
+on property:init_debug_policy.adbd.enabled=1
start adbd
# Mount filesystems and start core system services.
@@ -129,14 +126,6 @@
mkdir /data/vendor_de 0771 root root
mkdir /data/vendor/hardware 0771 root root
- # Start tombstoned early to be able to store tombstones.
- # microdroid doesn't have anr, but tombstoned requires it
- mkdir /data/anr 0775 system system
- mkdir /data/tombstones 0771 system system
- mkdir /data/vendor/tombstones 0771 root root
-
- start tombstoned
-
# For security reasons, /data/local/tmp should always be empty.
# Do not place files or directories in /data/local/tmp
mkdir /data/local 0751 root root
@@ -149,15 +138,6 @@
# Mark boot completed. This will notify microdroid_manager to run payload.
setprop dev.bootcomplete 1
-on property:tombstone_transmit.start=1
- mkdir /data/tombstones 0771 system system
- start tombstone_transmit
-
-service tombstone_transmit /system/bin/tombstone_transmit.microdroid -cid 2 -port 2000 -remove_tombstones_after_transmitting
- user system
- group system
- shutdown critical
-
service apexd-vm /system/bin/apexd --vm
user root
group system
@@ -179,3 +159,8 @@
group shell log readproc
seclabel u:r:shell:s0
setenv HOSTNAME console
+
+service init_debug_policy /system/bin/init_debug_policy
+ oneshot
+ disabled
+ stdio_to_kmsg
diff --git a/microdroid/init_debug_policy/Android.bp b/microdroid/init_debug_policy/Android.bp
new file mode 100644
index 0000000..afc2e73
--- /dev/null
+++ b/microdroid/init_debug_policy/Android.bp
@@ -0,0 +1,15 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_binary {
+ name: "microdroid_init_debug_policy",
+ srcs: ["src/init_debug_policy.rs"],
+ stem: "init_debug_policy",
+ rustlibs: [
+ "librustutils",
+ ],
+ installable: false, // match with microdroid_init_rc.
+ bootstrap: true,
+ prefer_rlib: true,
+}
diff --git a/microdroid/init_debug_policy/src/init_debug_policy.rs b/microdroid/init_debug_policy/src/init_debug_policy.rs
new file mode 100644
index 0000000..6c80926
--- /dev/null
+++ b/microdroid/init_debug_policy/src/init_debug_policy.rs
@@ -0,0 +1,57 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Applies debug policies when booting microdroid
+
+use rustutils::system_properties;
+use rustutils::system_properties::PropertyWatcherError;
+use std::fs::File;
+use std::io::Read;
+
+/// Gets the debug policy value as a bool. It's true iff the value is explicitly set to <1>.
+fn get_debug_policy_bool(path: &'static str) -> Option<bool> {
+ let mut file = File::open(path).ok()?;
+ let mut log: [u8; 4] = Default::default();
+ file.read_exact(&mut log).ok()?;
+ // DT spec uses big endian although Android is always little endian.
+ Some(u32::from_be_bytes(log) == 1)
+}
+
+fn main() -> Result<(), PropertyWatcherError> {
+ // If the VM is debuggable or the debug policy says so, send logs outside of the VM via the
+ // serial console. Otherwise, logs are consumed internally at /dev/null.
+ let log_path = if system_properties::read_bool("ro.boot.microdroid.debuggable", false)?
+ || get_debug_policy_bool("/sys/firmware/devicetree/base/avf/guest/common/log")
+ .unwrap_or_default()
+ {
+ "/dev/hvc2"
+ } else {
+ "/dev/null"
+ };
+ system_properties::write("ro.log.file_logger.path", log_path)?;
+
+ let (adbd_enabled, debuggable) = if system_properties::read_bool("ro.boot.adb.enabled", false)?
+ || get_debug_policy_bool("/sys/firmware/devicetree/base/avf/guest/microdroid/adb")
+ .unwrap_or_default()
+ {
+ // debuggable is required for adb root and bypassing adb authorization.
+ ("1", "1")
+ } else {
+ ("0", "0")
+ };
+ system_properties::write("init_debug_policy.adbd.enabled", adbd_enabled)?;
+ system_properties::write("ro.debuggable", debuggable)?;
+
+ Ok(())
+}
diff --git a/microdroid/kdump/kexec.c b/microdroid/kdump/kexec.c
index 8d88951..d3e8e02 100644
--- a/microdroid/kdump/kexec.c
+++ b/microdroid/kdump/kexec.c
@@ -23,6 +23,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
@@ -53,6 +54,20 @@
if (syscall(SYS_kexec_file_load, open_checked(KERNEL), open_checked(INITRD), cmdline_len,
CMDLINE, KEXEC_FILE_ON_CRASH) == -1) {
fprintf(stderr, "Failed to load panic kernel: %s\n", strerror(errno));
+ if (errno == EADDRNOTAVAIL) {
+ struct stat st;
+ off_t kernel_size = 0;
+ off_t initrd_size = 0;
+
+ if (stat(KERNEL, &st) == 0) {
+ kernel_size = st.st_size;
+ }
+ if (stat(INITRD, &st) == 0) {
+ initrd_size = st.st_size;
+ }
+ fprintf(stderr, "Image size too big? %s:%ld bytes, %s:%ld bytes", KERNEL, kernel_size,
+ INITRD, initrd_size);
+ }
return 1;
}
return 0;
diff --git a/microdroid/payload/config/src/lib.rs b/microdroid/payload/config/src/lib.rs
index 925a543..cdef3e4 100644
--- a/microdroid/payload/config/src/lib.rs
+++ b/microdroid/payload/config/src/lib.rs
@@ -64,10 +64,11 @@
/// Payload's task can be one of plain executable
/// or an .so library which can be started via /system/bin/microdroid_launcher
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Default)]
pub enum TaskType {
/// Task's command indicates the path to the executable binary.
#[serde(rename = "executable")]
+ #[default]
Executable,
/// Task's command indicates the .so library in /mnt/apk/lib/{arch}
#[serde(rename = "microdroid_launcher")]
@@ -87,12 +88,6 @@
pub command: String,
}
-impl Default for TaskType {
- fn default() -> TaskType {
- TaskType::Executable
- }
-}
-
/// APEX config
/// For now, we only pass the name of APEX.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
diff --git a/microdroid_manager/Android.bp b/microdroid_manager/Android.bp
index 18cf49d..495d3bb 100644
--- a/microdroid_manager/Android.bp
+++ b/microdroid_manager/Android.bp
@@ -19,9 +19,9 @@
"libbinder_rs",
"libbyteorder",
"libcap_rust",
+ "libciborium",
"libdiced_open_dice",
"libdiced_sample_inputs",
- "libdiced_utils",
"libglob",
"libhex",
"libitertools",
diff --git a/microdroid_manager/src/dice.rs b/microdroid_manager/src/dice.rs
index c3136e8..3a2a1e6 100644
--- a/microdroid_manager/src/dice.rs
+++ b/microdroid_manager/src/dice.rs
@@ -16,12 +16,14 @@
use anyhow::{anyhow, bail, Context, Error, Result};
use byteorder::{NativeEndian, ReadBytesExt};
+use ciborium::{cbor, ser};
use diced_open_dice::{
bcc_handover_parse, retry_bcc_main_flow, BccHandover, Config, DiceArtifacts, DiceMode, Hash,
Hidden, InputValues, OwnedDiceArtifacts,
};
use keystore2_crypto::ZVec;
use libc::{c_void, mmap, munmap, MAP_FAILED, MAP_PRIVATE, PROT_READ};
+use microdroid_metadata::PayloadMetadata;
use openssl::hkdf::hkdf;
use openssl::md::Md;
use std::fs;
@@ -157,3 +159,70 @@
}
}
}
+
+/// Returns a configuration descriptor of the given payload following the BCC's specification:
+/// https://cs.android.com/android/platform/superproject/+/master:hardware/interfaces/security/rkp/aidl/android/hardware/security/keymint/ProtectedData.aidl
+/// {
+/// -70002: "Microdroid payload",
+/// ? -71000: tstr // payload_config_path
+/// ? -71001: PayloadConfig
+/// }
+/// PayloadConfig = {
+/// 1: tstr // payload_binary_name
+/// }
+pub fn format_payload_config_descriptor(payload_metadata: &PayloadMetadata) -> Result<Vec<u8>> {
+ const MICRODROID_PAYLOAD_COMPONENT_NAME: &str = "Microdroid payload";
+
+ let config_descriptor_cbor_value = match payload_metadata {
+ PayloadMetadata::config_path(payload_config_path) => cbor!({
+ -70002 => MICRODROID_PAYLOAD_COMPONENT_NAME,
+ -71000 => payload_config_path
+ }),
+ PayloadMetadata::config(payload_config) => cbor!({
+ -70002 => MICRODROID_PAYLOAD_COMPONENT_NAME,
+ -71001 => {1 => payload_config.payload_binary_name}
+ }),
+ }
+ .context("Failed to build a CBOR Value from payload metadata")?;
+ let mut config_descriptor = Vec::new();
+ ser::into_writer(&config_descriptor_cbor_value, &mut config_descriptor)?;
+ Ok(config_descriptor)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use microdroid_metadata::PayloadConfig;
+
+ #[test]
+ fn payload_metadata_with_path_formats_correctly() -> Result<()> {
+ let payload_metadata = PayloadMetadata::config_path("/config_path".to_string());
+ let config_descriptor = format_payload_config_descriptor(&payload_metadata)?;
+ static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
+ 0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
+ 0x6f, 0x69, 0x64, 0x20, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x00, 0x01,
+ 0x15, 0x57, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x61, 0x74,
+ 0x68,
+ ];
+ assert_eq!(EXPECTED_CONFIG_DESCRIPTOR, &config_descriptor);
+ Ok(())
+ }
+
+ #[test]
+ fn payload_metadata_with_config_formats_correctly() -> Result<()> {
+ let payload_config = PayloadConfig {
+ payload_binary_name: "payload_binary".to_string(),
+ ..Default::default()
+ };
+ let payload_metadata = PayloadMetadata::config(payload_config);
+ let config_descriptor = format_payload_config_descriptor(&payload_metadata)?;
+ static EXPECTED_CONFIG_DESCRIPTOR: &[u8] = &[
+ 0xa2, 0x3a, 0x00, 0x01, 0x11, 0x71, 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72,
+ 0x6f, 0x69, 0x64, 0x20, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x00, 0x01,
+ 0x15, 0x58, 0xa1, 0x01, 0x6e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62,
+ 0x69, 0x6e, 0x61, 0x72, 0x79,
+ ];
+ assert_eq!(EXPECTED_CONFIG_DESCRIPTOR, &config_descriptor);
+ Ok(())
+ }
+}
diff --git a/microdroid_manager/src/main.rs b/microdroid_manager/src/main.rs
index a464163..8732be1 100644
--- a/microdroid_manager/src/main.rs
+++ b/microdroid_manager/src/main.rs
@@ -21,7 +21,7 @@
mod swap;
mod vm_payload_service;
-use crate::dice::{DiceDriver, derive_sealing_key};
+use crate::dice::{DiceDriver, derive_sealing_key, format_payload_config_descriptor};
use crate::instance::{ApexData, ApkData, InstanceDisk, MicrodroidData, RootHash};
use crate::vm_payload_service::register_vm_payload_service;
use android_system_virtualizationcommon::aidl::android::system::virtualizationcommon::ErrorCode::ErrorCode;
@@ -35,7 +35,6 @@
use apkverify::{get_public_key_der, verify, V4Signature};
use binder::Strong;
use diced_open_dice::OwnedDiceArtifacts;
-use diced_utils::cbor::{encode_header, encode_number};
use glob::glob;
use itertools::sorted;
use libc::VMADDR_CID_HOST;
@@ -56,8 +55,8 @@
use std::convert::TryInto;
use std::env;
use std::ffi::CString;
-use std::fs::{self, create_dir, OpenOptions};
-use std::io::Write;
+use std::fs::{self, create_dir, OpenOptions, File};
+use std::io::{Read, Write};
use std::os::unix::process::CommandExt;
use std::os::unix::process::ExitStatusExt;
use std::path::Path;
@@ -74,6 +73,7 @@
const DM_MOUNTED_APK_PATH: &str = "/dev/block/mapper/microdroid-apk";
const AVF_STRICT_BOOT: &str = "/sys/firmware/devicetree/base/chosen/avf,strict-boot";
const AVF_NEW_INSTANCE: &str = "/sys/firmware/devicetree/base/chosen/avf,new-instance";
+const AVF_DEBUG_POLICY_RAMDUMP: &str = "/sys/firmware/devicetree/base/avf/guest/common/ramdump";
const DEBUG_MICRODROID_NO_VERIFIED_BOOT: &str =
"/sys/firmware/devicetree/base/virtualization/guest/debug-microdroid,no-verified-boot";
@@ -82,7 +82,6 @@
const ZIPFUSE_BIN: &str = "/system/bin/zipfuse";
const APEX_CONFIG_DONE_PROP: &str = "apex_config.done";
-const TOMBSTONE_TRANSMIT_DONE_PROP: &str = "tombstone_transmit.init_done";
const DEBUGGABLE_PROP: &str = "ro.boot.microdroid.debuggable";
// SYNC WITH virtualizationservice/src/crosvm.rs
@@ -287,54 +286,14 @@
let code_hash = code_hash_ctx.finish();
let authority_hash = authority_hash_ctx.finish();
- // {
- // -70002: "Microdroid payload",
- // ? -71000: tstr // payload_config_path
- // ? -71001: PayloadConfig
- // }
- // PayloadConfig = {
- // 1: tstr // payload_binary_name
- // }
-
- let mut config_desc = vec![
- 0xa2, // map(2)
- 0x3a, 0x00, 0x01, 0x11, 0x71, // -70002
- 0x72, 0x4d, 0x69, 0x63, 0x72, 0x6f, 0x64, 0x72, 0x6f, 0x69, 0x64, 0x20, 0x70, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, // "Microdroid payload"
- ];
-
- match payload_metadata {
- PayloadMetadata::config_path(payload_config_path) => {
- encode_negative_number(-71000, &mut config_desc)?;
- encode_tstr(payload_config_path, &mut config_desc)?;
- }
- PayloadMetadata::config(payload_config) => {
- encode_negative_number(-71001, &mut config_desc)?;
- encode_header(5, 1, &mut config_desc)?; // map(1)
- encode_number(1, &mut config_desc)?;
- encode_tstr(&payload_config.payload_binary_name, &mut config_desc)?;
- }
- }
+ let config_descriptor = format_payload_config_descriptor(payload_metadata)?;
// Check debuggability, conservatively assuming it is debuggable
let debuggable = system_properties::read_bool(DEBUGGABLE_PROP, true)?;
// Send the details to diced
let hidden = verified_data.salt.clone().try_into().unwrap();
- dice.derive(code_hash, &config_desc, authority_hash, debuggable, hidden)
-}
-
-fn encode_tstr(tstr: &str, buffer: &mut Vec<u8>) -> Result<()> {
- let bytes = tstr.as_bytes();
- encode_header(3, bytes.len().try_into().unwrap(), buffer)?;
- buffer.extend_from_slice(bytes);
- Ok(())
-}
-
-fn encode_negative_number(n: i64, buffer: &mut dyn Write) -> Result<()> {
- ensure!(n < 0);
- let n = -1 - n;
- encode_header(1, n.try_into().unwrap(), buffer)
+ dice.derive(code_hash, &config_descriptor, authority_hash, debuggable, hidden)
}
fn is_strict_boot() -> bool {
@@ -356,6 +315,21 @@
}
}
+/// Gets the debug policy value as a bool. It's true iff the value is explicitly set to <1>.
+fn get_debug_policy_bool(path: &'static str) -> Result<Option<bool>> {
+ let mut file = match File::open(path) {
+ Ok(dp) => dp,
+ Err(e) => {
+ info!("{e:?}. Assumes <0>");
+ return Ok(Some(false));
+ }
+ };
+ let mut log: [u8; 4] = Default::default();
+ file.read_exact(&mut log).with_context(|| format!("Malformed data in {path}"))?;
+ // DT spec uses big endian although Android is always little endian.
+ Ok(Some(u32::from_be_bytes(log) == 1))
+}
+
fn try_run_payload(service: &Strong<dyn IVirtualMachineService>) -> Result<i32> {
let metadata = load_metadata().context("Failed to load payload metadata")?;
let dice = DiceDriver::new(Path::new("/dev/open-dice0")).context("Failed to load DICE")?;
@@ -464,12 +438,11 @@
setup_config_sysprops(&config)?;
- // Start tombstone_transmit if enabled
+ // Set export_tombstones if enabled
if should_export_tombstones(&config) {
- system_properties::write("tombstone_transmit.start", "1")
- .context("set tombstone_transmit.start")?;
- } else {
- control_service("stop", "tombstoned")?;
+ // This property is read by tombstone_handler.
+ system_properties::write("microdroid_manager.export_tombstones.enabled", "1")
+ .context("set microdroid_manager.export_tombstones.enabled")?;
}
// Wait until zipfuse has mounted the APKs so we can access the payload
@@ -489,20 +462,10 @@
system_properties::write("microdroid_manager.init_done", "1")
.context("set microdroid_manager.init_done")?;
- // Wait for tombstone_transmit to init
- if should_export_tombstones(&config) {
- wait_for_tombstone_transmit_done()?;
- }
-
info!("boot completed, time to run payload");
exec_task(task, service).context("Failed to run payload")
}
-fn control_service(action: &str, service: &str) -> Result<()> {
- system_properties::write(&format!("ctl.{}", action), service)
- .with_context(|| format!("Failed to {} {}", action, service))
-}
-
struct ApkDmverityArgument<'a> {
apk: &'a str,
idsig: &'a str,
@@ -774,11 +737,6 @@
wait_for_property_true(APEX_CONFIG_DONE_PROP).context("Failed waiting for apex config done")
}
-fn wait_for_tombstone_transmit_done() -> Result<()> {
- wait_for_property_true(TOMBSTONE_TRANSMIT_DONE_PROP)
- .context("Failed waiting for tombstone transmit done")
-}
-
fn wait_for_property_true(property_name: &str) -> Result<()> {
let mut prop = PropertyWatcher::new(property_name)?;
loop {
@@ -795,16 +753,23 @@
}
fn get_public_key_from_apk(apk: &str, root_hash_trustful: bool) -> Result<Box<[u8]>> {
+ let current_sdk = get_current_sdk()?;
if !root_hash_trustful {
- verify(apk).context(MicrodroidError::PayloadVerificationFailed(format!(
+ verify(apk, current_sdk).context(MicrodroidError::PayloadVerificationFailed(format!(
"failed to verify {}",
apk
)))
} else {
- get_public_key_der(apk)
+ get_public_key_der(apk, current_sdk)
}
}
+fn get_current_sdk() -> Result<u32> {
+ let current_sdk = system_properties::read("ro.build.version.sdk")?;
+ let current_sdk = current_sdk.ok_or_else(|| anyhow!("SDK version missing"))?;
+ current_sdk.parse().context("Malformed SDK version")
+}
+
fn load_config(payload_metadata: PayloadMetadata) -> Result<VmPayloadConfig> {
match payload_metadata {
PayloadMetadata::config_path(path) => {
@@ -832,16 +797,27 @@
}
}
-/// Loads the crashkernel into memory using kexec if the VM is loaded with `crashkernel=' parameter
-/// in the cmdline.
+/// Loads the crashkernel into memory using kexec if debuggable or debug policy says so.
+/// The VM should be loaded with `crashkernel=' parameter in the cmdline to allocate memory
+/// for crashkernel.
fn load_crashkernel_if_supported() -> Result<()> {
let supported = std::fs::read_to_string("/proc/cmdline")?.contains(" crashkernel=");
info!("ramdump supported: {}", supported);
- if supported {
+
+ if !supported {
+ return Ok(());
+ }
+
+ let debuggable = system_properties::read_bool(DEBUGGABLE_PROP, true)?;
+ let ramdump = get_debug_policy_bool(AVF_DEBUG_POLICY_RAMDUMP)?.unwrap_or_default();
+ let requested = debuggable | ramdump;
+
+ if requested {
let status = Command::new("/system/bin/kexec_load").status()?;
if !status.success() {
return Err(anyhow!("Failed to load crashkernel: {:?}", status));
}
+ info!("ramdump is loaded: debuggable={debuggable}, ramdump={ramdump}");
}
Ok(())
}
diff --git a/pvmfw/Android.bp b/pvmfw/Android.bp
index d78f4f2..0d845f9 100644
--- a/pvmfw/Android.bp
+++ b/pvmfw/Android.bp
@@ -22,6 +22,7 @@
"libonce_cell_nostd",
"libpvmfw_avb_nostd",
"libpvmfw_embedded_key",
+ "libpvmfw_fdt_template",
"libstatic_assertions",
"libtinyvec_nostd",
"libuuid_nostd",
@@ -53,6 +54,7 @@
// partition image. This is just to package the unstripped file into the
// symbols zip file for debugging purpose.
installable: true,
+ native_coverage: false,
}
raw_binary {
@@ -117,6 +119,37 @@
installable: false,
}
+// platform.dts is passed to clang for macro preprocessing, and then compiled to dtbo using dtc.
+// The raw content of the dtbo file is then written as a Rust byte array.
+genrule {
+ name: "pvmfw_fdt_template_rs",
+ srcs: [
+ "platform.dts",
+ ":arm_dt_bindings_headers", // implicit dependency
+ ],
+ out: ["lib.rs"],
+ tools: ["dtc"],
+ cmd: "prebuilts/clang/host/linux-x86/clang-r487747/bin/clang " + // UGLY!!!
+ "-E -P -x assembler-with-cpp -I external/arm-trusted-firmware/include " +
+ "-o $(genDir)/preprocessed.dts $(location platform.dts) && " +
+ "$(location dtc) -I dts -O dtb -o $(genDir)/compiled.dtbo $(genDir)/preprocessed.dts && " +
+ "(" +
+ " echo '#![no_std]';" +
+ " echo '#![allow(missing_docs)]';" +
+ " echo 'pub const RAW: &[u8] = &[';" +
+ " xxd -i < $(genDir)/compiled.dtbo;" +
+ " echo '];';" +
+ ") > $(out)",
+}
+
+rust_library_rlib {
+ name: "libpvmfw_fdt_template",
+ defaults: ["vmbase_ffi_defaults"],
+ prefer_rlib: true,
+ srcs: [":pvmfw_fdt_template_rs"],
+ crate_name: "pvmfw_fdt_template",
+}
+
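For orientation, the generated `lib.rs` has roughly the shape sketched below (byte values are illustrative; a compiled DTB/DTBO always begins with the 0xd00dfeed magic):

```rust
#![no_std]
#![allow(missing_docs)]
pub const RAW: &[u8] = &[
    0xd0, 0x0d, 0xfe, 0xed, // FDT magic, followed by the rest of compiled.dtbo
];
```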
bootimg {
name: "pvmfw_img",
stem: "pvmfw.img",
diff --git a/pvmfw/README.md b/pvmfw/README.md
index 1e4b605..04ad8c4 100644
--- a/pvmfw/README.md
+++ b/pvmfw/README.md
@@ -61,13 +61,27 @@
Starting in Android T, the `PRODUCT_BUILD_PVMFW_IMAGE` build variable controls
the generation of `pvmfw.img`, a new [ABL partition][ABL-part] containing the
-pvmfw binary and following the internal format of the [`boot`][boot-img]
-partition, intended to be verified and loaded by ABL on AVF-compatible devices.
+pvmfw binary (sometimes called "`pvmfw.bin`") and following the internal format
+of the [`boot`][boot-img] partition, intended to be verified and loaded by ABL
+on AVF-compatible devices.
+
+Once ABL has verified the `pvmfw.img` chained static partition, the contained
+[`boot.img` header][boot-img] may be used to obtain the size of the `pvmfw.bin`
+image (recorded in the `kernel_size` field), as it already does for the kernel
+itself. In accordance with the header format, the `kernel_size` bytes of the
+partition following the header will be the `pvmfw.bin` image.
+
+Note that when it gets executed in the context of a pVM, `pvmfw` expects to have
+been loaded at a 4KiB-aligned intermediate physical address (IPA), so if ABL loads
+the `pvmfw.bin` image without respecting this alignment, it is the
+responsibility of the hypervisor to either reject the image or copy it into
+the guest address space with the right alignment.
To support pKVM, ABL is expected to describe the region using a reserved memory
device tree node where both address and size have been properly aligned to the
-page size used by the hypervisor. For example, the following node describes a
-region of size `0x40000` at address `0x80000000`:
+page size used by the hypervisor. This single region must include both the pvmfw
+binary image and its configuration data (see below). For example, the following
+node describes a region of size `0x40000` at address `0x80000000`:
```
reserved-memory {
...
@@ -220,3 +234,31 @@
[dice-dt]: https://www.kernel.org/doc/Documentation/devicetree/bindings/reserved-memory/google%2Copen-dice.yaml
[Layering]: https://pigweed.googlesource.com/open-dice/+/refs/heads/main/docs/specification.md#layering-details
[Trusty-BCC]: https://android.googlesource.com/trusty/lib/+/1696be0a8f3a7103/lib/hwbcc/common/swbcc.c#554
+
+#### pVM Device Tree Overlay
+
+The config header can provide a DTBO to be overlaid on top of the baseline device
+tree from crosvm.
+
+The DTBO may contain debug policies as follows.
+
+```
+/ {
+ fragment@avf {
+ target-path = "/";
+
+ __overlay__ {
+ avf {
+ /* your debug policy here */
+ };
+ };
+ };
+}; /* end of avf */
+```
+
+To specify a DTBO, the host bootloader should apply the DTBO to both the host
+OS's device tree and the config header of `pvmfw`. Both `virtualizationmanager`
+and `pvmfw` will then set up the corresponding debugging features.
+
+For details about device tree properties for debug policies, see
+[microdroid's debugging policy guide](../microdroid/README.md#option-1-running-microdroid-on-avf-debug-policy-configured-device).
diff --git a/pvmfw/platform.dts b/pvmfw/platform.dts
new file mode 100644
index 0000000..a7b1de7
--- /dev/null
+++ b/pvmfw/platform.dts
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define PLACEHOLDER 0xffffffff
+#define PLACEHOLDER2 PLACEHOLDER PLACEHOLDER
+#define PLACEHOLDER4 PLACEHOLDER2 PLACEHOLDER2
+
+#define IRQ_BASE 4
+
+/dts-v1/;
+
+/ {
+ interrupt-parent = <&intc>;
+ compatible = "linux,dummy-virt";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ stdout-path = "/uart@3f8";
+ linux,pci-probe-only = <1>;
+ kaslr-seed = <PLACEHOLDER2>;
+ avf,strict-boot;
+ avf,new-instance;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00 0x80000000 PLACEHOLDER2>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ swiotlb: restricted_dma_reserved {
+ compatible = "restricted-dma-pool";
+ size = <PLACEHOLDER2>;
+ alignment = <PLACEHOLDER2>;
+ };
+
+ dice {
+ compatible = "google,open-dice";
+ no-map;
+ reg = <PLACEHOLDER4>;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <0>;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <1>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <2>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <3>;
+ };
+ cpu@4 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <4>;
+ };
+ cpu@5 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <5>;
+ };
+ cpu@6 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <6>;
+ };
+ cpu@7 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <7>;
+ };
+ cpu@8 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <8>;
+ };
+ cpu@9 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <9>;
+ };
+ cpu@10 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <10>;
+ };
+ cpu@11 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <11>;
+ };
+ cpu@12 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <12>;
+ };
+ cpu@13 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <13>;
+ };
+ cpu@14 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <14>;
+ };
+ cpu@15 {
+ device_type = "cpu";
+ compatible = "arm,arm-v8";
+ enable-method = "psci";
+ reg = <15>;
+ };
+ };
+
+ intc: intc {
+ compatible = "arm,gic-v3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x00 0x3fff0000 0x00 0x10000>, <PLACEHOLDER4>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ always-on;
+ /* The IRQ type needs to be OR-ed with the CPU mask */
+ interrupts = <GIC_PPI 0xd IRQ_TYPE_LEVEL_LOW
+ GIC_PPI 0xe IRQ_TYPE_LEVEL_LOW
+ GIC_PPI 0xb IRQ_TYPE_LEVEL_LOW
+ GIC_PPI 0xa IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ uart@2e8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x2e8 0x00 0x8>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ uart@2f8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x2f8 0x00 0x8>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ uart@3e8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x3e8 0x00 0x8>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ uart@3f8 {
+ compatible = "ns16550a";
+ reg = <0x00 0x3f8 0x00 0x8>;
+ clock-frequency = <0x1c2000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "hvc";
+ };
+
+ pci {
+ compatible = "pci-host-cam-generic";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ dma-coherent;
+ memory-region = <&swiotlb>;
+ ranges = <
+ 0x3000000 0x0 0x02000000 0x0 0x02000000 0x00 0x02000000
+ 0x3000000 PLACEHOLDER2 PLACEHOLDER2 PLACEHOLDER2
+ >;
+ bus-range = <0x00 0x00>;
+ reg = <0x00 0x10000 0x00 0x1000000>;
+ interrupt-map = <
+ 0x0800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 0) IRQ_TYPE_LEVEL_HIGH
+ 0x1000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 1) IRQ_TYPE_LEVEL_HIGH
+ 0x1800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 2) IRQ_TYPE_LEVEL_HIGH
+ 0x2000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 3) IRQ_TYPE_LEVEL_HIGH
+ 0x2800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 4) IRQ_TYPE_LEVEL_HIGH
+ 0x3000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 5) IRQ_TYPE_LEVEL_HIGH
+ 0x3800 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 6) IRQ_TYPE_LEVEL_HIGH
+ 0x4000 0x0 0x0 1 &intc 0 0 GIC_SPI (IRQ_BASE + 7) IRQ_TYPE_LEVEL_HIGH
+ >;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7
+ 0xf800 0x0 0x0 0x7>;
+ };
+
+ clk: pclk@3M {
+ compatible = "fixed-clock";
+ clock-frequency = <0x2fefd8>;
+ #clock-cells = <0>;
+ };
+
+ rtc@2000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x41030>;
+ reg = <0x00 0x2000 0x00 0x1000>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "apb_pclk";
+ clocks = <&clk>;
+ };
+
+ vmwdt@3000 {
+ compatible = "qemu,vcpu-stall-detector";
+ reg = <0x00 0x3000 0x00 0x1000>;
+ clock-frequency = <10>;
+ timeout-sec = <8>;
+ };
+};
diff --git a/pvmfw/src/crypto.rs b/pvmfw/src/crypto.rs
index 85dc6c9..275de7a 100644
--- a/pvmfw/src/crypto.rs
+++ b/pvmfw/src/crypto.rs
@@ -14,6 +14,8 @@
//! Wrapper around BoringSSL/OpenSSL symbols.
+use crate::cstr;
+
use core::convert::AsRef;
use core::ffi::{c_char, c_int, CStr};
use core::fmt;
@@ -81,14 +83,10 @@
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let unknown_library = CStr::from_bytes_with_nul(b"{unknown library}\0").unwrap();
- let unknown_reason = CStr::from_bytes_with_nul(b"{unknown reason}\0").unwrap();
- let unknown_file = CStr::from_bytes_with_nul(b"??\0").unwrap();
-
let packed = self.packed_value();
- let library = self.library_name().unwrap_or(unknown_library).to_str().unwrap();
- let reason = self.reason().unwrap_or(unknown_reason).to_str().unwrap();
- let file = self.file.unwrap_or(unknown_file).to_str().unwrap();
+ let library = self.library_name().unwrap_or(cstr!("{unknown library}")).to_str().unwrap();
+ let reason = self.reason().unwrap_or(cstr!("{unknown reason}")).to_str().unwrap();
+ let file = self.file.unwrap_or(cstr!("??")).to_str().unwrap();
let line = self.line;
write!(f, "{file}:{line}: {library}: {reason} ({packed:#x})")
diff --git a/pvmfw/src/debug_policy.rs b/pvmfw/src/debug_policy.rs
index 23d3e1d..f4b99a6 100644
--- a/pvmfw/src/debug_policy.rs
+++ b/pvmfw/src/debug_policy.rs
@@ -14,7 +14,8 @@
//! Support for the debug policy overlay in pvmfw
-use alloc::{vec, vec::Vec};
+use crate::cstr;
+use alloc::vec::Vec;
use core::ffi::CStr;
use core::fmt;
use libfdt::FdtError;
@@ -63,76 +64,11 @@
fdt.pack().map_err(|e| DebugPolicyError::OverlaidFdt("Failed to re-pack", e))
}
-/// Disables ramdump by removing crashkernel from bootargs in /chosen.
-fn disable_ramdump(fdt: &mut libfdt::Fdt) -> Result<(), DebugPolicyError> {
- let chosen_path = CStr::from_bytes_with_nul(b"/chosen\0").unwrap();
- let bootargs_name = CStr::from_bytes_with_nul(b"bootargs\0").unwrap();
-
- let chosen = match fdt
- .node(chosen_path)
- .map_err(|e| DebugPolicyError::Fdt("Failed to find /chosen", e))?
- {
- Some(node) => node,
- None => return Ok(()),
- };
-
- let bootargs = match chosen
- .getprop_str(bootargs_name)
- .map_err(|e| DebugPolicyError::Fdt("Failed to find bootargs prop", e))?
- {
- Some(value) if !value.to_bytes().is_empty() => value,
- _ => return Ok(()),
- };
-
- // TODO: Improve add 'crashkernel=17MB' only when it's unnecessary.
- // Currently 'crashkernel=17MB' in virtualizationservice and passed by
- // chosen node, because it's not exactly a debug policy but a
- // configuration. However, it's actually microdroid specific
- // so we need a way to generalize it.
- let mut args = vec![];
- for arg in bootargs.to_bytes().split(|byte| byte.is_ascii_whitespace()) {
- if arg.is_empty() || arg.starts_with(b"crashkernel=") {
- continue;
- }
- args.push(arg);
- }
- let mut new_bootargs = args.as_slice().join(&b" "[..]);
- new_bootargs.push(b'\0');
-
- // We've checked existence of /chosen node at the beginning.
- let mut chosen_mut = fdt.node_mut(chosen_path).unwrap().unwrap();
- chosen_mut.setprop(bootargs_name, new_bootargs.as_slice()).map_err(|e| {
- DebugPolicyError::OverlaidFdt("Failed to remove crashkernel. FDT might be corrupted", e)
- })
-}
-
-/// Returns true only if fdt has ramdump prop in the /avf/guest/common node with value <1>
-fn is_ramdump_enabled(fdt: &libfdt::Fdt) -> Result<bool, DebugPolicyError> {
- let common = match fdt
- .node(CStr::from_bytes_with_nul(b"/avf/guest/common\0").unwrap())
- .map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find /avf/guest/common node", e))?
- {
- Some(node) => node,
- None => return Ok(false),
- };
-
- match common
- .getprop_u32(CStr::from_bytes_with_nul(b"ramdump\0").unwrap())
- .map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find ramdump prop", e))?
- {
- Some(1) => Ok(true),
- _ => Ok(false),
- }
-}
-
/// Enables console output by adding kernel.printk.devkmsg and kernel.console to bootargs.
/// This uses hardcoded console name 'hvc0' and it should be match with microdroid's bootconfig.debuggable.
fn enable_console_output(fdt: &mut libfdt::Fdt) -> Result<(), DebugPolicyError> {
- let chosen_path = CStr::from_bytes_with_nul(b"/chosen\0").unwrap();
- let bootargs_name = CStr::from_bytes_with_nul(b"bootargs\0").unwrap();
-
let chosen = match fdt
- .node(chosen_path)
+ .node(cstr!("/chosen"))
.map_err(|e| DebugPolicyError::Fdt("Failed to find /chosen", e))?
{
Some(node) => node,
@@ -140,7 +76,7 @@
};
let bootargs = match chosen
- .getprop_str(bootargs_name)
+ .getprop_str(cstr!("bootargs"))
.map_err(|e| DebugPolicyError::Fdt("Failed to find bootargs prop", e))?
{
Some(value) if !value.to_bytes().is_empty() => value,
@@ -154,8 +90,8 @@
fdt.unpack().map_err(|e| DebugPolicyError::OverlaidFdt("Failed to unpack", e))?;
// We've checked existence of /chosen node at the beginning.
- let mut chosen_mut = fdt.node_mut(chosen_path).unwrap().unwrap();
- chosen_mut.setprop(bootargs_name, new_bootargs.as_slice()).map_err(|e| {
+ let mut chosen_mut = fdt.node_mut(cstr!("/chosen")).unwrap().unwrap();
+ chosen_mut.setprop(cstr!("bootargs"), new_bootargs.as_slice()).map_err(|e| {
DebugPolicyError::OverlaidFdt("Failed to enabled console output. FDT might be corrupted", e)
})?;
@@ -166,7 +102,7 @@
/// Returns true only if fdt has log prop in the /avf/guest/common node with value <1>
fn is_console_output_enabled(fdt: &libfdt::Fdt) -> Result<bool, DebugPolicyError> {
let common = match fdt
- .node(CStr::from_bytes_with_nul(b"/avf/guest/common\0").unwrap())
+ .node(cstr!("/avf/guest/common"))
.map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find /avf/guest/common node", e))?
{
Some(node) => node,
@@ -174,7 +110,7 @@
};
match common
- .getprop_u32(CStr::from_bytes_with_nul(b"log\0").unwrap())
+ .getprop_u32(cstr!("log"))
.map_err(|e| DebugPolicyError::DebugPolicyFdt("Failed to find log prop", e))?
{
Some(1) => Ok(true),
@@ -196,13 +132,6 @@
apply_debug_policy(fdt, dp)?;
}
- // Handles ramdump in the debug policy
- if is_ramdump_enabled(fdt)? {
- info!("ramdump is enabled by debug policy");
- } else {
- disable_ramdump(fdt)?;
- }
-
// Handles console output in the debug policy
if is_console_output_enabled(fdt)? {
enable_console_output(fdt)?;
diff --git a/pvmfw/src/dice.rs b/pvmfw/src/dice.rs
index 3ceb8ef..bad3453 100644
--- a/pvmfw/src/dice.rs
+++ b/pvmfw/src/dice.rs
@@ -14,6 +14,7 @@
//! Support for DICE derivation and BCC generation.
+use crate::cstr;
use crate::helpers::flushed_zeroize;
use core::ffi::c_void;
use core::ffi::CStr;
@@ -60,10 +61,9 @@
self,
salt: &[u8; HIDDEN_SIZE],
) -> diced_open_dice::Result<InputValues> {
- let component_name = CStr::from_bytes_with_nul(b"vm_entry\0").unwrap();
let mut config_descriptor_buffer = [0; 128];
let config_descriptor_size = bcc_format_config_descriptor(
- Some(component_name),
+ Some(cstr!("vm_entry")),
None, // component_version
false, // resettable
&mut config_descriptor_buffer,
diff --git a/pvmfw/src/entry.rs b/pvmfw/src/entry.rs
index 106a4ef..8219882 100644
--- a/pvmfw/src/entry.rs
+++ b/pvmfw/src/entry.rs
@@ -109,37 +109,17 @@
RebootReason::InvalidFdt
})?;
+ let info = fdt::sanitize_device_tree(fdt)?;
debug!("Fdt passed validation!");
- let memory_range = fdt
- .memory()
- .map_err(|e| {
- error!("Failed to get /memory from the DT: {e}");
- RebootReason::InvalidFdt
- })?
- .ok_or_else(|| {
- error!("Node /memory was found empty");
- RebootReason::InvalidFdt
- })?
- .next()
- .ok_or_else(|| {
- error!("Failed to read the memory size from the FDT");
- RebootReason::InternalError
- })?;
-
+ let memory_range = info.memory_range;
debug!("Resizing MemoryTracker to range {memory_range:#x?}");
-
memory.shrink(&memory_range).map_err(|_| {
error!("Failed to use memory range value from DT: {memory_range:#x?}");
RebootReason::InvalidFdt
})?;
- let kernel_range = fdt::kernel_range(fdt).map_err(|e| {
- error!("Error while attempting to read the kernel range from the DT: {e}");
- RebootReason::InvalidFdt
- })?;
-
- let kernel_range = if let Some(r) = kernel_range {
+ let kernel_range = if let Some(r) = info.kernel_range {
memory.alloc_range(&r).map_err(|e| {
error!("Failed to obtain the kernel range with DT range: {e}");
RebootReason::InternalError
@@ -165,12 +145,7 @@
let kernel =
unsafe { slice::from_raw_parts(kernel_range.start as *const u8, kernel_range.len()) };
- let ramdisk_range = fdt::initrd_range(fdt).map_err(|e| {
- error!("An error occurred while locating the ramdisk in the device tree: {e}");
- RebootReason::InternalError
- })?;
-
- let ramdisk = if let Some(r) = ramdisk_range {
+ let ramdisk = if let Some(r) = info.initrd_range {
debug!("Located ramdisk at {r:?}");
let r = memory.alloc_range(&r).map_err(|e| {
error!("Failed to obtain the initrd range: {e}");
diff --git a/pvmfw/src/fdt.rs b/pvmfw/src/fdt.rs
index 793eaac..7d88455 100644
--- a/pvmfw/src/fdt.rs
+++ b/pvmfw/src/fdt.rs
@@ -14,18 +14,37 @@
//! High-level FDT functions.
+use crate::cstr;
+use crate::helpers::flatten;
+use crate::helpers::GUEST_PAGE_SIZE;
+use crate::helpers::SIZE_4KB;
+use crate::memory::BASE_ADDR;
+use crate::memory::MAX_ADDR;
+use crate::RebootReason;
+use alloc::ffi::CString;
+use core::cmp::max;
+use core::cmp::min;
use core::ffi::CStr;
+use core::mem::size_of;
use core::ops::Range;
+use fdtpci::PciMemoryFlags;
+use fdtpci::PciRangeType;
+use libfdt::AddressRange;
+use libfdt::CellIterator;
use libfdt::Fdt;
use libfdt::FdtError;
+use libfdt::FdtNode;
+use log::debug;
+use log::error;
+use tinyvec::ArrayVec;
-/// Extract from /config the address range containing the pre-loaded kernel.
-pub fn kernel_range(fdt: &libfdt::Fdt) -> libfdt::Result<Option<Range<usize>>> {
- let config = CStr::from_bytes_with_nul(b"/config\0").unwrap();
- let addr = CStr::from_bytes_with_nul(b"kernel-address\0").unwrap();
- let size = CStr::from_bytes_with_nul(b"kernel-size\0").unwrap();
+/// Extract from /config the address range containing the pre-loaded kernel. Absence of /config is
+/// not an error.
+fn read_kernel_range_from(fdt: &Fdt) -> libfdt::Result<Option<Range<usize>>> {
+ let addr = cstr!("kernel-address");
+ let size = cstr!("kernel-size");
- if let Some(config) = fdt.node(config)? {
+ if let Some(config) = fdt.node(cstr!("/config"))? {
if let (Some(addr), Some(size)) = (config.getprop_u32(addr)?, config.getprop_u32(size)?) {
let addr = addr as usize;
let size = size as usize;
@@ -37,10 +56,11 @@
Ok(None)
}
-/// Extract from /chosen the address range containing the pre-loaded ramdisk.
-pub fn initrd_range(fdt: &libfdt::Fdt) -> libfdt::Result<Option<Range<usize>>> {
- let start = CStr::from_bytes_with_nul(b"linux,initrd-start\0").unwrap();
- let end = CStr::from_bytes_with_nul(b"linux,initrd-end\0").unwrap();
+/// Extract from /chosen the address range containing the pre-loaded ramdisk. Absence is not an
+/// error as there can be initrd-less VMs.
+fn read_initrd_range_from(fdt: &Fdt) -> libfdt::Result<Option<Range<usize>>> {
+ let start = cstr!("linux,initrd-start");
+ let end = cstr!("linux,initrd-end");
if let Some(chosen) = fdt.chosen()? {
if let (Some(start), Some(end)) = (chosen.getprop_u32(start)?, chosen.getprop_u32(end)?) {
@@ -51,6 +71,601 @@
Ok(None)
}
+fn patch_initrd_range(fdt: &mut Fdt, initrd_range: &Range<usize>) -> libfdt::Result<()> {
+ let start = u32::try_from(initrd_range.start).unwrap();
+ let end = u32::try_from(initrd_range.end).unwrap();
+
+ let mut node = fdt.chosen_mut()?.ok_or(FdtError::NotFound)?;
+ node.setprop(cstr!("linux,initrd-start"), &start.to_be_bytes())?;
+ node.setprop(cstr!("linux,initrd-end"), &end.to_be_bytes())?;
+ Ok(())
+}
+
+fn read_bootargs_from(fdt: &Fdt) -> libfdt::Result<Option<CString>> {
+ if let Some(chosen) = fdt.chosen()? {
+ if let Some(bootargs) = chosen.getprop_str(cstr!("bootargs"))? {
+ // We need to copy the string to heap because the original fdt will be invalidated
+ // by the templated DT
+ let copy = CString::new(bootargs.to_bytes()).map_err(|_| FdtError::BadValue)?;
+ return Ok(Some(copy));
+ }
+ }
+ Ok(None)
+}
+
+fn patch_bootargs(fdt: &mut Fdt, bootargs: &CStr) -> libfdt::Result<()> {
+ let mut node = fdt.chosen_mut()?.ok_or(FdtError::NotFound)?;
+ // TODO(b/275306568) filter out dangerous options
+ node.setprop(cstr!("bootargs"), bootargs.to_bytes_with_nul())
+}
+
+/// Read the first range in /memory node in DT
+fn read_memory_range_from(fdt: &Fdt) -> libfdt::Result<Range<usize>> {
+ fdt.memory()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)
+}
+
+/// Check if memory range is ok
+fn validate_memory_range(range: &Range<usize>) -> Result<(), RebootReason> {
+ let base = range.start;
+ if base != BASE_ADDR {
+ error!("Memory base address {:#x} is not {:#x}", base, BASE_ADDR);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let size = range.len();
+ if size % GUEST_PAGE_SIZE != 0 {
+ error!("Memory size {:#x} is not a multiple of page size {:#x}", size, GUEST_PAGE_SIZE);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if size == 0 {
+ error!("Memory size is 0");
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+fn patch_memory_range(fdt: &mut Fdt, memory_range: &Range<usize>) -> libfdt::Result<()> {
+ let size = memory_range.len() as u64;
+ fdt.node_mut(cstr!("/memory"))?
+ .ok_or(FdtError::NotFound)?
+ .setprop_inplace(cstr!("reg"), flatten(&[BASE_ADDR.to_be_bytes(), size.to_be_bytes()]))
+}
+
+/// Read the number of CPUs from DT
+fn read_num_cpus_from(fdt: &Fdt) -> libfdt::Result<usize> {
+ Ok(fdt.compatible_nodes(cstr!("arm,arm-v8"))?.count())
+}
+
+/// Validate number of CPUs
+fn validate_num_cpus(num_cpus: usize) -> Result<(), RebootReason> {
+ if num_cpus == 0 {
+ error!("Number of CPU can't be 0");
+ return Err(RebootReason::InvalidFdt);
+ }
+ if DeviceTreeInfo::GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus.try_into().unwrap()).is_none() {
+ error!("Too many CPUs for gic: {}", num_cpus);
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+/// Patch DT by keeping `num_cpus` number of arm,arm-v8 compatible nodes, and pruning the rest.
+fn patch_num_cpus(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
+ let cpu = cstr!("arm,arm-v8");
+ let mut next = fdt.root_mut()?.next_compatible(cpu)?;
+ for _ in 0..num_cpus {
+ next = if let Some(current) = next {
+ current.next_compatible(cpu)?
+ } else {
+ return Err(FdtError::NoSpace);
+ };
+ }
+ while let Some(current) = next {
+ next = current.delete_and_next_compatible(cpu)?;
+ }
+ Ok(())
+}
+
+#[derive(Debug)]
+struct PciInfo {
+ ranges: [PciAddrRange; 2],
+ irq_masks: ArrayVec<[PciIrqMask; PciInfo::MAX_IRQS]>,
+ irq_maps: ArrayVec<[PciIrqMap; PciInfo::MAX_IRQS]>,
+}
+
+impl PciInfo {
+ const IRQ_MASK_CELLS: usize = 4;
+ const IRQ_MAP_CELLS: usize = 10;
+ const MAX_IRQS: usize = 8;
+}
+
+type PciAddrRange = AddressRange<(u32, u64), u64, u64>;
+type PciIrqMask = [u32; PciInfo::IRQ_MASK_CELLS];
+type PciIrqMap = [u32; PciInfo::IRQ_MAP_CELLS];
+
+/// Iterator that takes N cells as a chunk
+struct CellChunkIterator<'a, const N: usize> {
+ cells: CellIterator<'a>,
+}
+
+impl<'a, const N: usize> CellChunkIterator<'a, N> {
+ fn new(cells: CellIterator<'a>) -> Self {
+ Self { cells }
+ }
+}
+
+impl<'a, const N: usize> Iterator for CellChunkIterator<'a, N> {
+ type Item = [u32; N];
+ fn next(&mut self) -> Option<Self::Item> {
+ let mut ret: Self::Item = [0; N];
+ for i in ret.iter_mut() {
+ *i = self.cells.next()?;
+ }
+ Some(ret)
+ }
+}
+
+/// Read pci host controller ranges, irq maps, and irq map masks from DT
+fn read_pci_info_from(fdt: &Fdt) -> libfdt::Result<PciInfo> {
+ let node =
+ fdt.compatible_nodes(cstr!("pci-host-cam-generic"))?.next().ok_or(FdtError::NotFound)?;
+
+ let mut ranges = node.ranges::<(u32, u64), u64, u64>()?.ok_or(FdtError::NotFound)?;
+ let range0 = ranges.next().ok_or(FdtError::NotFound)?;
+ let range1 = ranges.next().ok_or(FdtError::NotFound)?;
+
+ let irq_masks = node.getprop_cells(cstr!("interrupt-map-mask"))?.ok_or(FdtError::NotFound)?;
+ let irq_masks = CellChunkIterator::<{ PciInfo::IRQ_MASK_CELLS }>::new(irq_masks);
+ let irq_masks: ArrayVec<[PciIrqMask; PciInfo::MAX_IRQS]> =
+ irq_masks.take(PciInfo::MAX_IRQS).collect();
+
+ let irq_maps = node.getprop_cells(cstr!("interrupt-map"))?.ok_or(FdtError::NotFound)?;
+ let irq_maps = CellChunkIterator::<{ PciInfo::IRQ_MAP_CELLS }>::new(irq_maps);
+ let irq_maps: ArrayVec<[PciIrqMap; PciInfo::MAX_IRQS]> =
+ irq_maps.take(PciInfo::MAX_IRQS).collect();
+
+ Ok(PciInfo { ranges: [range0, range1], irq_masks, irq_maps })
+}
+
+fn validate_pci_info(pci_info: &PciInfo, memory_range: &Range<usize>) -> Result<(), RebootReason> {
+ for range in pci_info.ranges.iter() {
+ validate_pci_addr_range(range, memory_range)?;
+ }
+ for irq_mask in pci_info.irq_masks.iter() {
+ validate_pci_irq_mask(irq_mask)?;
+ }
+ for (idx, irq_map) in pci_info.irq_maps.iter().enumerate() {
+ validate_pci_irq_map(irq_map, idx)?;
+ }
+ Ok(())
+}
+
+fn validate_pci_addr_range(
+ range: &PciAddrRange,
+ memory_range: &Range<usize>,
+) -> Result<(), RebootReason> {
+ let mem_flags = PciMemoryFlags(range.addr.0);
+ let range_type = mem_flags.range_type();
+ let prefetchable = mem_flags.prefetchable();
+ let bus_addr = range.addr.1;
+ let cpu_addr = range.parent_addr;
+ let size = range.size;
+
+ if range_type != PciRangeType::Memory64 {
+ error!("Invalid range type {:?} for bus address {:#x} in PCI node", range_type, bus_addr);
+ return Err(RebootReason::InvalidFdt);
+ }
+ if prefetchable {
+ error!("PCI bus address {:#x} in PCI node is prefetchable", bus_addr);
+ return Err(RebootReason::InvalidFdt);
+ }
+    // Enforce identity (1:1) bus-to-CPU address mappings, as used by crosvm.
+ if bus_addr != cpu_addr {
+ error!("PCI bus address: {:#x} is different from CPU address: {:#x}", bus_addr, cpu_addr);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let Some(bus_end) = bus_addr.checked_add(size) else {
+ error!("PCI address range size {:#x} overflows", size);
+ return Err(RebootReason::InvalidFdt);
+ };
+ if bus_end > MAX_ADDR.try_into().unwrap() {
+ error!("PCI address end {:#x} is outside of translatable range", bus_end);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let memory_start = memory_range.start.try_into().unwrap();
+ let memory_end = memory_range.end.try_into().unwrap();
+
+ if max(bus_addr, memory_start) < min(bus_end, memory_end) {
+ error!(
+ "PCI address range {:#x}-{:#x} overlaps with main memory range {:#x}-{:#x}",
+ bus_addr, bus_end, memory_start, memory_end
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ Ok(())
+}
+
+fn validate_pci_irq_mask(irq_mask: &PciIrqMask) -> Result<(), RebootReason> {
+ const IRQ_MASK_ADDR_HI: u32 = 0xf800;
+ const IRQ_MASK_ADDR_ME: u32 = 0x0;
+ const IRQ_MASK_ADDR_LO: u32 = 0x0;
+ const IRQ_MASK_ANY_IRQ: u32 = 0x7;
+ const EXPECTED: PciIrqMask =
+ [IRQ_MASK_ADDR_HI, IRQ_MASK_ADDR_ME, IRQ_MASK_ADDR_LO, IRQ_MASK_ANY_IRQ];
+ if *irq_mask != EXPECTED {
+ error!("Invalid PCI irq mask {:#?}", irq_mask);
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+fn validate_pci_irq_map(irq_map: &PciIrqMap, idx: usize) -> Result<(), RebootReason> {
+ const PCI_DEVICE_IDX: usize = 11;
+ const PCI_IRQ_ADDR_ME: u32 = 0;
+ const PCI_IRQ_ADDR_LO: u32 = 0;
+ const PCI_IRQ_INTC: u32 = 1;
+ const AARCH64_IRQ_BASE: u32 = 4; // from external/crosvm/aarch64/src/lib.rs
+ const GIC_SPI: u32 = 0;
+ const IRQ_TYPE_LEVEL_HIGH: u32 = 4;
+
+ let pci_addr = (irq_map[0], irq_map[1], irq_map[2]);
+ let pci_irq_number = irq_map[3];
+ let _controller_phandle = irq_map[4]; // skipped.
+ let gic_addr = (irq_map[5], irq_map[6]); // address-cells is <2> for GIC
+ // interrupt-cells is <3> for GIC
+ let gic_peripheral_interrupt_type = irq_map[7];
+ let gic_irq_number = irq_map[8];
+ let gic_irq_type = irq_map[9];
+
+ let phys_hi: u32 = (0x1 << PCI_DEVICE_IDX) * (idx + 1) as u32;
+ let expected_pci_addr = (phys_hi, PCI_IRQ_ADDR_ME, PCI_IRQ_ADDR_LO);
+
+ if pci_addr != expected_pci_addr {
+ error!("PCI device address {:#x} {:#x} {:#x} in interrupt-map is different from expected address \
+ {:#x} {:#x} {:#x}",
+ pci_addr.0, pci_addr.1, pci_addr.2, expected_pci_addr.0, expected_pci_addr.1, expected_pci_addr.2);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if pci_irq_number != PCI_IRQ_INTC {
+ error!(
+ "PCI INT# {:#x} in interrupt-map is different from expected value {:#x}",
+ pci_irq_number, PCI_IRQ_INTC
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if gic_addr != (0, 0) {
+ error!(
+ "GIC address {:#x} {:#x} in interrupt-map is different from expected address \
+ {:#x} {:#x}",
+ gic_addr.0, gic_addr.1, 0, 0
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if gic_peripheral_interrupt_type != GIC_SPI {
+ error!("GIC peripheral interrupt type {:#x} in interrupt-map is different from expected value \
+ {:#x}", gic_peripheral_interrupt_type, GIC_SPI);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ let irq_nr: u32 = AARCH64_IRQ_BASE + (idx as u32);
+ if gic_irq_number != irq_nr {
+ error!(
+ "GIC irq number {:#x} in interrupt-map is unexpected. Expected {:#x}",
+ gic_irq_number, irq_nr
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if gic_irq_type != IRQ_TYPE_LEVEL_HIGH {
+ error!(
+ "IRQ type in {:#x} is invalid. Must be LEVEL_HIGH {:#x}",
+ gic_irq_type, IRQ_TYPE_LEVEL_HIGH
+ );
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+fn patch_pci_info(fdt: &mut Fdt, pci_info: &PciInfo) -> libfdt::Result<()> {
+ let mut node = fdt
+ .root_mut()?
+ .next_compatible(cstr!("pci-host-cam-generic"))?
+ .ok_or(FdtError::NotFound)?;
+
+ let irq_masks_size = pci_info.irq_masks.len() * size_of::<PciIrqMask>();
+ node.trimprop(cstr!("interrupt-map-mask"), irq_masks_size)?;
+
+ let irq_maps_size = pci_info.irq_maps.len() * size_of::<PciIrqMap>();
+ node.trimprop(cstr!("interrupt-map"), irq_maps_size)?;
+
+ node.setprop_inplace(
+ cstr!("ranges"),
+ flatten(&[pci_info.ranges[0].to_cells(), pci_info.ranges[1].to_cells()]),
+ )
+}
+
+#[derive(Default, Debug)]
+struct SerialInfo {
+ addrs: ArrayVec<[u64; Self::MAX_SERIALS]>,
+}
+
+impl SerialInfo {
+ const MAX_SERIALS: usize = 4;
+}
+
+fn read_serial_info_from(fdt: &Fdt) -> libfdt::Result<SerialInfo> {
+ let mut addrs: ArrayVec<[u64; SerialInfo::MAX_SERIALS]> = Default::default();
+ for node in fdt.compatible_nodes(cstr!("ns16550a"))?.take(SerialInfo::MAX_SERIALS) {
+ let reg = node.reg()?.ok_or(FdtError::NotFound)?.next().ok_or(FdtError::NotFound)?;
+ addrs.push(reg.addr);
+ }
+ Ok(SerialInfo { addrs })
+}
+
+/// Patch the DT by deleting the ns16550a compatible nodes whose addresses are unknown
+fn patch_serial_info(fdt: &mut Fdt, serial_info: &SerialInfo) -> libfdt::Result<()> {
+ let name = cstr!("ns16550a");
+ let mut next = fdt.root_mut()?.next_compatible(name);
+ while let Some(current) = next? {
+        let reg = FdtNode::from_mut(&current)
+ .reg()?
+ .ok_or(FdtError::NotFound)?
+ .next()
+ .ok_or(FdtError::NotFound)?;
+        next = if !serial_info.addrs.contains(&reg.addr) {
+ current.delete_and_next_compatible(name)
+ } else {
+ current.next_compatible(name)
+ }
+ }
+ Ok(())
+}
+
+#[derive(Debug)]
+struct SwiotlbInfo {
+ size: u64,
+ align: u64,
+}
+
+fn read_swiotlb_info_from(fdt: &Fdt) -> libfdt::Result<SwiotlbInfo> {
+ let node =
+ fdt.compatible_nodes(cstr!("restricted-dma-pool"))?.next().ok_or(FdtError::NotFound)?;
+ let size = node.getprop_u64(cstr!("size"))?.ok_or(FdtError::NotFound)?;
+ let align = node.getprop_u64(cstr!("alignment"))?.ok_or(FdtError::NotFound)?;
+ Ok(SwiotlbInfo { size, align })
+}
+
+fn validate_swiotlb_info(swiotlb_info: &SwiotlbInfo) -> Result<(), RebootReason> {
+ let size = swiotlb_info.size;
+ let align = swiotlb_info.align;
+
+ if size == 0 || (size % GUEST_PAGE_SIZE as u64) != 0 {
+ error!("Invalid swiotlb size {:#x}", size);
+ return Err(RebootReason::InvalidFdt);
+ }
+
+ if (align % GUEST_PAGE_SIZE as u64) != 0 {
+ error!("Invalid swiotlb alignment {:#x}", align);
+ return Err(RebootReason::InvalidFdt);
+ }
+ Ok(())
+}
+
+fn patch_swiotlb_info(fdt: &mut Fdt, swiotlb_info: &SwiotlbInfo) -> libfdt::Result<()> {
+ let mut node =
+ fdt.root_mut()?.next_compatible(cstr!("restricted-dma-pool"))?.ok_or(FdtError::NotFound)?;
+ node.setprop_inplace(cstr!("size"), &swiotlb_info.size.to_be_bytes())?;
+ node.setprop_inplace(cstr!("alignment"), &swiotlb_info.align.to_be_bytes())?;
+ Ok(())
+}
+
+fn patch_gic(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
+ let node = fdt.compatible_nodes(cstr!("arm,gic-v3"))?.next().ok_or(FdtError::NotFound)?;
+ let mut ranges = node.reg()?.ok_or(FdtError::NotFound)?;
+ let range0 = ranges.next().ok_or(FdtError::NotFound)?;
+ let mut range1 = ranges.next().ok_or(FdtError::NotFound)?;
+
+ let addr = range0.addr;
+ // SAFETY - doesn't overflow. checked in validate_num_cpus
+ let size: u64 =
+ DeviceTreeInfo::GIC_REDIST_SIZE_PER_CPU.checked_mul(num_cpus.try_into().unwrap()).unwrap();
+
+ // range1 is just below range0
+ range1.addr = addr - size;
+ range1.size = Some(size);
+
+ let range0 = range0.to_cells();
+ let range1 = range1.to_cells();
+ let value = [
+ range0.0, // addr
+ range0.1.unwrap(), //size
+ range1.0, // addr
+ range1.1.unwrap(), //size
+ ];
+
+ let mut node =
+ fdt.root_mut()?.next_compatible(cstr!("arm,gic-v3"))?.ok_or(FdtError::NotFound)?;
+ node.setprop_inplace(cstr!("reg"), flatten(&value))
+}
+
+fn patch_timer(fdt: &mut Fdt, num_cpus: usize) -> libfdt::Result<()> {
+ const NUM_INTERRUPTS: usize = 4;
+ const CELLS_PER_INTERRUPT: usize = 3;
+ let node = fdt.compatible_nodes(cstr!("arm,armv8-timer"))?.next().ok_or(FdtError::NotFound)?;
+ let interrupts = node.getprop_cells(cstr!("interrupts"))?.ok_or(FdtError::NotFound)?;
+ let mut value: ArrayVec<[u32; NUM_INTERRUPTS * CELLS_PER_INTERRUPT]> =
+ interrupts.take(NUM_INTERRUPTS * CELLS_PER_INTERRUPT).collect();
+
+ let num_cpus: u32 = num_cpus.try_into().unwrap();
+ let cpu_mask: u32 = (((0x1 << num_cpus) - 1) & 0xff) << 8;
+ for v in value.iter_mut().skip(2).step_by(CELLS_PER_INTERRUPT) {
+ *v |= cpu_mask;
+ }
+ for v in value.iter_mut() {
+ *v = v.to_be();
+ }
+
+ // SAFETY - array size is the same
+ let value = unsafe {
+ core::mem::transmute::<
+ [u32; NUM_INTERRUPTS * CELLS_PER_INTERRUPT],
+ [u8; NUM_INTERRUPTS * CELLS_PER_INTERRUPT * size_of::<u32>()],
+ >(value.into_inner())
+ };
+
+ let mut node =
+ fdt.root_mut()?.next_compatible(cstr!("arm,armv8-timer"))?.ok_or(FdtError::NotFound)?;
+ node.setprop_inplace(cstr!("interrupts"), value.as_slice())
+}
+
+#[derive(Debug)]
+pub struct DeviceTreeInfo {
+ pub kernel_range: Option<Range<usize>>,
+ pub initrd_range: Option<Range<usize>>,
+ pub memory_range: Range<usize>,
+ bootargs: Option<CString>,
+ num_cpus: usize,
+ pci_info: PciInfo,
+ serial_info: SerialInfo,
+ swiotlb_info: SwiotlbInfo,
+}
+
+impl DeviceTreeInfo {
+ const GIC_REDIST_SIZE_PER_CPU: u64 = (32 * SIZE_4KB) as u64;
+}
+
+pub fn sanitize_device_tree(fdt: &mut Fdt) -> Result<DeviceTreeInfo, RebootReason> {
+ let info = parse_device_tree(fdt)?;
+ debug!("Device tree info: {:?}", info);
+
+ fdt.copy_from_slice(pvmfw_fdt_template::RAW).map_err(|e| {
+ error!("Failed to instantiate FDT from the template DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ patch_device_tree(fdt, &info)?;
+ Ok(info)
+}
+
+fn parse_device_tree(fdt: &libfdt::Fdt) -> Result<DeviceTreeInfo, RebootReason> {
+ let kernel_range = read_kernel_range_from(fdt).map_err(|e| {
+ error!("Failed to read kernel range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let initrd_range = read_initrd_range_from(fdt).map_err(|e| {
+ error!("Failed to read initrd range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let memory_range = read_memory_range_from(fdt).map_err(|e| {
+ error!("Failed to read memory range from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_memory_range(&memory_range)?;
+
+ let bootargs = read_bootargs_from(fdt).map_err(|e| {
+ error!("Failed to read bootargs from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let num_cpus = read_num_cpus_from(fdt).map_err(|e| {
+ error!("Failed to read num cpus from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_num_cpus(num_cpus)?;
+
+ let pci_info = read_pci_info_from(fdt).map_err(|e| {
+ error!("Failed to read pci info from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_pci_info(&pci_info, &memory_range)?;
+
+ let serial_info = read_serial_info_from(fdt).map_err(|e| {
+ error!("Failed to read serial info from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ let swiotlb_info = read_swiotlb_info_from(fdt).map_err(|e| {
+ error!("Failed to read swiotlb info from DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ validate_swiotlb_info(&swiotlb_info)?;
+
+ Ok(DeviceTreeInfo {
+ kernel_range,
+ initrd_range,
+ memory_range,
+ bootargs,
+ num_cpus,
+ pci_info,
+ serial_info,
+ swiotlb_info,
+ })
+}
+
+fn patch_device_tree(fdt: &mut Fdt, info: &DeviceTreeInfo) -> Result<(), RebootReason> {
+ fdt.unpack().map_err(|e| {
+ error!("Failed to unpack DT for patching: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ if let Some(initrd_range) = &info.initrd_range {
+ patch_initrd_range(fdt, initrd_range).map_err(|e| {
+ error!("Failed to patch initrd range to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ }
+ patch_memory_range(fdt, &info.memory_range).map_err(|e| {
+ error!("Failed to patch memory range to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ if let Some(bootargs) = &info.bootargs {
+ patch_bootargs(fdt, bootargs.as_c_str()).map_err(|e| {
+ error!("Failed to patch bootargs to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ }
+ patch_num_cpus(fdt, info.num_cpus).map_err(|e| {
+ error!("Failed to patch cpus to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_pci_info(fdt, &info.pci_info).map_err(|e| {
+ error!("Failed to patch pci info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_serial_info(fdt, &info.serial_info).map_err(|e| {
+ error!("Failed to patch serial info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_swiotlb_info(fdt, &info.swiotlb_info).map_err(|e| {
+ error!("Failed to patch swiotlb info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_gic(fdt, info.num_cpus).map_err(|e| {
+ error!("Failed to patch gic info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+ patch_timer(fdt, info.num_cpus).map_err(|e| {
+ error!("Failed to patch timer info to DT: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ fdt.pack().map_err(|e| {
+ error!("Failed to pack DT after patching: {e}");
+ RebootReason::InvalidFdt
+ })?;
+
+ Ok(())
+}
+
/// Modifies the input DT according to the fields of the configuration.
pub fn modify_for_next_stage(
fdt: &mut Fdt,
@@ -60,46 +675,27 @@
) -> libfdt::Result<()> {
fdt.unpack()?;
- add_dice_node(fdt, bcc.as_ptr() as usize, bcc.len())?;
+ patch_dice_node(fdt, bcc.as_ptr() as usize, bcc.len())?;
- set_or_clear_chosen_flag(
- fdt,
- CStr::from_bytes_with_nul(b"avf,strict-boot\0").unwrap(),
- strict_boot,
- )?;
- set_or_clear_chosen_flag(
- fdt,
- CStr::from_bytes_with_nul(b"avf,new-instance\0").unwrap(),
- new_instance,
- )?;
+ set_or_clear_chosen_flag(fdt, cstr!("avf,strict-boot"), strict_boot)?;
+ set_or_clear_chosen_flag(fdt, cstr!("avf,new-instance"), new_instance)?;
fdt.pack()?;
Ok(())
}
-/// Add a "google,open-dice"-compatible reserved-memory node to the tree.
-fn add_dice_node(fdt: &mut Fdt, addr: usize, size: usize) -> libfdt::Result<()> {
- let reserved_memory = CStr::from_bytes_with_nul(b"/reserved-memory\0").unwrap();
+/// Patch the "google,open-dice"-compatible reserved-memory node to point to the bcc range
+fn patch_dice_node(fdt: &mut Fdt, addr: usize, size: usize) -> libfdt::Result<()> {
// We reject DTs with missing reserved-memory node as validation should have checked that the
// "swiotlb" subnode (compatible = "restricted-dma-pool") was present.
- let mut reserved_memory = fdt.node_mut(reserved_memory)?.ok_or(libfdt::FdtError::NotFound)?;
+ let node = fdt.node_mut(cstr!("/reserved-memory"))?.ok_or(libfdt::FdtError::NotFound)?;
- let dice = CStr::from_bytes_with_nul(b"dice\0").unwrap();
- let mut dice = reserved_memory.add_subnode(dice)?;
+ let mut node = node.next_compatible(cstr!("google,open-dice"))?.ok_or(FdtError::NotFound)?;
- let compatible = CStr::from_bytes_with_nul(b"compatible\0").unwrap();
- dice.appendprop(compatible, b"google,open-dice\0")?;
-
- let no_map = CStr::from_bytes_with_nul(b"no-map\0").unwrap();
- dice.appendprop(no_map, &[])?;
-
- let addr = addr.try_into().unwrap();
- let size = size.try_into().unwrap();
- let reg = CStr::from_bytes_with_nul(b"reg\0").unwrap();
- dice.appendprop_addrrange(reg, addr, size)?;
-
- Ok(())
+ let addr: u64 = addr.try_into().unwrap();
+ let size: u64 = size.try_into().unwrap();
+ node.setprop_inplace(cstr!("reg"), flatten(&[addr.to_be_bytes(), size.to_be_bytes()]))
}
fn set_or_clear_chosen_flag(fdt: &mut Fdt, flag: &CStr, value: bool) -> libfdt::Result<()> {
diff --git a/pvmfw/src/heap.rs b/pvmfw/src/heap.rs
index 435a6ff..eea2e98 100644
--- a/pvmfw/src/heap.rs
+++ b/pvmfw/src/heap.rs
@@ -53,7 +53,15 @@
#[no_mangle]
unsafe extern "C" fn malloc(size: usize) -> *mut c_void {
- malloc_(size).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
+ malloc_(size, false).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
+}
+
+#[no_mangle]
+unsafe extern "C" fn calloc(nmemb: usize, size: usize) -> *mut c_void {
+ let Some(size) = nmemb.checked_mul(size) else {
+ return ptr::null_mut()
+ };
+ malloc_(size, true).map_or(ptr::null_mut(), |p| p.cast::<c_void>().as_ptr())
}
#[no_mangle]
@@ -67,9 +75,11 @@
}
}
-unsafe fn malloc_(size: usize) -> Option<NonNull<usize>> {
+unsafe fn malloc_(size: usize, zeroed: bool) -> Option<NonNull<usize>> {
let size = NonZeroUsize::new(size)?.checked_add(mem::size_of::<usize>())?;
- let ptr = HEAP_ALLOCATOR.alloc(malloc_layout(size)?);
+ let layout = malloc_layout(size)?;
+ let ptr =
+ if zeroed { HEAP_ALLOCATOR.alloc_zeroed(layout) } else { HEAP_ALLOCATOR.alloc(layout) };
let ptr = NonNull::new(ptr)?.cast::<usize>().as_ptr();
*ptr = size.get();
NonNull::new(ptr.offset(1))
diff --git a/pvmfw/src/helpers.rs b/pvmfw/src/helpers.rs
index 9560d8d..6310826 100644
--- a/pvmfw/src/helpers.rs
+++ b/pvmfw/src/helpers.rs
@@ -128,7 +128,13 @@
for line in (start..end).step_by(line_size) {
// SAFETY - Clearing cache lines shouldn't have Rust-visible side effects.
- unsafe { asm!("dc cvau, {x}", x = in(reg) line) }
+ unsafe {
+ asm!(
+ "dc cvau, {x}",
+ x = in(reg) line,
+ options(nomem, nostack, preserves_flags),
+ )
+ }
}
}
@@ -144,3 +150,20 @@
reg.zeroize();
flush(reg)
}
+
+/// Flatten [[T; N]] into &[T]
+/// TODO: use slice::flatten when it graduates from experimental
+pub fn flatten<T, const N: usize>(original: &[[T; N]]) -> &[T] {
+ // SAFETY: no overflow because original (whose size is len()*N) is already in memory
+ let len = original.len() * N;
+ // SAFETY: [T] has the same layout as [T;N]
+ unsafe { core::slice::from_raw_parts(original.as_ptr().cast(), len) }
+}
+
+/// Create &CStr out of &str literal
+#[macro_export]
+macro_rules! cstr {
+ ($str:literal) => {{
+ CStr::from_bytes_with_nul(concat!($str, "\0").as_bytes()).unwrap()
+ }};
+}
diff --git a/pvmfw/src/hvc.rs b/pvmfw/src/hvc.rs
index 319ff9d..08edd86 100644
--- a/pvmfw/src/hvc.rs
+++ b/pvmfw/src/hvc.rs
@@ -38,14 +38,14 @@
/// Queries the memory protection parameters for a protected virtual machine.
///
/// Returns the memory protection granule size in bytes.
-pub fn hyp_meminfo() -> smccc::Result<u64> {
+pub fn kvm_hyp_meminfo() -> smccc::Result<u64> {
let args = [0u64; 17];
checked_hvc64(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO, args)
}
/// Shares a region of memory with the KVM host, granting it read, write and execute permissions.
/// The size of the region is equal to the memory protection granule returned by [`hyp_meminfo`].
-pub fn mem_share(base_ipa: u64) -> smccc::Result<()> {
+pub fn kvm_mem_share(base_ipa: u64) -> smccc::Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
@@ -55,26 +55,26 @@
/// Revokes access permission from the KVM host to a memory region previously shared with
/// [`mem_share`]. The size of the region is equal to the memory protection granule returned by
/// [`hyp_meminfo`].
-pub fn mem_unshare(base_ipa: u64) -> smccc::Result<()> {
+pub fn kvm_mem_unshare(base_ipa: u64) -> smccc::Result<()> {
let mut args = [0u64; 17];
args[0] = base_ipa;
checked_hvc64_expect_zero(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE, args)
}
-pub fn mmio_guard_info() -> smccc::Result<u64> {
+pub fn kvm_mmio_guard_info() -> smccc::Result<u64> {
let args = [0u64; 17];
checked_hvc64(VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID, args)
}
-pub fn mmio_guard_enroll() -> smccc::Result<()> {
+pub fn kvm_mmio_guard_enroll() -> smccc::Result<()> {
let args = [0u64; 17];
checked_hvc64_expect_zero(VENDOR_HYP_KVM_MMIO_GUARD_ENROLL_FUNC_ID, args)
}
-pub fn mmio_guard_map(ipa: u64) -> smccc::Result<()> {
+pub fn kvm_mmio_guard_map(ipa: u64) -> smccc::Result<()> {
let mut args = [0u64; 17];
args[0] = ipa;
@@ -94,7 +94,7 @@
}
}
-pub fn mmio_guard_unmap(ipa: u64) -> smccc::Result<()> {
+pub fn kvm_mmio_guard_unmap(ipa: u64) -> smccc::Result<()> {
let mut args = [0u64; 17];
args[0] = ipa;
diff --git a/pvmfw/src/hypervisor.rs b/pvmfw/src/hypervisor.rs
new file mode 100644
index 0000000..e06d809
--- /dev/null
+++ b/pvmfw/src/hypervisor.rs
@@ -0,0 +1,46 @@
+// Copyright 2023, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Wrappers around hypervisor back-ends.
+
+use crate::hvc;
+use crate::smccc;
+
+pub fn hyp_meminfo() -> smccc::Result<u64> {
+ hvc::kvm_hyp_meminfo()
+}
+
+pub fn mem_share(base_ipa: u64) -> smccc::Result<()> {
+ hvc::kvm_mem_share(base_ipa)
+}
+
+pub fn mem_unshare(base_ipa: u64) -> smccc::Result<()> {
+ hvc::kvm_mem_unshare(base_ipa)
+}
+
+pub fn mmio_guard_info() -> smccc::Result<u64> {
+ hvc::kvm_mmio_guard_info()
+}
+
+pub fn mmio_guard_enroll() -> smccc::Result<()> {
+ hvc::kvm_mmio_guard_enroll()
+}
+
+pub fn mmio_guard_map(ipa: u64) -> smccc::Result<()> {
+ hvc::kvm_mmio_guard_map(ipa)
+}
+
+pub fn mmio_guard_unmap(ipa: u64) -> smccc::Result<()> {
+ hvc::kvm_mmio_guard_unmap(ipa)
+}
diff --git a/pvmfw/src/instance.rs b/pvmfw/src/instance.rs
index fbf2040..a974543 100644
--- a/pvmfw/src/instance.rs
+++ b/pvmfw/src/instance.rs
@@ -258,11 +258,11 @@
impl EntryHeader {
fn new(uuid: Uuid, payload_size: usize) -> Self {
- Self { uuid: uuid.as_u128(), payload_size: u64::try_from(payload_size).unwrap().to_le() }
+ Self { uuid: uuid.to_u128_le(), payload_size: u64::try_from(payload_size).unwrap().to_le() }
}
fn uuid(&self) -> Uuid {
- Uuid::from_u128(self.uuid)
+ Uuid::from_u128_le(self.uuid)
}
fn payload_size(&self) -> usize {
diff --git a/pvmfw/src/main.rs b/pvmfw/src/main.rs
index d89e718..00ff61f 100644
--- a/pvmfw/src/main.rs
+++ b/pvmfw/src/main.rs
@@ -16,7 +16,6 @@
#![no_main]
#![no_std]
-#![feature(default_alloc_error_handler)]
extern crate alloc;
@@ -31,6 +30,7 @@
mod heap;
mod helpers;
mod hvc;
+mod hypervisor;
mod instance;
mod memory;
mod mmio_guard;
diff --git a/pvmfw/src/memory.rs b/pvmfw/src/memory.rs
index 86fcd00..b223f82 100644
--- a/pvmfw/src/memory.rs
+++ b/pvmfw/src/memory.rs
@@ -17,7 +17,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::helpers::{self, align_down, align_up, page_4kb_of, SIZE_4KB};
-use crate::hvc::{hyp_meminfo, mem_share, mem_unshare};
+use crate::hypervisor::{hyp_meminfo, mem_share, mem_unshare};
use crate::mmio_guard;
use crate::mmu;
use crate::smccc;
@@ -35,6 +35,11 @@
use log::error;
use tinyvec::ArrayVec;
+/// Base of the system's contiguous "main" memory.
+pub const BASE_ADDR: usize = 0x8000_0000;
+/// First address that can't be translated by a level 1 TTBR0_EL1.
+pub const MAX_ADDR: usize = 1 << 40;
+
pub type MemoryRange = Range<usize>;
#[derive(Clone, Copy, Debug, Default)]
@@ -129,15 +134,11 @@
impl MemoryTracker {
const CAPACITY: usize = 5;
const MMIO_CAPACITY: usize = 5;
- /// Base of the system's contiguous "main" memory.
- const BASE: usize = 0x8000_0000;
- /// First address that can't be translated by a level 1 TTBR0_EL1.
- const MAX_ADDR: usize = 1 << 39;
/// Create a new instance from an active page table, covering the maximum RAM size.
pub fn new(page_table: mmu::PageTable) -> Self {
Self {
- total: Self::BASE..Self::MAX_ADDR,
+ total: BASE_ADDR..MAX_ADDR,
page_table,
regions: ArrayVec::new(),
mmio_regions: ArrayVec::new(),
diff --git a/pvmfw/src/mmio_guard.rs b/pvmfw/src/mmio_guard.rs
index e5f376e..dac26e0 100644
--- a/pvmfw/src/mmio_guard.rs
+++ b/pvmfw/src/mmio_guard.rs
@@ -15,7 +15,7 @@
//! Safe MMIO_GUARD support.
use crate::helpers;
-use crate::hvc::{mmio_guard_enroll, mmio_guard_info, mmio_guard_map, mmio_guard_unmap};
+use crate::hypervisor::{mmio_guard_enroll, mmio_guard_info, mmio_guard_map, mmio_guard_unmap};
use crate::smccc;
use core::{fmt, result};
diff --git a/pvmfw/src/rand.rs b/pvmfw/src/rand.rs
index a53cac6..bf0edd5 100644
--- a/pvmfw/src/rand.rs
+++ b/pvmfw/src/rand.rs
@@ -52,12 +52,11 @@
fn fill_with_entropy(s: &mut [u8]) -> Result<()> {
const MAX_BYTES_PER_CALL: usize = size_of::<hvc::TrngRng64Entropy>();
- let bits = usize::try_from(u8::BITS).unwrap();
let (aligned, remainder) = s.split_at_mut(s.len() - s.len() % MAX_BYTES_PER_CALL);
for chunk in aligned.chunks_exact_mut(MAX_BYTES_PER_CALL) {
- let (r, s, t) = hvc::trng_rnd64((chunk.len() * bits).try_into().unwrap())?;
+ let (r, s, t) = repeat_trng_rnd(chunk.len())?;
let mut words = chunk.chunks_exact_mut(size_of::<u64>());
words.next().unwrap().clone_from_slice(&t.to_ne_bytes());
@@ -67,7 +66,7 @@
if !remainder.is_empty() {
let mut entropy = [0; MAX_BYTES_PER_CALL];
- let (r, s, t) = hvc::trng_rnd64((remainder.len() * bits).try_into().unwrap())?;
+ let (r, s, t) = repeat_trng_rnd(remainder.len())?;
let mut words = entropy.chunks_exact_mut(size_of::<u64>());
words.next().unwrap().clone_from_slice(&t.to_ne_bytes());
@@ -80,6 +79,17 @@
Ok(())
}
+fn repeat_trng_rnd(n_bytes: usize) -> hvc::trng::Result<hvc::TrngRng64Entropy> {
+ let bits = usize::try_from(u8::BITS).unwrap();
+ let n_bits = (n_bytes * bits).try_into().unwrap();
+ loop {
+ match hvc::trng_rnd64(n_bits) {
+ Err(hvc::trng::Error::NoEntropy) => continue,
+ res => return res,
+ }
+ }
+}
+
pub fn random_array<const N: usize>() -> Result<[u8; N]> {
let mut arr = [0; N];
fill_with_entropy(&mut arr)?;
diff --git a/rialto/src/main.rs b/rialto/src/main.rs
index 3b730f4..59ee0b6 100644
--- a/rialto/src/main.rs
+++ b/rialto/src/main.rs
@@ -16,7 +16,6 @@
#![no_main]
#![no_std]
-#![feature(default_alloc_error_handler)]
mod exceptions;
diff --git a/tests/aidl/Android.bp b/tests/aidl/Android.bp
index d59ca7e..ed4e8ff 100644
--- a/tests/aidl/Android.bp
+++ b/tests/aidl/Android.bp
@@ -6,6 +6,10 @@
name: "com.android.microdroid.testservice",
srcs: ["com/android/microdroid/testservice/**/*.aidl"],
unstable: true,
+ flags: [
+ "-Werror",
+ "-Wno-mixed-oneway",
+ ],
backend: {
java: {
gen_rpc: true,
diff --git a/tests/aidl/com/android/microdroid/testservice/IAppCallback.aidl b/tests/aidl/com/android/microdroid/testservice/IAppCallback.aidl
new file mode 100644
index 0000000..9859090
--- /dev/null
+++ b/tests/aidl/com/android/microdroid/testservice/IAppCallback.aidl
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.microdroid.testservice;
+
+import com.android.microdroid.testservice.IVmCallback;
+
+/**
+ * An interface exposed by the app for callbacks from the VM.
+ *
+ * {@hide}
+ */
+interface IAppCallback {
+ /** Invites the app to call vmCallback#echoMessage() */
+ void setVmCallback(IVmCallback vmCallback);
+
+    /** Asynchronously called by the VM in response to a call to echoMessage(). */
+ void onEchoRequestReceived(String message);
+}
diff --git a/tests/aidl/com/android/microdroid/testservice/ITestService.aidl b/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
index 66dbe4b..36c3aaf 100644
--- a/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
+++ b/tests/aidl/com/android/microdroid/testservice/ITestService.aidl
@@ -15,7 +15,12 @@
*/
package com.android.microdroid.testservice;
-/** {@hide} */
+import com.android.microdroid.testservice.IAppCallback;
+
+/**
+ * This is the service exposed by the test payload, called by the test app.
+ * {@hide}
+ */
interface ITestService {
const long SERVICE_PORT = 5678;
@@ -62,6 +67,9 @@
/** Returns flags for the given mountPoint. */
int getMountFlags(String mountPoint);
+ /** Requests the VM to asynchronously call appCallback.setVmCallback() */
+ void requestCallback(IAppCallback appCallback);
+
/**
* Request the service to exit, triggering the termination of the VM. This may cause any
* requests in flight to fail.
diff --git a/tests/aidl/com/android/microdroid/testservice/IVmCallback.aidl b/tests/aidl/com/android/microdroid/testservice/IVmCallback.aidl
new file mode 100644
index 0000000..617d184
--- /dev/null
+++ b/tests/aidl/com/android/microdroid/testservice/IVmCallback.aidl
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.microdroid.testservice;
+
+/**
+ * An interface exposed by the VM for callbacks from the app.
+ *
+ * {@hide}
+ */
+interface IVmCallback {
+ /** Requests the VM to asynchronously call the app's onEchoRequestReceived() callback. */
+ void echoMessage(String message);
+}
diff --git a/tests/benchmark/AndroidTest.xml b/tests/benchmark/AndroidTest.xml
index 0214cd9..29bc95a 100644
--- a/tests/benchmark/AndroidTest.xml
+++ b/tests/benchmark/AndroidTest.xml
@@ -25,6 +25,11 @@
<target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
<option name="force-root" value="true" />
</target_preparer>
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="push" value="perf-setup.sh->/data/local/tmp/perf-setup.sh" />
+ <option name="post-push" value="chmod 755 /data/local/tmp/perf-setup.sh;/data/local/tmp/perf-setup.sh" />
+ <option name="cleanup" value="true" />
+ </target_preparer>
<test class="com.android.tradefed.testtype.AndroidJUnitTest" >
<option name="package" value="com.android.microdroid.benchmark" />
<option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
diff --git a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
index 4b11d77..9851a17 100644
--- a/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
+++ b/tests/benchmark/src/java/com/android/microdroid/benchmark/MicrodroidBenchmarks.java
@@ -36,6 +36,7 @@
import android.system.virtualmachine.VirtualMachine;
import android.system.virtualmachine.VirtualMachineConfig;
import android.system.virtualmachine.VirtualMachineException;
+import android.system.Os;
import android.util.Log;
import com.android.microdroid.test.common.MetricsProcessor;
@@ -347,16 +348,7 @@
CrosvmStats(Function<String, String> shellExecutor) {
try {
- List<Integer> crosvmPids =
- ProcessUtil.getProcessMap(shellExecutor).entrySet().stream()
- .filter(e -> e.getValue().contains("crosvm"))
- .map(e -> e.getKey())
- .collect(java.util.stream.Collectors.toList());
- if (crosvmPids.size() != 1) {
- throw new IllegalStateException(
- "expected to find exactly one crosvm processes, found "
- + crosvmPids.size());
- }
+ int crosvmPid = ProcessUtil.getCrosvmPid(Os.getpid(), shellExecutor);
long hostRss = 0;
long hostPss = 0;
@@ -364,7 +356,7 @@
long guestPss = 0;
boolean hasGuestMaps = false;
for (ProcessUtil.SMapEntry entry :
- ProcessUtil.getProcessSmaps(crosvmPids.get(0), shellExecutor)) {
+ ProcessUtil.getProcessSmaps(crosvmPid, shellExecutor)) {
long rss = entry.metrics.get("Rss");
long pss = entry.metrics.get("Pss");
if (entry.name.contains("crosvm_guest")) {
diff --git a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
index 9c8714f..73c3b33 100644
--- a/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
+++ b/tests/benchmark_hostside/java/android/avf/test/AVFHostTestCase.java
@@ -85,10 +85,13 @@
private boolean mNeedTearDown = false;
+ private boolean mNeedToRestartPkvmStatus = false;
+
@Before
public void setUp() throws Exception {
testIfDeviceIsCapable(getDevice());
mNeedTearDown = true;
+ mNeedToRestartPkvmStatus = false;
getDevice().installPackage(findTestFile(APK_NAME), /* reinstall */ false);
@@ -103,8 +106,8 @@
// sees, so we can't rely on that - b/268688303.)
return;
}
- // Set PKVM enable and reboot to prevent previous staged session.
- if (!isCuttlefish()) {
+ // Restore PKVM status and reboot to prevent previous staged session, if switched.
+ if (mNeedToRestartPkvmStatus) {
setPKVMStatusWithRebootToBootloader(true);
rebootFromBootloaderAndWaitBootCompleted();
}
@@ -422,7 +425,7 @@
}
private void enableDisablePKVMTestHelper(boolean isEnable) throws Exception {
- skipIfPKVMStatusSwitchNotSupported();
+ assumePKVMStatusSwitchSupported();
List<Double> bootDmesgTime = new ArrayList<>(ROUND_COUNT);
Map<String, List<Double>> bootloaderTime = new HashMap<>();
@@ -478,9 +481,16 @@
reportMetric(bootDmesgTime, "dmesg_boot_time_" + suffix, "s");
}
- private void skipIfPKVMStatusSwitchNotSupported() throws Exception {
+ private void assumePKVMStatusSwitchSupported() throws Exception {
assumeFalse("Skip on CF; can't reboot to bootloader", isCuttlefish());
+        // This is overkill: the intention is to exclude remote_device_proxy, which uses a
+        // different serial for fastboot, but there is no good way to distinguish it from
+        // regular IP transport. This is not a problem until someone really needs to run
+        // the test over regular IP transport.
+ boolean isAdbOverIp = getDevice().getSerialNumber().contains(":");
+ assumeFalse("Skip over IP (overkill for remote_device_proxy)", isAdbOverIp);
+
if (!getDevice().isStateBootloaderOrFastbootd()) {
getDevice().rebootIntoBootloader();
}
@@ -513,6 +523,7 @@
}
private void setPKVMStatusWithRebootToBootloader(boolean isEnable) throws Exception {
+ mNeedToRestartPkvmStatus = true;
if (!getDevice().isStateBootloaderOrFastbootd()) {
getDevice().rebootIntoBootloader();
diff --git a/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java b/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java
index ba82c38..23f8ca6 100644
--- a/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java
+++ b/tests/helper/src/java/com/android/microdroid/test/common/DeviceProperties.java
@@ -20,6 +20,7 @@
/** This class can be used in both host tests and device tests to get the device properties. */
public final class DeviceProperties {
+
/** PropertyGetter is used to get the property associated to a given key. */
public interface PropertyGetter {
String getProperty(String key) throws Exception;
diff --git a/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java b/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
index 940ec9c..c72d91e 100644
--- a/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
+++ b/tests/helper/src/java/com/android/microdroid/test/common/ProcessUtil.java
@@ -22,9 +22,12 @@
import java.util.List;
import java.util.Map;
import java.util.function.Function;
+import java.util.stream.IntStream;
/** This class provides process utility for both device tests and host tests. */
public final class ProcessUtil {
+ private static final String CROSVM_BIN = "/apex/com.android.virt/bin/crosvm";
+ private static final String VIRTMGR_BIN = "/apex/com.android.virt/bin/virtmgr";
/** A memory map entry from /proc/{pid}/smaps */
public static class SMapEntry {
@@ -89,6 +92,35 @@
return processMap;
}
+ private static IntStream getChildProcesses(
+ int pid, String cmdlineFilter, Function<String, String> shellExecutor) {
+ String cmd = "pgrep -P " + pid;
+ if (cmdlineFilter != null) {
+ cmd += " -f " + cmdlineFilter;
+ }
+ return shellExecutor.apply(cmd).trim().lines().mapToInt(Integer::parseInt);
+ }
+
+ private static int getSingleChildProcess(
+ int parentPid, String cmdlineFilter, Function<String, String> shellExecutor) {
+ int[] pids = getChildProcesses(parentPid, cmdlineFilter, shellExecutor).toArray();
+ if (pids.length == 0) {
+ throw new IllegalStateException("No process found for " + cmdlineFilter);
+ } else if (pids.length > 1) {
+ throw new IllegalStateException("More than one process found for " + cmdlineFilter);
+ }
+ return pids[0];
+ }
+
+ public static int getVirtmgrPid(int parentPid, Function<String, String> shellExecutor) {
+ return getSingleChildProcess(parentPid, VIRTMGR_BIN, shellExecutor);
+ }
+
+ public static int getCrosvmPid(int parentPid, Function<String, String> shellExecutor) {
+ int virtmgrPid = getVirtmgrPid(parentPid, shellExecutor);
+ return getSingleChildProcess(virtmgrPid, CROSVM_BIN, shellExecutor);
+ }
+
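For context, a minimal sketch of how a host-side test might use these new helpers. The `CrosvmPidExample` class, the `crosvmPidOf` method, and the shell-executor lambda are illustrative assumptions (host tests typically wrap `ITestDevice#executeShellCommand` or an equivalent); only `ProcessUtil.getCrosvmPid` comes from this change.

```java
// Illustrative sketch only; not part of this change.
import java.util.function.Function;

import com.android.microdroid.test.common.ProcessUtil;

public final class CrosvmPidExample {
    /** Resolves the crosvm pid for a VM whose owning process is {@code ownerPid}. */
    public static int crosvmPidOf(int ownerPid, Function<String, String> shellExecutor) {
        // getCrosvmPid() first finds the virtmgr child of the owner process,
        // then the single crosvm child of that virtmgr instance.
        return ProcessUtil.getCrosvmPid(ownerPid, shellExecutor);
    }

    private CrosvmPidExample() {}
}
```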
// To ensure that only one object is created at a time.
private ProcessUtil() {}
diff --git a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
index b877a77..744f94c 100644
--- a/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
+++ b/tests/helper/src/java/com/android/microdroid/test/device/MicrodroidDeviceTestBase.java
@@ -57,16 +57,19 @@
private final String MAX_PERFORMANCE_TASK_PROFILE = "CPUSET_SP_TOP_APP";
public static boolean isCuttlefish() {
- return DeviceProperties.create(SystemProperties::get).isCuttlefish();
+ return getDeviceProperties().isCuttlefish();
}
public static boolean isUserBuild() {
- return DeviceProperties.create(SystemProperties::get).isUserBuild();
+ return getDeviceProperties().isUserBuild();
}
public static String getMetricPrefix() {
- return MetricsProcessor.getMetricPrefix(
- DeviceProperties.create(SystemProperties::get).getMetricsTag());
+ return MetricsProcessor.getMetricPrefix(getDeviceProperties().getMetricsTag());
+ }
+
+ private static DeviceProperties getDeviceProperties() {
+ return DeviceProperties.create(SystemProperties::get);
}
protected final void grantPermission(String permission) {
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index d217c00..4b5cbda 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -9,31 +9,31 @@
}
genrule {
- name: "test_avf_debug_policy_with_ramdump",
+ name: "test_avf_debug_policy_with_log.dtbo",
defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_with_ramdump.dts"],
- out: ["avf_debug_policy_with_ramdump.dtbo"],
+ srcs: ["assets/avf_debug_policy_with_log.dts"],
+ out: ["avf_debug_policy_with_log.dtbo"],
}
genrule {
- name: "test_avf_debug_policy_without_ramdump",
+ name: "test_avf_debug_policy_without_log.dtbo",
defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_without_ramdump.dts"],
- out: ["avf_debug_policy_without_ramdump.dtbo"],
+ srcs: ["assets/avf_debug_policy_without_log.dts"],
+ out: ["avf_debug_policy_without_log.dtbo"],
}
genrule {
- name: "test_avf_debug_policy_with_console_output",
+ name: "test_avf_debug_policy_with_adb",
defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_with_console_output.dts"],
- out: ["avf_debug_policy_with_console_output.dtbo"],
+ srcs: ["assets/avf_debug_policy_with_adb.dts"],
+ out: ["avf_debug_policy_with_adb.dtbo"],
}
genrule {
- name: "test_avf_debug_policy_without_console_output",
+ name: "test_avf_debug_policy_without_adb",
defaults: ["test_avf_debug_policy_overlay"],
- srcs: ["assets/avf_debug_policy_without_console_output.dts"],
- out: ["avf_debug_policy_without_console_output.dtbo"],
+ srcs: ["assets/avf_debug_policy_without_adb.dts"],
+ out: ["avf_debug_policy_without_adb.dtbo"],
}
java_test_host {
@@ -60,10 +60,10 @@
":test.com.android.virt.pem",
":test2.com.android.virt.pem",
":pvmfw_test",
- ":test_avf_debug_policy_with_ramdump",
- ":test_avf_debug_policy_without_ramdump",
- ":test_avf_debug_policy_with_console_output",
- ":test_avf_debug_policy_without_console_output",
+ ":test_avf_debug_policy_with_log.dtbo",
+ ":test_avf_debug_policy_without_log.dtbo",
+ ":test_avf_debug_policy_with_adb",
+ ":test_avf_debug_policy_without_adb",
"assets/bcc.dat",
],
data_native_bins: [
diff --git a/tests/hostside/assets/avf_debug_policy_with_ramdump.dts b/tests/hostside/assets/avf_debug_policy_with_adb.dts
similarity index 75%
rename from tests/hostside/assets/avf_debug_policy_with_ramdump.dts
rename to tests/hostside/assets/avf_debug_policy_with_adb.dts
index f1a5196..9ad15dd 100644
--- a/tests/hostside/assets/avf_debug_policy_with_ramdump.dts
+++ b/tests/hostside/assets/avf_debug_policy_with_adb.dts
@@ -8,8 +8,8 @@
__overlay__ {
avf {
guest {
- common {
- ramdump = <1>;
+ microdroid {
+ adb = <1>;
};
};
};
diff --git a/tests/hostside/assets/avf_debug_policy_with_console_output.dts b/tests/hostside/assets/avf_debug_policy_with_log.dts
similarity index 100%
rename from tests/hostside/assets/avf_debug_policy_with_console_output.dts
rename to tests/hostside/assets/avf_debug_policy_with_log.dts
diff --git a/tests/hostside/assets/avf_debug_policy_with_ramdump.dts b/tests/hostside/assets/avf_debug_policy_without_adb.dts
similarity index 75%
copy from tests/hostside/assets/avf_debug_policy_with_ramdump.dts
copy to tests/hostside/assets/avf_debug_policy_without_adb.dts
index f1a5196..992e0ff 100644
--- a/tests/hostside/assets/avf_debug_policy_with_ramdump.dts
+++ b/tests/hostside/assets/avf_debug_policy_without_adb.dts
@@ -8,8 +8,8 @@
__overlay__ {
avf {
guest {
- common {
- ramdump = <1>;
+ microdroid {
+ adb = <0>;
};
};
};
diff --git a/tests/hostside/assets/avf_debug_policy_without_console_output.dts b/tests/hostside/assets/avf_debug_policy_without_log.dts
similarity index 100%
rename from tests/hostside/assets/avf_debug_policy_without_console_output.dts
rename to tests/hostside/assets/avf_debug_policy_without_log.dts
diff --git a/tests/hostside/assets/avf_debug_policy_without_ramdump.dts b/tests/hostside/assets/avf_debug_policy_without_ramdump.dts
deleted file mode 100644
index 5b15d51..0000000
--- a/tests/hostside/assets/avf_debug_policy_without_ramdump.dts
+++ /dev/null
@@ -1,18 +0,0 @@
-/dts-v1/;
-/plugin/;
-
-/ {
- fragment@avf {
- target-path = "/";
-
- __overlay__ {
- avf {
- guest {
- common {
- ramdump = <0>;
- };
- };
- };
- };
- };
-};
\ No newline at end of file
diff --git a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
index 20a6045..a7f7906 100644
--- a/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
+++ b/tests/hostside/helper/java/com/android/microdroid/test/host/MicrodroidHostTestCaseBase.java
@@ -67,6 +67,8 @@
// remove any leftover files under test root
android.tryRun("rm", "-rf", TEST_ROOT + "*");
+
+ android.tryRun("mkdir " + TEST_ROOT);
}
public static void cleanUpVirtualizationTestSetup(ITestDevice androidDevice)
diff --git a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
index 1fa0afe..4c1dba5 100644
--- a/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/MicrodroidHostTests.java
@@ -24,7 +24,6 @@
import static com.google.common.truth.Truth.assertWithMessage;
import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assume.assumeFalse;
import static org.junit.Assume.assumeTrue;
@@ -56,7 +55,6 @@
import org.json.JSONObject;
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
@@ -83,6 +81,7 @@
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
@RunWith(DeviceJUnit4ClassRunner.class)
public class MicrodroidHostTests extends MicrodroidHostTestCaseBase {
@@ -527,13 +526,14 @@
return !result.trim().isEmpty();
}
- private boolean isTombstoneGeneratedWithCmd(String configPath, String... crashCommand)
- throws Exception {
+ private boolean isTombstoneGeneratedWithCmd(
+ boolean protectedVm, String configPath, String... crashCommand) throws Exception {
mMicrodroidDevice =
MicrodroidBuilder.fromDevicePath(getPathForPackage(PACKAGE_NAME), configPath)
.debugLevel("full")
.memoryMib(minMemorySize())
.cpuTopology("match_host")
+ .protectedVm(protectedVm)
.build(getAndroidDevice());
mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
mMicrodroidDevice.enableAdbRoot();
@@ -552,6 +552,7 @@
public void testTombstonesAreGeneratedUponUserspaceCrash() throws Exception {
assertThat(
isTombstoneGeneratedWithCmd(
+ false,
"assets/vm_config.json",
"kill",
"-SIGSEGV",
@@ -563,6 +564,7 @@
public void testTombstonesAreNotGeneratedIfNotExportedUponUserspaceCrash() throws Exception {
assertThat(
isTombstoneGeneratedWithCmd(
+ false,
"assets/vm_config_no_tombstone.json",
"kill",
"-SIGSEGV",
@@ -570,17 +572,33 @@
.isFalse();
}
- @Test
- @Ignore("b/243630590: Temporal workaround until lab devices has flashed new DPM")
- public void testTombstonesAreGeneratedUponKernelCrash() throws Exception {
+ private void testTombstonesAreGeneratedUponKernelCrash(boolean protectedVm) throws Exception {
assumeFalse("Cuttlefish is not supported", isCuttlefish());
assumeFalse("Skipping test because ramdump is disabled on user build", isUserBuild());
assertThat(
isTombstoneGeneratedWithCmd(
- "assets/vm_config.json", "echo", "c", ">", "/proc/sysrq-trigger"))
+ protectedVm,
+ "assets/vm_config.json",
+ "echo",
+ "c",
+ ">",
+ "/proc/sysrq-trigger"))
.isTrue();
}
+ @Test
+ public void testTombstonesAreGeneratedUponKernelCrashOnNonPvm() throws Exception {
+ testTombstonesAreGeneratedUponKernelCrash(false);
+ }
+
+ @Test
+ public void testTombstonesAreGeneratedUponKernelCrashOnPvm() throws Exception {
+ assumeTrue(
+ "Protected VMs are not supported",
+ getAndroidDevice().supportsMicrodroid(/*protectedVm=*/ true));
+ testTombstonesAreGeneratedUponKernelCrash(true);
+ }
+
private boolean isTombstoneGeneratedWithVmRunApp(boolean debuggable, String... additionalArgs)
throws Exception {
// we can't use microdroid builder as it wants ADB connection (debuggable)
@@ -664,19 +682,24 @@
microdroid.waitForBootComplete(BOOT_COMPLETE_TIMEOUT);
device.shutdownMicrodroid(microdroid);
+ // Try to collect atoms for up to 60 seconds (60000 ms); stop early once 3 atoms are seen.
List<StatsLog.EventMetricData> data = new ArrayList<>();
- assertThatEventually(
- 10000,
- () -> {
- data.addAll(ReportUtils.getEventMetricDataList(getDevice()));
- return data.size();
- },
- is(3)
- );
+ long start = System.currentTimeMillis();
+ while ((System.currentTimeMillis() - start < 60000) && data.size() < 3) {
+ data.addAll(ReportUtils.getEventMetricDataList(getDevice()));
+ Thread.sleep(500);
+ }
+ assertThat(
+ data.stream()
+ .map(x -> x.getAtom().getPushedCase().getNumber())
+ .collect(Collectors.toList()))
+ .containsExactly(
+ AtomsProto.Atom.VM_CREATION_REQUESTED_FIELD_NUMBER,
+ AtomsProto.Atom.VM_BOOTED_FIELD_NUMBER,
+ AtomsProto.Atom.VM_EXITED_FIELD_NUMBER)
+ .inOrder();
// Check VmCreationRequested atom
- assertThat(data.get(0).getAtom().getPushedCase().getNumber()).isEqualTo(
- AtomsProto.Atom.VM_CREATION_REQUESTED_FIELD_NUMBER);
AtomsProto.VmCreationRequested atomVmCreationRequested =
data.get(0).getAtom().getVmCreationRequested();
assertThat(atomVmCreationRequested.getHypervisor())
@@ -693,14 +716,10 @@
.isEqualTo("com.android.art:com.android.compos:com.android.sdkext");
// Check VmBooted atom
- assertThat(data.get(1).getAtom().getPushedCase().getNumber())
- .isEqualTo(AtomsProto.Atom.VM_BOOTED_FIELD_NUMBER);
AtomsProto.VmBooted atomVmBooted = data.get(1).getAtom().getVmBooted();
assertThat(atomVmBooted.getVmIdentifier()).isEqualTo("VmRunApp");
// Check VmExited atom
- assertThat(data.get(2).getAtom().getPushedCase().getNumber())
- .isEqualTo(AtomsProto.Atom.VM_EXITED_FIELD_NUMBER);
AtomsProto.VmExited atomVmExited = data.get(2).getAtom().getVmExited();
assertThat(atomVmExited.getVmIdentifier()).isEqualTo("VmRunApp");
assertThat(atomVmExited.getDeathReason()).isEqualTo(AtomsProto.VmExited.DeathReason.KILLED);
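The loop above polls statsd for up to 60 seconds and then asserts on the collected atoms in order. The same wait-then-collect pattern can be factored into a small reusable helper; a minimal sketch assuming only the JDK (the `PollingUtil` class and `collectAtLeast` method are illustrative names, not part of the change, which inlines this logic directly):

```java
// Illustrative sketch only; the test above inlines this logic.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;

public final class PollingUtil {
    /**
     * Polls {@code fetch} every 500 ms until at least {@code minSize} items have been
     * collected or {@code timeoutMs} has elapsed, returning whatever was gathered.
     */
    public static <T> List<T> collectAtLeast(Callable<List<T>> fetch, int minSize, long timeoutMs)
            throws Exception {
        List<T> collected = new ArrayList<>();
        long start = System.currentTimeMillis();
        while (System.currentTimeMillis() - start < timeoutMs && collected.size() < minSize) {
            collected.addAll(fetch.call());
            Thread.sleep(500);
        }
        return collected;
    }

    private PollingUtil() {}
}
```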
diff --git a/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java b/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java
index 755613a..18aa273 100644
--- a/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java
+++ b/tests/hostside/java/com/android/microdroid/test/PvmfwDebugPolicyHostTests.java
@@ -23,6 +23,7 @@
import static org.junit.Assume.assumeTrue;
import static org.junit.Assume.assumeFalse;
+import static org.junit.Assert.assertThrows;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
@@ -31,9 +32,11 @@
import com.android.microdroid.test.host.MicrodroidHostTestCaseBase;
import com.android.microdroid.test.host.Pvmfw;
import com.android.tradefed.device.DeviceNotAvailableException;
-import com.android.tradefed.device.TestDevice;
+import com.android.tradefed.device.DeviceRuntimeException;
import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.device.TestDevice;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
+import com.android.tradefed.util.CommandStatus;
import com.android.tradefed.util.CommandResult;
import com.android.tradefed.util.FileUtil;
@@ -55,9 +58,11 @@
@NonNull private static final String PACKAGE_FILE_NAME = "MicrodroidTestApp.apk";
@NonNull private static final String PACKAGE_NAME = "com.android.microdroid.test";
@NonNull private static final String MICRODROID_DEBUG_FULL = "full";
+ @NonNull private static final String MICRODROID_DEBUG_NONE = "none";
@NonNull private static final String MICRODROID_CONFIG_PATH = "assets/vm_config_apex.json";
@NonNull private static final String MICRODROID_LOG_PATH = TEST_ROOT + "log.txt";
private static final int BOOT_COMPLETE_TIMEOUT_MS = 30000; // 30 seconds
+ private static final int BOOT_FAILURE_WAIT_TIME_MS = 10000; // 10 seconds
private static final int CONSOLE_OUTPUT_WAIT_MS = 5000; // 5 seconds
@NonNull private static final String CUSTOM_PVMFW_FILE_PREFIX = "pvmfw";
@@ -65,6 +70,9 @@
@NonNull private static final String CUSTOM_PVMFW_IMG_PATH = TEST_ROOT + PVMFW_FILE_NAME;
@NonNull private static final String CUSTOM_PVMFW_IMG_PATH_PROP = "hypervisor.pvmfw.path";
+ @NonNull
+ private static final String AVF_DEBUG_POLICY_ADB_DT_PROP_PATH = "/avf/guest/microdroid/adb";
+
@NonNull private static final String MICRODROID_CMDLINE_PATH = "/proc/cmdline";
@NonNull private static final String MICRODROID_DT_ROOT_PATH = "/proc/device-tree";
@@ -143,35 +151,8 @@
}
@Test
- public void testRamdump() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_ramdump.dtbo");
- pvmfw.serialize(mCustomPvmfwBinFileOnHost);
- mMicrodroidDevice = launchProtectedVmAndWaitForBootCompleted();
-
- assertThat(readMicrodroidFileAsString(MICRODROID_CMDLINE_PATH)).contains("crashkernel=");
- assertThat(readMicrodroidFileAsString(MICRODROID_DT_BOOTARGS_PATH))
- .contains("crashkernel=");
- assertThat(readMicrodroidFileAsHexString(MICRODROID_DT_RAMDUMP_PATH))
- .isEqualTo(HEX_STRING_ONE);
- }
-
- @Test
- public void testNoRamdump() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_ramdump.dtbo");
- pvmfw.serialize(mCustomPvmfwBinFileOnHost);
- mMicrodroidDevice = launchProtectedVmAndWaitForBootCompleted();
-
- assertThat(readMicrodroidFileAsString(MICRODROID_CMDLINE_PATH))
- .doesNotContain("crashkernel=");
- assertThat(readMicrodroidFileAsString(MICRODROID_DT_BOOTARGS_PATH))
- .doesNotContain("crashkernel=");
- assertThat(readMicrodroidFileAsHexString(MICRODROID_DT_RAMDUMP_PATH))
- .isEqualTo(HEX_STRING_ZERO);
- }
-
- @Test
- public void testConsoleOutput() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_console_output.dtbo");
+ public void testLog_consoleOutput() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_log.dtbo");
pvmfw.serialize(mCustomPvmfwBinFileOnHost);
CommandResult result = tryLaunchProtectedNonDebuggableVm();
@@ -182,8 +163,20 @@
}
@Test
- public void testNoConsoleOutput() throws Exception {
- Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_console_output.dtbo");
+ public void testLog_logcat() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_log.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ tryLaunchProtectedNonDebuggableVm();
+
+ assertWithMessage("Microdroid's logcat should have been enabled")
+ .that(hasMicrodroidLogcatOutput())
+ .isTrue();
+ }
+
+ @Test
+ public void testNoLog_noConsoleOutput() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_log.dtbo");
pvmfw.serialize(mCustomPvmfwBinFileOnHost);
CommandResult result = tryLaunchProtectedNonDebuggableVm();
@@ -193,6 +186,69 @@
.isFalse();
}
+ @Test
+ public void testNoLog_noLogcat() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_log.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ assertThrows(
+ "Microdroid shouldn't be recognized because of missing adb connection",
+ DeviceRuntimeException.class,
+ () ->
+ launchProtectedVmAndWaitForBootCompleted(
+ MICRODROID_DEBUG_NONE, BOOT_FAILURE_WAIT_TIME_MS));
+ assertThat(hasMicrodroidLogcatOutput()).isFalse();
+ }
+
+ @Test
+ public void testAdb_boots() throws Exception {
+ assumeTrue(
+ "Skip if host wouldn't install adbd",
+ isDebugPolicyEnabled(AVF_DEBUG_POLICY_ADB_DT_PROP_PATH));
+
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_with_adb.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ launchProtectedVmAndWaitForBootCompleted(MICRODROID_DEBUG_NONE);
+ }
+
+ @Test
+ public void testNoAdb_boots() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_adb.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ // The VM should boot, but we can't verify that directly because adbd isn't running in the VM.
+ CommandResult result = tryLaunchProtectedNonDebuggableVm();
+ assertThat(result.getStatus()).isEqualTo(CommandStatus.TIMED_OUT);
+ assertWithMessage("Microdroid should have booted")
+ .that(result.getStderr())
+ .contains("payload is ready");
+ }
+
+ @Test
+ public void testNoAdb_noConnection() throws Exception {
+ Pvmfw pvmfw = createPvmfw("avf_debug_policy_without_adb.dtbo");
+ pvmfw.serialize(mCustomPvmfwBinFileOnHost);
+
+ assertThrows(
+ "Microdroid shouldn't be recognized because of missing adb connection",
+ DeviceRuntimeException.class,
+ () ->
+ launchProtectedVmAndWaitForBootCompleted(
+ MICRODROID_DEBUG_NONE, BOOT_FAILURE_WAIT_TIME_MS));
+ }
+
+ private boolean isDebugPolicyEnabled(@NonNull String dtPropertyPath)
+ throws DeviceNotAvailableException {
+ CommandRunner runner = new CommandRunner(mAndroidDevice);
+ CommandResult result =
+ runner.runForResult("xxd", "-p", "/proc/device-tree" + dtPropertyPath);
+ if (result.getStatus() == CommandStatus.SUCCESS) {
+ return HEX_STRING_ONE.equals(result.getStdout().trim());
+ }
+ return false;
+ }
+
@NonNull
private String readMicrodroidFileAsString(@NonNull String path)
throws DeviceNotAvailableException {
@@ -215,19 +271,31 @@
.build();
}
- @NonNull
- private boolean hasConsoleOutput(CommandResult result) throws DeviceNotAvailableException {
+ private boolean hasConsoleOutput(@NonNull CommandResult result)
+ throws DeviceNotAvailableException {
return result.getStdout().contains("Run /init as init process");
}
- private ITestDevice launchProtectedVmAndWaitForBootCompleted()
+ private boolean hasMicrodroidLogcatOutput() throws DeviceNotAvailableException {
+ CommandResult result =
+ new CommandRunner(mAndroidDevice).runForResult("test", "-s", MICRODROID_LOG_PATH);
+ return result.getExitCode() == 0;
+ }
+
+ private ITestDevice launchProtectedVmAndWaitForBootCompleted(String debugLevel)
throws DeviceNotAvailableException {
+ return launchProtectedVmAndWaitForBootCompleted(debugLevel, BOOT_COMPLETE_TIMEOUT_MS);
+ }
+
+ private ITestDevice launchProtectedVmAndWaitForBootCompleted(
+ String debugLevel, long adbTimeoutMs) throws DeviceNotAvailableException {
mMicrodroidDevice =
MicrodroidBuilder.fromDevicePath(
getPathForPackage(PACKAGE_NAME), MICRODROID_CONFIG_PATH)
- .debugLevel(MICRODROID_DEBUG_FULL)
+ .debugLevel(debugLevel)
.protectedVm(/* protectedVm= */ true)
.addBootFile(mCustomPvmfwBinFileOnHost, PVMFW_FILE_NAME)
+ .setAdbConnectTimeoutMs(adbTimeoutMs)
.build(mAndroidDevice);
assertThat(mMicrodroidDevice.waitForBootComplete(BOOT_COMPLETE_TIMEOUT_MS)).isTrue();
assertThat(mMicrodroidDevice.enableAdbRoot()).isTrue();
@@ -235,10 +303,10 @@
}
// Try to launch protected non-debuggable VM for a while and quit.
- // Non-debuggable VM doesn't enable adb, so there's no ITestDevice instance of it.
+ // Non-debuggable VM might not enable adb, so there's no ITestDevice instance of it.
private CommandResult tryLaunchProtectedNonDebuggableVm() throws DeviceNotAvailableException {
// Can't use MicrodroidBuilder because it expects adb connection
- // but non-debuggable VM doesn't enable adb.
+ // but non-debuggable VM may not enable adb.
CommandRunner runner = new CommandRunner(mAndroidDevice);
runner.run("mkdir", "-p", TEST_ROOT);
mAndroidDevice.pushFile(mCustomPvmfwBinFileOnHost, TEST_ROOT + PVMFW_FILE_NAME);
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index d05d9b3..7044ae7 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -30,6 +30,7 @@
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static org.junit.Assume.assumeFalse;
import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
import com.google.common.base.Strings;
import com.google.common.truth.BooleanSubject;
@@ -56,13 +57,15 @@
import android.util.Log;
import com.android.compatibility.common.util.CddTest;
+import com.android.compatibility.common.util.VsrTest;
import com.android.microdroid.test.device.MicrodroidDeviceTestBase;
import com.android.microdroid.test.vmshare.IVmShareTestService;
+import com.android.microdroid.testservice.IAppCallback;
import com.android.microdroid.testservice.ITestService;
+import com.android.microdroid.testservice.IVmCallback;
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.function.ThrowingRunnable;
@@ -136,7 +139,7 @@
private static final String VM_SHARE_APP_PACKAGE_NAME = "com.android.microdroid.vmshare_app";
private void createAndConnectToVmHelper(int cpuTopology) throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -181,7 +184,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1", "9.17/C-2-1"})
public void createAndRunNoDebugVm() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
// For most of our tests we use a debug VM so failures can be diagnosed.
// But we do need non-debug VMs to work, so run one.
@@ -208,7 +211,7 @@
"9.17/C-1-4",
})
public void createVmRequiresPermission() {
- assumeSupportedKernel();
+ assumeSupportedDevice();
revokePermission(VirtualMachine.MANAGE_VIRTUAL_MACHINE_PERMISSION);
@@ -229,7 +232,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
public void autoCloseVm() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -314,7 +317,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
public void vmLifecycleChecks() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -363,7 +366,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
public void connectVsock() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -401,6 +404,59 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
+ public void binderCallbacksWork() throws Exception {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig config =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setMemoryBytes(minMemoryRequired())
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .build();
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_vm", config);
+
+ String request = "Hello";
+ CompletableFuture<String> response = new CompletableFuture<>();
+
+ IAppCallback appCallback =
+ new IAppCallback.Stub() {
+ @Override
+ public void setVmCallback(IVmCallback vmCallback) {
+ // Do this on a separate thread to simulate an asynchronous trigger,
+ // and to make sure it doesn't happen in the context of an inbound binder
+ // call.
+ new Thread() {
+ @Override
+ public void run() {
+ try {
+ vmCallback.echoMessage(request);
+ } catch (Exception e) {
+ response.completeExceptionally(e);
+ }
+ }
+ }.start();
+ }
+
+ @Override
+ public void onEchoRequestReceived(String message) {
+ response.complete(message);
+ }
+ };
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (service, results) -> {
+ service.requestCallback(appCallback);
+ response.get(10, TimeUnit.SECONDS);
+ });
+ testResults.assertNoException();
+ assertThat(response.getNow("no response")).isEqualTo("Received: " + request);
+ }
+
+ @Test
+ @CddTest(requirements = {"9.17/C-1-1"})
public void vmConfigGetAndSetTests() {
// Minimal specifies as little as possible; everything that can be defaulted is defaulted.
VirtualMachineConfig.Builder minimalBuilder = newVmConfigBuilder();
@@ -580,8 +636,16 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
+ public void testAvfRequiresUpdatableApex() throws Exception {
+ assertWithMessage("Devices that support AVF must also support updatable APEX")
+ .that(SystemProperties.getBoolean("ro.apex.updatable", false))
+ .isTrue();
+ }
+
+ @Test
+ @CddTest(requirements = {"9.17/C-1-1"})
public void vmmGetAndCreate() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -679,7 +743,7 @@
"9.17/C-1-4",
})
public void createVmWithConfigRequiresPermission() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -702,7 +766,7 @@
"9.17/C-1-1",
})
public void deleteVm() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -730,7 +794,7 @@
"9.17/C-1-1",
})
public void deleteVmFiles() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -763,7 +827,7 @@
"9.17/C-1-1",
})
public void validApkPathIsAccepted() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -800,7 +864,7 @@
"9.17/C-2-1"
})
public void extraApk() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
VirtualMachineConfig config =
@@ -868,7 +932,7 @@
}
private void changeDebugLevel(int fromLevel, int toLevel) throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig.Builder builder =
newVmConfigBuilder()
@@ -939,7 +1003,7 @@
"9.17/C-2-7"
})
public void instancesOfSameVmHaveDifferentCdis() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
VirtualMachineConfig normalConfig =
@@ -965,7 +1029,7 @@
"9.17/C-2-7"
})
public void sameInstanceKeepsSameCdis() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
assume().withMessage("Skip on CF. Too Slow. b/257270529").that(isCuttlefish()).isFalse();
grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
@@ -990,7 +1054,7 @@
"9.17/C-2-7"
})
public void bccIsSuperficiallyWellFormed() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
VirtualMachineConfig normalConfig =
@@ -1029,7 +1093,7 @@
"9.17/C-1-2"
})
public void accessToCdisIsRestricted() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1118,7 +1182,6 @@
}
@Test
- @Ignore("b/249723852")
@CddTest(requirements = {
"9.17/C-1-1",
"9.17/C-2-7"
@@ -1229,7 +1292,7 @@
@Test
public void importedVmAndOriginalVmHaveTheSameCdi() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
// Arrange
grantPermission(VirtualMachine.USE_CUSTOM_VIRTUAL_MACHINE_PERMISSION);
VirtualMachineConfig config =
@@ -1328,7 +1391,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
public void encryptedStorageAvailable() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1352,7 +1415,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
public void encryptedStorageIsInaccessibleToDifferentVm() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1413,7 +1476,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1", "9.17/C-2-1"})
public void microdroidLauncherHasEmptyCapabilities() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
final VirtualMachineConfig vmConfig =
newVmConfigBuilder()
@@ -1438,7 +1501,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1"})
public void encryptedStorageIsPersistent() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1475,7 +1538,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-1", "9.17/C-2-1"})
public void canReadFileFromAssets_debugFull() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1499,7 +1562,7 @@
@Test
public void outputShouldBeExplicitlyCaptured() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
final VirtualMachineConfig vmConfig =
new VirtualMachineConfig.Builder(getContext())
@@ -1524,14 +1587,14 @@
if (isUserBuild()) {
Log.i(
TAG,
- "Debug policy is inaccessible in userd build. Assumes that console output is"
+ "Debug policy is inaccessible in user build. Assumes that console output is"
+ " disabled");
return false;
}
try {
return getDebugPolicyBoolean("/avf/guest/common/log");
} catch (IOException e) {
- Log.i(TAG, "Fail to read debug policy. Assumes false", e);
+ Log.w(TAG, "Fail to read debug policy. Assumes false", e);
return false;
}
}
@@ -1568,9 +1631,9 @@
@Test
public void outputIsRedirectedToLogcatIfNotCaptured() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
assumeFalse(
- "Debug policy would turn on console output. Perhapse userdebug build?",
+ "Debug policy would turn on console output. Perhaps userdebug build?",
isConsoleOutputEnabledByDebugPolicy());
assertThat(checkVmOutputIsRedirectedToLogcat(true)).isTrue();
@@ -1578,9 +1641,9 @@
@Test
public void outputIsNotRedirectedToLogcatIfNotDebuggable() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
assumeFalse(
- "Debug policy would turn on console output. Perhapse userdebug build?",
+ "Debug policy would turn on console output. Perhaps userdebug build?",
isConsoleOutputEnabledByDebugPolicy());
assertThat(checkVmOutputIsRedirectedToLogcat(false)).isFalse();
@@ -1588,7 +1651,7 @@
@Test
public void testStartVmWithPayloadOfAnotherApp() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
Context ctx = getContext();
Context otherAppCtx = ctx.createPackageContext(VM_SHARE_APP_PACKAGE_NAME, 0);
@@ -1616,7 +1679,7 @@
@Test
public void testVmDescriptorParcelUnparcel_noTrustedStorage() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1650,7 +1713,7 @@
@Test
public void testVmDescriptorParcelUnparcel_withTrustedStorage() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig config =
newVmConfigBuilder()
@@ -1704,7 +1767,7 @@
@Test
public void testShareVmWithAnotherApp() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
Context ctx = getContext();
Context otherAppCtx = ctx.createPackageContext(VM_SHARE_APP_PACKAGE_NAME, 0);
@@ -1752,7 +1815,7 @@
@Test
public void testShareVmWithAnotherApp_encryptedStorage() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
Context ctx = getContext();
Context otherAppCtx = ctx.createPackageContext(VM_SHARE_APP_PACKAGE_NAME, 0);
@@ -1807,7 +1870,7 @@
@Test
@CddTest(requirements = {"9.17/C-1-5"})
public void testFileUnderBinHasExecutePermission() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig vmConfig =
newVmConfigBuilder()
@@ -1844,8 +1907,9 @@
private static final int MS_NOEXEC = 8;
@Test
+ @CddTest(requirements = {"9.17/C-1-5"})
public void dataIsMountedWithNoExec() throws Exception {
- assumeSupportedKernel();
+ assumeSupportedDevice();
VirtualMachineConfig vmConfig =
newVmConfigBuilder()
@@ -1868,6 +1932,52 @@
.isEqualTo(MS_NOEXEC);
}
+ @Test
+ @CddTest(requirements = {"9.17/C-1-5"})
+ public void encryptedStoreIsMountedWithNoExec() throws Exception {
+ assumeSupportedDevice();
+
+ VirtualMachineConfig vmConfig =
+ newVmConfigBuilder()
+ .setPayloadBinaryName("MicrodroidTestNativeLib.so")
+ .setDebugLevel(DEBUG_LEVEL_FULL)
+ .setEncryptedStorageBytes(4_000_000)
+ .build();
+ VirtualMachine vm = forceCreateNewVirtualMachine("test_vm_encstore_no_exec", vmConfig);
+
+ TestResults testResults =
+ runVmTestService(
+ TAG,
+ vm,
+ (ts, tr) -> {
+ tr.mMountFlags = ts.getMountFlags("/mnt/encryptedstore");
+ });
+
+ assertThat(testResults.mException).isNull();
+ assertWithMessage("/mnt/encryptedstore should be mounted with MS_NOEXEC")
+ .that(testResults.mMountFlags & MS_NOEXEC)
+ .isEqualTo(MS_NOEXEC);
+ }
+
+ @Test
+ @VsrTest(requirements = {"VSR-7.1-001.003"})
+ public void kernelVersionRequirement() throws Exception {
+ int firstApiLevel = SystemProperties.getInt("ro.product.first_api_level", 0);
+ assume().withMessage("Skip on devices launched before Android 14 (API level 34)")
+ .that(firstApiLevel)
+ .isAtLeast(34);
+
+ String[] tokens = KERNEL_VERSION.split("\\.");
+ int major = Integer.parseInt(tokens[0]);
+ int minor = Integer.parseInt(tokens[1]);
+
+ // Check kernel version >= 5.15
+ assertTrue(major >= 5);
+ if (major == 5) {
+ assertTrue(minor >= 15);
+ }
+ }
+
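The check above ties VSR-7.1-001.003 to a minimum 5.15 kernel on devices whose first API level is 34 or higher. The comparison can equivalently be written as a standalone predicate; a hedged sketch in which the `KernelVersion` class and `isAtLeast` method are illustrative and the version string is assumed to be of the form "major.minor[.rest]" like KERNEL_VERSION:

```java
// Illustrative sketch only; mirrors the inline assertions in kernelVersionRequirement().
public final class KernelVersion {
    /** Returns true if {@code version} ("major.minor[.rest]") is at least reqMajor.reqMinor. */
    public static boolean isAtLeast(String version, int reqMajor, int reqMinor) {
        String[] tokens = version.split("\\.");
        int major = Integer.parseInt(tokens[0]);
        int minor = Integer.parseInt(tokens[1]);
        return major > reqMajor || (major == reqMajor && minor >= reqMinor);
    }

    private KernelVersion() {}
}
```

With such a helper, `assertTrue(KernelVersion.isAtLeast(KERNEL_VERSION, 5, 15))` would express the same requirement as the inline checks.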
private static class VmShareServiceConnection implements ServiceConnection {
private final CountDownLatch mLatch = new CountDownLatch(1);
@@ -1937,7 +2047,7 @@
return 0;
}
- private void assumeSupportedKernel() {
+ private void assumeSupportedDevice() {
assume()
.withMessage("Skip on 5.4 kernel. b/218303240")
.that(KERNEL_VERSION)
diff --git a/tests/testapk/src/native/testbinary.cpp b/tests/testapk/src/native/testbinary.cpp
index 285dae9..d24ddfd 100644
--- a/tests/testapk/src/native/testbinary.cpp
+++ b/tests/testapk/src/native/testbinary.cpp
@@ -15,6 +15,8 @@
*/
#include <aidl/com/android/microdroid/testservice/BnTestService.h>
+#include <aidl/com/android/microdroid/testservice/BnVmCallback.h>
+#include <aidl/com/android/microdroid/testservice/IAppCallback.h>
#include <android-base/file.h>
#include <android-base/properties.h>
#include <android-base/result.h>
@@ -47,6 +49,8 @@
using android::fs_mgr::ReadFstabFromFile;
using aidl::com::android::microdroid::testservice::BnTestService;
+using aidl::com::android::microdroid::testservice::BnVmCallback;
+using aidl::com::android::microdroid::testservice::IAppCallback;
using ndk::ScopedAStatus;
extern void testlib_sub();
@@ -144,7 +148,25 @@
}
Result<void> start_test_service() {
+ class VmCallbackImpl : public BnVmCallback {
+ private:
+ std::shared_ptr<IAppCallback> mAppCallback;
+
+ public:
+ explicit VmCallbackImpl(const std::shared_ptr<IAppCallback>& appCallback)
+ : mAppCallback(appCallback) {}
+
+ ScopedAStatus echoMessage(const std::string& message) override {
+ std::thread callback_thread{[=, appCallback = mAppCallback] {
+ appCallback->onEchoRequestReceived("Received: " + message);
+ }};
+ callback_thread.detach();
+ return ScopedAStatus::ok();
+ }
+ };
+
class TestService : public BnTestService {
+ public:
ScopedAStatus addInteger(int32_t a, int32_t b, int32_t* out) override {
*out = a + b;
return ScopedAStatus::ok();
@@ -226,7 +248,7 @@
return ScopedAStatus::ok();
}
- virtual ::ScopedAStatus runEchoReverseServer() override {
+ ScopedAStatus runEchoReverseServer() override {
auto result = start_echo_reverse_server();
if (result.ok()) {
return ScopedAStatus::ok();
@@ -284,6 +306,13 @@
return ScopedAStatus::ok();
}
+ ScopedAStatus requestCallback(const std::shared_ptr<IAppCallback>& appCallback) {
+ auto vmCallback = ndk::SharedRefBase::make<VmCallbackImpl>(appCallback);
+ std::thread callback_thread{[=] { appCallback->setVmCallback(vmCallback); }};
+ callback_thread.detach();
+ return ScopedAStatus::ok();
+ }
+
ScopedAStatus quit() override { exit(0); }
};
auto testService = ndk::SharedRefBase::make<TestService>();
diff --git a/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java b/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java
index 467b98b..edd6bf5 100644
--- a/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java
+++ b/tests/vmshareapp/src/java/com/android/microdroid/test/sharevm/VmShareServiceImpl.java
@@ -29,6 +29,7 @@
import com.android.microdroid.test.vmshare.IVmShareTestService;
import com.android.microdroid.testservice.ITestService;
+import com.android.microdroid.testservice.IAppCallback;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
@@ -240,6 +241,11 @@
}
@Override
+ public void requestCallback(IAppCallback appCallback) {
+ throw new UnsupportedOperationException("Not supported");
+ }
+
+ @Override
public void quit() throws RemoteException {
throw new UnsupportedOperationException("Not supported");
}
diff --git a/virtualizationmanager/src/aidl.rs b/virtualizationmanager/src/aidl.rs
index 48e2431..749d75f 100644
--- a/virtualizationmanager/src/aidl.rs
+++ b/virtualizationmanager/src/aidl.rs
@@ -48,7 +48,7 @@
use android_system_virtualmachineservice::aidl::android::system::virtualmachineservice::IVirtualMachineService::{
BnVirtualMachineService, IVirtualMachineService,
};
-use anyhow::{bail, Context, Result};
+use anyhow::{anyhow, bail, Context, Result};
use apkverify::{HashAlgorithm, V4Signature};
use binder::{
self, wait_for_interface, BinderFeatures, ExceptionCode, Interface, ParcelFileDescriptor,
@@ -60,6 +60,7 @@
use microdroid_payload_config::{OsConfig, Task, TaskType, VmPayloadConfig};
use nix::unistd::pipe;
use rpcbinder::RpcServer;
+use rustutils::system_properties;
use semver::VersionReq;
use std::convert::TryInto;
use std::ffi::CStr;
@@ -111,8 +112,9 @@
if !metadata.is_file() {
bail!("input is not a regular file");
}
- let mut sig = V4Signature::create(&mut input, 4096, &[], HashAlgorithm::SHA256)
- .context("failed to create idsig")?;
+ let mut sig =
+ V4Signature::create(&mut input, get_current_sdk()?, 4096, &[], HashAlgorithm::SHA256)
+ .context("failed to create idsig")?;
let mut output = clone_file(idsig_fd)?;
output.set_len(0).context("failed to set_len on the idsig output")?;
@@ -120,6 +122,12 @@
Ok(())
}
+fn get_current_sdk() -> Result<u32> {
+ let current_sdk = system_properties::read("ro.build.version.sdk")?;
+ let current_sdk = current_sdk.ok_or_else(|| anyhow!("SDK version missing"))?;
+ current_sdk.parse().context("Malformed SDK version")
+}
+
pub fn remove_temporary_files(path: &PathBuf) -> Result<()> {
for dir_entry in read_dir(path)? {
remove_file(dir_entry?.path())?;
@@ -717,10 +725,11 @@
/// user devices (W^X).
fn check_label_is_allowed(context: &SeContext) -> Result<()> {
match context.selinux_type()? {
- | "system_file" // immutable dm-verity protected partition
| "apk_data_file" // APKs of an installed app
- | "staging_data_file" // updated/staged APEX images
| "shell_data_file" // test files created via adb shell
+ | "staging_data_file" // updated/staged APEX images
+ | "system_file" // immutable dm-verity protected partition
+ | "virtualizationservice_data_file" // files created by VS / VirtMgr
=> Ok(()),
_ => bail!("Label {} is not allowed", context),
}
diff --git a/virtualizationmanager/src/crosvm.rs b/virtualizationmanager/src/crosvm.rs
index 9db0971..7201670 100644
--- a/virtualizationmanager/src/crosvm.rs
+++ b/virtualizationmanager/src/crosvm.rs
@@ -53,6 +53,7 @@
use rpcbinder::RpcServer;
/// external/crosvm
+use base::AsRawDescriptor;
use base::UnixSeqpacketListener;
use vm_control::{BalloonControlCommand, VmRequest, VmResponse};
@@ -491,6 +492,10 @@
// first, as monitor_vm_exit() takes it as well.
monitor_vm_exit_thread.map(JoinHandle::join);
+ // Now that the VM has been killed, shut down the VirtualMachineService
+ // server to eagerly free up the server threads.
+ self.vm_context.vm_server.shutdown()?;
+
Ok(())
}
@@ -722,8 +727,7 @@
command.arg("--unmap-guest-memory-on-fork");
if config.ramdump.is_some() {
- // Protected VM needs to reserve memory for ramdump here. pvmfw will drop This
- // if ramdump should be disabled (via debug policy). Note that we reserve more
+ // Protected VM needs to reserve memory for ramdump here. Note that we reserve more
// memory for the restricted dma pool.
let ramdump_reserve = RAMDUMP_RESERVED_MIB + swiotlb_size_mib;
command.arg("--params").arg(format!("crashkernel={ramdump_reserve}M"));
@@ -823,7 +827,9 @@
let control_server_socket = UnixSeqpacketListener::bind(crosvm_control_socket_path)
.context("failed to create control server")?;
- command.arg("--socket").arg(add_preserved_fd(&mut preserved_fds, &control_server_socket));
+ command
+ .arg("--socket")
+ .arg(add_preserved_fd(&mut preserved_fds, &control_server_socket.as_raw_descriptor()));
debug!("Preserving FDs {:?}", preserved_fds);
command.preserved_fds(preserved_fds);
diff --git a/virtualizationmanager/src/debug_config.rs b/virtualizationmanager/src/debug_config.rs
index a4ec419..ec3d591 100644
--- a/virtualizationmanager/src/debug_config.rs
+++ b/virtualizationmanager/src/debug_config.rs
@@ -34,25 +34,23 @@
pub fn should_prepare_console_output(debug_level: DebugLevel) -> bool {
debug_level != DebugLevel::NONE
|| get_debug_policy_bool("/proc/device-tree/avf/guest/common/log").unwrap_or_default()
+ || get_debug_policy_bool("/proc/device-tree/avf/guest/microdroid/adb").unwrap_or_default()
+}
+
+/// Returns whether the debug APEXes (MICRODROID_REQUIRED_APEXES_DEBUG) are required.
+pub fn should_include_debug_apexes(debug_level: DebugLevel) -> bool {
+ debug_level != DebugLevel::NONE
+ || get_debug_policy_bool("/proc/device-tree/avf/guest/microdroid/adb").unwrap_or_default()
}
/// Decision to support ramdump
pub fn is_ramdump_needed(config: &VirtualMachineConfig) -> bool {
let enabled_in_dp =
get_debug_policy_bool("/proc/device-tree/avf/guest/common/ramdump").unwrap_or_default();
- let (protected, debuggable) = match config {
- VirtualMachineConfig::RawConfig(config) => {
- // custom VMs are considered debuggable for flexibility
- (config.protectedVm, true)
- }
- VirtualMachineConfig::AppConfig(config) => {
- (config.protectedVm, config.debugLevel == DebugLevel::FULL)
- }
+ let debuggable = match config {
+ VirtualMachineConfig::RawConfig(_) => false,
+ VirtualMachineConfig::AppConfig(config) => config.debugLevel == DebugLevel::FULL,
};
- if protected {
- enabled_in_dp
- } else {
- enabled_in_dp || debuggable
- }
+ enabled_in_dp || debuggable
}
diff --git a/virtualizationmanager/src/payload.rs b/virtualizationmanager/src/payload.rs
index 02e8f8e..99aea01 100644
--- a/virtualizationmanager/src/payload.rs
+++ b/virtualizationmanager/src/payload.rs
@@ -14,6 +14,7 @@
//! Payload disk image
+use crate::debug_config::should_include_debug_apexes;
use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
DiskImage::DiskImage,
Partition::Partition,
@@ -382,7 +383,7 @@
debug_level: DebugLevel,
) -> Vec<&'a ApexInfo> {
let mut additional_apexes: Vec<&str> = MICRODROID_REQUIRED_APEXES.to_vec();
- if debug_level != DebugLevel::NONE {
+ if should_include_debug_apexes(debug_level) {
additional_apexes.extend(MICRODROID_REQUIRED_APEXES_DEBUG.to_vec());
}
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 5d785de..36edc64 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -152,7 +152,7 @@
}
fn find_empty_payload_apk_path() -> Result<PathBuf, Error> {
- const GLOB_PATTERN: &str = "/apex/com.android.virt/app/**/EmptyPayloadApp.apk";
+ const GLOB_PATTERN: &str = "/apex/com.android.virt/app/**/EmptyPayloadApp*.apk";
let mut entries: Vec<PathBuf> =
glob(GLOB_PATTERN).context("failed to glob")?.filter_map(|e| e.ok()).collect();
if entries.len() > 1 {
diff --git a/vmbase/README.md b/vmbase/README.md
index 3554ae6..552ac31 100644
--- a/vmbase/README.md
+++ b/vmbase/README.md
@@ -25,28 +25,18 @@
```soong
rust_ffi_static {
name: "libvmbase_example",
+ defaults: ["vmbase_ffi_defaults"],
crate_name: "vmbase_example",
srcs: ["src/main.rs"],
- edition: "2021",
- no_stdlibs: true,
- stdlibs: [
- "libcompiler_builtins.rust_sysroot",
- "libcore.rust_sysroot",
- ],
rustlibs: [
"libvmbase",
],
- enabled: false,
- target: {
- android_arm64: {
- enabled: true,
- },
- },
}
```
-Note that stdlibs must be explicitly specified, as we don't want the normal set of libraries used
-for a C++ binary intended to run in Android userspace.
+`vmbase_ffi_defaults`, among other things, specifies the stdlibs, including the `compiler_builtins`
+and `core` crates. These must be specified explicitly because we don't want the normal set of
+libraries used for a C++ binary intended to run in Android userspace.
### Entry point
@@ -139,30 +129,18 @@
```soong
cc_binary {
- name: "vmbase_example_elf",
- stem: "vmbase_example",
+ name: "vmbase_example",
+ defaults: ["vmbase_elf_defaults"],
srcs: [
"idmap.S",
],
static_libs: [
- "libvmbase_entry",
"libvmbase_example",
],
- static_executable: true,
- nocrt: true,
- system_shared_libs: ["libc"],
- stl: "none",
linker_scripts: [
"image.ld",
":vmbase_sections",
],
- installable: false,
- enabled: false,
- target: {
- android_arm64: {
- enabled: true,
- },
- },
}
```
@@ -174,9 +152,9 @@
```soong
raw_binary {
- name: "vmbase_example",
- src: ":vmbase_example_elf",
+ name: "vmbase_example_bin",
stem: "vmbase_example.bin",
+ src: ":vmbase_example",
enabled: false,
target: {
android_arm64: {
diff --git a/vmbase/example/src/main.rs b/vmbase/example/src/main.rs
index 3b0e9db..9ec2dc4 100644
--- a/vmbase/example/src/main.rs
+++ b/vmbase/example/src/main.rs
@@ -16,7 +16,6 @@
#![no_main]
#![no_std]
-#![feature(default_alloc_error_handler)]
mod exceptions;
mod layout;
diff --git a/vmbase/example/src/pci.rs b/vmbase/example/src/pci.rs
index c0a2d2b..117cbc8 100644
--- a/vmbase/example/src/pci.rs
+++ b/vmbase/example/src/pci.rs
@@ -20,7 +20,7 @@
use fdtpci::PciInfo;
use log::{debug, info};
use virtio_drivers::{
- device::blk::VirtIOBlk,
+ device::{blk::VirtIOBlk, console::VirtIOConsole},
transport::{
pci::{bus::PciRoot, virtio_device_type, PciTransport},
DeviceType, Transport,
@@ -53,29 +53,41 @@
}
}
- assert_eq!(checked_virtio_device_count, 1);
+ assert_eq!(checked_virtio_device_count, 4);
}
/// Checks the given VirtIO device, if we know how to.
///
/// Returns true if the device was checked, or false if it was ignored.
fn check_virtio_device(transport: impl Transport, device_type: DeviceType) -> bool {
- if device_type == DeviceType::Block {
- let mut blk = VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
- info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
- assert_eq!(blk.capacity(), EXPECTED_SECTOR_COUNT as u64);
- let mut data = [0; SECTOR_SIZE_BYTES * EXPECTED_SECTOR_COUNT];
- for i in 0..EXPECTED_SECTOR_COUNT {
- blk.read_block(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
- .expect("Failed to read block device.");
+ match device_type {
+ DeviceType::Block => {
+ let mut blk =
+ VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
+ info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
+ assert_eq!(blk.capacity(), EXPECTED_SECTOR_COUNT as u64);
+ let mut data = [0; SECTOR_SIZE_BYTES * EXPECTED_SECTOR_COUNT];
+ for i in 0..EXPECTED_SECTOR_COUNT {
+ blk.read_block(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
+ .expect("Failed to read block device.");
+ }
+ for (i, chunk) in data.chunks(size_of::<u32>()).enumerate() {
+ assert_eq!(chunk, &(i as u32).to_le_bytes());
+ }
+ info!("Read expected data from block device.");
+ true
}
- for (i, chunk) in data.chunks(size_of::<u32>()).enumerate() {
- assert_eq!(chunk, &(i as u32).to_le_bytes());
+ DeviceType::Console => {
+ let mut console = VirtIOConsole::<HalImpl, _>::new(transport)
+ .expect("Failed to create VirtIO console driver");
+ info!("Found console device: {:?}", console.info());
+ for &c in b"Hello VirtIO console\n" {
+ console.send(c).expect("Failed to send character to VirtIO console device");
+ }
+ info!("Wrote to VirtIO console.");
+ true
}
- info!("Read expected data from block device.");
- true
- } else {
- false
+ _ => false,
}
}
diff --git a/vmbase/example/tests/test.rs b/vmbase/example/tests/test.rs
index 930e137..8f0eaa5 100644
--- a/vmbase/example/tests/test.rs
+++ b/vmbase/example/tests/test.rs
@@ -25,7 +25,7 @@
use log::info;
use std::{
fs::File,
- io::{self, BufRead, BufReader, Write},
+ io::{self, BufRead, BufReader, Read, Write},
os::unix::io::FromRawFd,
panic, thread,
};
@@ -90,8 +90,8 @@
gdbPort: 0, // no gdb
});
let console = android_log_fd()?;
- let log = android_log_fd()?;
- let vm = VmInstance::create(service.as_ref(), &config, Some(console), Some(log), None)
+ let (mut log_reader, log_writer) = pipe()?;
+ let vm = VmInstance::create(service.as_ref(), &config, Some(console), Some(log_writer), None)
.context("Failed to create VM")?;
vm.start().context("Failed to start VM")?;
info!("Started example VM.");
@@ -100,15 +100,17 @@
let death_reason = vm.wait_for_death();
assert_eq!(death_reason, DeathReason::Shutdown);
+ // Check that the expected string was written to the log VirtIO console device.
+ let expected = "Hello VirtIO console\n";
+ let mut log_output = String::new();
+ assert_eq!(log_reader.read_to_string(&mut log_output)?, expected.len());
+ assert_eq!(log_output, expected);
+
Ok(())
}
fn android_log_fd() -> io::Result<File> {
- let (reader_fd, writer_fd) = nix::unistd::pipe()?;
-
- // SAFETY: These are new FDs with no previous owner.
- let reader = unsafe { File::from_raw_fd(reader_fd) };
- let writer = unsafe { File::from_raw_fd(writer_fd) };
+ let (reader, writer) = pipe()?;
thread::spawn(|| {
for line in BufReader::new(reader).lines() {
@@ -117,3 +119,13 @@
});
Ok(writer)
}
+
+fn pipe() -> io::Result<(File, File)> {
+ let (reader_fd, writer_fd) = nix::unistd::pipe()?;
+
+ // SAFETY: These are new FDs with no previous owner.
+ let reader = unsafe { File::from_raw_fd(reader_fd) };
+ let writer = unsafe { File::from_raw_fd(writer_fd) };
+
+ Ok((reader, writer))
+}
diff --git a/vmclient/src/lib.rs b/vmclient/src/lib.rs
index 0e3d140..d67d87e 100644
--- a/vmclient/src/lib.rs
+++ b/vmclient/src/lib.rs
@@ -57,7 +57,7 @@
"android.system.virtualizationservice";
const VIRTMGR_PATH: &str = "/apex/com.android.virt/bin/virtmgr";
-const VIRTMGR_THREADS: usize = 16;
+const VIRTMGR_THREADS: usize = 2;
fn posix_pipe() -> Result<(OwnedFd, OwnedFd), io::Error> {
use nix::fcntl::OFlag;
@@ -122,7 +122,6 @@
let session = RpcSession::new();
session.set_file_descriptor_transport_mode(FileDescriptorTransportMode::Unix);
session.set_max_incoming_threads(VIRTMGR_THREADS);
- session.set_max_outgoing_threads(VIRTMGR_THREADS);
session
.setup_unix_domain_bootstrap_client(self.client_fd.as_fd())
.map_err(|_| io::Error::from(io::ErrorKind::ConnectionRefused))