Merge "Pass 'ioremap_guard' through Microdroid cmdline"
diff --git a/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb b/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb
index 445f731..7973ed6 100644
--- a/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb
+++ b/.prebuilt_info/prebuilt_info_pvmfw_pvmfw_img.asciipb
@@ -1,6 +1,6 @@
drops {
android_build_drop {
- build_id: "8191635"
+ build_id: "8231605"
target: "u-boot_pvmfw"
source_file: "pvmfw.img"
}
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index b659b73..e782bd2 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -250,7 +250,8 @@
RunCommand(args, cmd)
# libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
# which matches this or the read will fail.
- RunCommand(args, ['truncate', '-s', '65536', vbmeta_img])
+ with open(vbmeta_img, 'a') as f:
+ f.truncate(65536)
class TempDirectory(object):
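Review note: the hunk above swaps the external `truncate -s 65536` call for an in-process `f.truncate(65536)`, padding `vbmeta.img` to the 64 KiB maximum that libavb expects to read. For comparison only, a minimal Rust sketch of the same padding step (placeholder path, and it assumes the image file already exists):

```rust
use std::fs::OpenOptions;
use std::io;

/// Pad (or shrink) a vbmeta image to the 64 KiB maximum size libavb reads.
fn pad_vbmeta_to_max(path: &str) -> io::Result<()> {
    const MAX_VBMETA_SIZE: u64 = 65536;
    let file = OpenOptions::new().write(true).open(path)?;
    // set_len extends with zeros or truncates, matching `truncate -s 65536`.
    file.set_len(MAX_VBMETA_SIZE)?;
    Ok(())
}
```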
diff --git a/authfs/fd_server/src/aidl.rs b/authfs/fd_server/src/aidl.rs
index f13f249..3a3cdb2 100644
--- a/authfs/fd_server/src/aidl.rs
+++ b/authfs/fd_server/src/aidl.rs
@@ -17,7 +17,7 @@
use anyhow::Result;
use log::error;
use nix::{
- dir::Dir, errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::fchmod, sys::stat::mkdirat,
+ errno::Errno, fcntl::openat, fcntl::OFlag, sys::stat::fchmod, sys::stat::mkdirat,
sys::stat::mode_t, sys::stat::Mode, sys::statvfs::statvfs, sys::statvfs::Statvfs,
unistd::unlinkat, unistd::UnlinkatFlags,
};
@@ -29,8 +29,9 @@
use std::os::unix::fs::FileExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::{Component, Path, PathBuf, MAIN_SEPARATOR};
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, RwLock};
+use crate::common::OwnedFd;
use crate::fsverity;
use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::{
BnVirtFdService, FsStat::FsStat, IVirtFdService, MAX_REQUESTING_DATA,
@@ -63,21 +64,21 @@
ReadWrite(File),
/// A read-only directory to serve by this server.
- InputDir(Dir),
+ InputDir(OwnedFd),
/// A writable directory to serve by this server.
- OutputDir(Dir),
+ OutputDir(OwnedFd),
}
pub struct FdService {
/// A pool of opened files and directories, which can be looked up by the FD number.
- fd_pool: Arc<Mutex<BTreeMap<i32, FdConfig>>>,
+ fd_pool: Arc<RwLock<BTreeMap<i32, FdConfig>>>,
}
impl FdService {
pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
BnVirtFdService::new_binder(
- FdService { fd_pool: Arc::new(Mutex::new(fd_pool)) },
+ FdService { fd_pool: Arc::new(RwLock::new(fd_pool)) },
BinderFeatures::default(),
)
}
@@ -88,7 +89,7 @@
where
F: FnOnce(&FdConfig) -> BinderResult<R>,
{
- let fd_pool = self.fd_pool.lock().unwrap();
+ let fd_pool = self.fd_pool.read().unwrap();
let fd_config = fd_pool.get(&id).ok_or_else(|| new_errno_error(Errno::EBADF))?;
handle_fn(fd_config)
}
@@ -99,7 +100,7 @@
where
F: FnOnce(&mut FdConfig) -> BinderResult<(i32, FdConfig)>,
{
- let mut fd_pool = self.fd_pool.lock().unwrap();
+ let mut fd_pool = self.fd_pool.write().unwrap();
let fd_config = fd_pool.get_mut(&fd).ok_or_else(|| new_errno_error(Errno::EBADF))?;
let (new_fd, new_fd_config) = create_fn(fd_config)?;
if let btree_map::Entry::Vacant(entry) = fd_pool.entry(new_fd) {
@@ -174,7 +175,7 @@
} else {
Err(new_binder_exception(
ExceptionCode::SERVICE_SPECIFIC,
- "metadata doesn't contain a signature".to_string(),
+ "metadata doesn't contain a signature",
))
}
} else {
@@ -319,14 +320,12 @@
FdConfig::OutputDir(_) => {
let mode = validate_file_mode(mode)?;
mkdirat(dir_fd, basename, mode).map_err(new_errno_error)?;
- let new_dir = Dir::openat(
- dir_fd,
- basename,
- OFlag::O_DIRECTORY | OFlag::O_RDONLY,
- Mode::empty(),
- )
- .map_err(new_errno_error)?;
- Ok((new_dir.as_raw_fd(), FdConfig::OutputDir(new_dir)))
+ let new_dir_fd =
+ openat(dir_fd, basename, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())
+ .map_err(new_errno_error)?;
+ // SAFETY: openat succeeded, so new_dir_fd is a valid, newly created FD not owned elsewhere.
+ let fd_owner = unsafe { OwnedFd::from_raw_fd(new_dir_fd) };
+ Ok((new_dir_fd, FdConfig::OutputDir(fd_owner)))
}
_ => Err(new_errno_error(Errno::ENOTDIR)),
})
@@ -336,7 +335,7 @@
validate_basename(basename)?;
self.handle_fd(dir_fd, |config| match config {
- FdConfig::OutputDir(_dir) => {
+ FdConfig::OutputDir(_) => {
unlinkat(Some(dir_fd), basename, UnlinkatFlags::NoRemoveDir)
.map_err(new_errno_error)?;
Ok(())
@@ -350,7 +349,7 @@
validate_basename(basename)?;
self.handle_fd(dir_fd, |config| match config {
- FdConfig::OutputDir(_dir) => {
+ FdConfig::OutputDir(_) => {
unlinkat(Some(dir_fd), basename, UnlinkatFlags::RemoveDir)
.map_err(new_errno_error)?;
Ok(())
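Review note: the switch from `Mutex` to `RwLock` in this file lets concurrent read-only lookups of `fd_pool` proceed in parallel, while mutations (adding a newly created FD) still take an exclusive lock. A standalone sketch of the pattern, with illustrative names rather than the service's actual API:

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};

struct Pool {
    // Shared pool of entries, keyed by FD number.
    fd_pool: Arc<RwLock<BTreeMap<i32, String>>>,
}

impl Pool {
    // Read-only lookups take the shared lock; many can run concurrently.
    fn lookup(&self, fd: i32) -> Option<String> {
        self.fd_pool.read().unwrap().get(&fd).cloned()
    }

    // Insertions take the exclusive write lock.
    fn insert(&self, fd: i32, value: String) {
        self.fd_pool.write().unwrap().insert(fd, value);
    }
}
```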
diff --git a/authfs/fd_server/src/common.rs b/authfs/fd_server/src/common.rs
new file mode 100644
index 0000000..f836bac
--- /dev/null
+++ b/authfs/fd_server/src/common.rs
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::fs::File;
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+
+// TODO: Remove if/when std::os::unix::io::OwnedFd is standardized.
+pub struct OwnedFd {
+ owner: File,
+}
+
+impl FromRawFd for OwnedFd {
+ unsafe fn from_raw_fd(fd: RawFd) -> Self {
+ OwnedFd { owner: File::from_raw_fd(fd) }
+ }
+}
+
+impl AsRawFd for OwnedFd {
+ fn as_raw_fd(&self) -> RawFd {
+ self.owner.as_raw_fd()
+ }
+}
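Review note: `OwnedFd` delegates ownership to a `File`, so the wrapped descriptor is closed on drop without exposing `File`'s read/write API. A usage sketch, assuming the `OwnedFd` type introduced above is in scope and the directory FD and name are placeholders:

```rust
use nix::fcntl::{openat, OFlag};
use nix::sys::stat::Mode;
use std::os::unix::io::FromRawFd;

/// Open a sub-directory relative to `dir_fd` and transfer ownership to
/// OwnedFd so the descriptor is closed automatically when dropped.
fn open_subdir_owned(dir_fd: i32, name: &str) -> nix::Result<OwnedFd> {
    let raw = openat(dir_fd, name, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
    // SAFETY: `raw` was just returned by openat and is not owned elsewhere.
    Ok(unsafe { OwnedFd::from_raw_fd(raw) })
}
```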
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index fe0475f..a1d09fc 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -23,12 +23,13 @@
//! client can then request the content of file 9 by offset and size.
mod aidl;
+mod common;
mod fsverity;
use anyhow::{bail, Result};
use binder_common::rpc_server::run_rpc_server;
use log::debug;
-use nix::{dir::Dir, sys::stat::umask, sys::stat::Mode};
+use nix::sys::stat::{umask, Mode};
use std::collections::BTreeMap;
use std::fs::File;
use std::os::unix::io::FromRawFd;
@@ -44,12 +45,12 @@
retval >= 0
}
-fn fd_to_file(fd: i32) -> Result<File> {
+fn fd_to_owned<T: FromRawFd>(fd: i32) -> Result<T> {
if !is_fd_valid(fd) {
bail!("Bad FD: {}", fd);
}
// SAFETY: The caller is supposed to provide valid FDs to this process.
- Ok(unsafe { File::from_raw_fd(fd) })
+ Ok(unsafe { T::from_raw_fd(fd) })
}
fn parse_arg_ro_fds(arg: &str) -> Result<(i32, FdConfig)> {
@@ -61,11 +62,11 @@
Ok((
fds[0],
FdConfig::Readonly {
- file: fd_to_file(fds[0])?,
+ file: fd_to_owned(fds[0])?,
// Alternative metadata source, if provided
alt_metadata: fds
.get(1)
- .map(|fd| fd_to_file(*fd))
+ .map(|fd| fd_to_owned(*fd))
.transpose()?
.and_then(|f| parse_fsverity_metadata(f).ok()),
},
@@ -74,7 +75,7 @@
fn parse_arg_rw_fds(arg: &str) -> Result<(i32, FdConfig)> {
let fd = arg.parse::<i32>()?;
- let file = fd_to_file(fd)?;
+ let file = fd_to_owned::<File>(fd)?;
if file.metadata()?.len() > 0 {
bail!("File is expected to be empty");
}
@@ -83,12 +84,12 @@
fn parse_arg_ro_dirs(arg: &str) -> Result<(i32, FdConfig)> {
let fd = arg.parse::<i32>()?;
- Ok((fd, FdConfig::InputDir(Dir::from_fd(fd)?)))
+ Ok((fd, FdConfig::InputDir(fd_to_owned(fd)?)))
}
fn parse_arg_rw_dirs(arg: &str) -> Result<(i32, FdConfig)> {
let fd = arg.parse::<i32>()?;
- Ok((fd, FdConfig::OutputDir(Dir::from_fd(fd)?)))
+ Ok((fd, FdConfig::OutputDir(fd_to_owned(fd)?)))
}
struct Args {
@@ -147,7 +148,7 @@
}
let ready_fd = if let Some(arg) = matches.value_of("ready-fd") {
let fd = arg.parse::<i32>()?;
- Some(fd_to_file(fd)?)
+ Some(fd_to_owned(fd)?)
} else {
None
};
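Review note: the generic `fd_to_owned::<T: FromRawFd>` replaces the `File`-only helper, so one validation path can wrap regular files, served directories, and the ready-FD pipe. A usage sketch (FD numbers are placeholders; it assumes `fd_to_owned` and `OwnedFd` from this change are in scope):

```rust
use anyhow::Result;
use std::fs::File;

// One generic helper serves every kind of FD the server inherits.
fn claim_inherited_fds() -> Result<()> {
    let _rw_file: File = fd_to_owned(3)?;    // writable output file
    let _ro_dir: OwnedFd = fd_to_owned(4)?;  // read-only directory to serve
    let _ready_fd: File = fd_to_owned(5)?;   // pipe used to signal readiness
    Ok(())
}
```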
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index f1e7529..857c6d9 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -99,6 +99,36 @@
merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}
+ fn read_backing_chunk_unverified(
+ &self,
+ chunk_index: u64,
+ buf: &mut ChunkBuffer,
+ ) -> io::Result<usize> {
+ self.file.read_chunk(chunk_index, buf)
+ }
+
+ fn read_backing_chunk_verified(
+ &self,
+ chunk_index: u64,
+ buf: &mut ChunkBuffer,
+ merkle_tree_locked: &MerkleLeaves,
+ ) -> io::Result<usize> {
+ debug_assert_usize_is_u64();
+
+ if merkle_tree_locked.is_index_valid(chunk_index as usize) {
+ let size = self.read_backing_chunk_unverified(chunk_index, buf)?;
+
+ // Ensure the returned buffer matches the known hash.
+ let hash = Sha256Hasher::new()?.update(buf)?.finalize()?;
+ if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
+ return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
+ }
+ Ok(size)
+ } else {
+ Ok(0)
+ }
+ }
+
fn new_hash_for_incomplete_write(
&self,
source: &[u8],
@@ -114,7 +144,7 @@
// If previous data exists, read back and verify against the known hash (since the
// storage / remote server is not trusted).
if merkle_tree.is_index_valid(output_chunk_index) {
- self.read_chunk(output_chunk_index as u64, &mut orig_data)?;
+ self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;
// Verify original content
let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
@@ -239,7 +269,7 @@
let chunk_index = size / CHUNK_SIZE;
if new_tail_size > 0 {
let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
- let s = self.read_chunk(chunk_index, &mut buf)?;
+ let s = self.read_backing_chunk_verified(chunk_index, &mut buf, &merkle_tree)?;
debug_assert!(new_tail_size <= s);
let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
@@ -260,7 +290,8 @@
impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
- self.file.read_chunk(chunk_index, buf)
+ let merkle_tree = self.merkle_tree.read().unwrap();
+ self.read_backing_chunk_verified(chunk_index, buf, &merkle_tree)
}
}
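Review note: with `read_backing_chunk_verified`, every read (including the read-back that backs a shrinking resize) checks the 4 KiB chunk against the recorded Merkle-leaf hash, since the backing storage is untrusted. A standalone sketch of the consistency check using the `sha2` crate instead of the module's own `Sha256Hasher` and `MerkleLeaves` (names here are illustrative):

```rust
use sha2::{Digest, Sha256};
use std::io;

const CHUNK_SIZE: usize = 4096;

/// Accept the chunk only if it hashes to the expected Merkle-leaf value.
fn verify_chunk(chunk: &[u8; CHUNK_SIZE], expected_hash: &[u8; 32]) -> io::Result<()> {
    let actual = Sha256::digest(&chunk[..]);
    if actual.as_slice() == &expected_hash[..] {
        Ok(())
    } else {
        Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"))
    }
}
```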
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 5bc25b0..511db68 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -33,7 +33,7 @@
use std::os::unix::ffi::OsStrExt;
use std::path::{Component, Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, RwLock};
use std::time::Duration;
use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
@@ -104,7 +104,7 @@
/// stay alive until the reference count reaches zero.
///
/// Note: This is not to be confused with hardlinks, which AuthFS doesn't currently implement.
- handle_ref_count: u64,
+ handle_ref_count: AtomicU64,
/// Whether the inode is already unlinked, i.e. should be removed, once `handle_ref_count` is
/// down to zero.
@@ -113,11 +113,11 @@
impl InodeState {
fn new(entry: AuthFsEntry) -> Self {
- InodeState { entry, handle_ref_count: 0, unlinked: false }
+ InodeState { entry, handle_ref_count: AtomicU64::new(0), unlinked: false }
}
fn new_with_ref_count(entry: AuthFsEntry, handle_ref_count: u64) -> Self {
- InodeState { entry, handle_ref_count, unlinked: false }
+ InodeState { entry, handle_ref_count: AtomicU64::new(handle_ref_count), unlinked: false }
}
}
@@ -188,7 +188,7 @@
pub struct AuthFs {
/// Table for `Inode` to `InodeState` lookup. This needs to be `Sync` to be used in
/// `fuse::worker::start_message_loop`.
- inode_table: Mutex<BTreeMap<Inode, InodeState>>,
+ inode_table: RwLock<BTreeMap<Inode, InodeState>>,
/// The next available inode number.
next_inode: AtomicU64,
@@ -200,7 +200,7 @@
///
/// Currently, no code locks `dir_handle_table` and `inode_table` at the same time to avoid
/// deadlock.
- dir_handle_table: Mutex<DirHandleTable>,
+ dir_handle_table: RwLock<DirHandleTable>,
/// The next available handle number.
next_handle: AtomicU64,
@@ -222,9 +222,9 @@
);
AuthFs {
- inode_table: Mutex::new(inode_table),
+ inode_table: RwLock::new(inode_table),
next_inode: AtomicU64::new(ROOT_INODE + 1),
- dir_handle_table: Mutex::new(BTreeMap::new()),
+ dir_handle_table: RwLock::new(BTreeMap::new()),
next_handle: AtomicU64::new(1),
remote_fs_stats_reader,
}
@@ -321,7 +321,7 @@
where
F: FnOnce(&AuthFsEntry) -> io::Result<R>,
{
- let inode_table = self.inode_table.lock().unwrap();
+ let inode_table = self.inode_table.read().unwrap();
handle_inode_locked(&inode_table, inode, |inode_state| handle_fn(&inode_state.entry))
}
@@ -343,7 +343,7 @@
where
F: FnOnce(&mut AuthFsEntry, &Path, Inode) -> io::Result<AuthFsEntry>,
{
- let mut inode_table = self.inode_table.lock().unwrap();
+ let mut inode_table = self.inode_table.write().unwrap();
let (new_inode, new_file_entry) = handle_inode_mut_locked(
&mut inode_table,
&parent_inode,
@@ -368,7 +368,7 @@
dir_entries: Vec<AuthFsDirEntry>,
) -> io::Result<(Option<Handle>, FuseOpenOptions)> {
let handle = self.next_handle.fetch_add(1, Ordering::Relaxed);
- let mut dir_handle_table = self.dir_handle_table.lock().unwrap();
+ let mut dir_handle_table = self.dir_handle_table.write().unwrap();
if let btree_map::Entry::Vacant(value) = dir_handle_table.entry(handle) {
value.insert(Arc::new(dir_entries));
Ok((Some(handle), FuseOpenOptions::empty()))
@@ -511,7 +511,7 @@
}
fn lookup(&self, _ctx: Context, parent: Inode, name: &CStr) -> io::Result<Entry> {
- let mut inode_table = self.inode_table.lock().unwrap();
+ let inode_table = self.inode_table.read().unwrap();
// Look up the entry's inode number in parent directory.
let inode =
@@ -528,8 +528,8 @@
})?;
// Create the entry's stat if found.
- let st = handle_inode_mut_locked(
- &mut inode_table,
+ let st = handle_inode_locked(
+ &inode_table,
&inode,
|InodeState { entry, handle_ref_count, .. }| {
let st = match entry {
@@ -551,7 +551,9 @@
AccessMode::Variable(attr.mode()),
),
}?;
- *handle_ref_count += 1;
+ if handle_ref_count.fetch_add(1, Ordering::Relaxed) == u64::MAX {
+ panic!("Handle reference count overflow");
+ }
Ok(st)
},
)?;
@@ -566,20 +568,21 @@
}
fn forget(&self, _ctx: Context, inode: Self::Inode, count: u64) {
- let mut inode_table = self.inode_table.lock().unwrap();
+ let mut inode_table = self.inode_table.write().unwrap();
let delete_now = handle_inode_mut_locked(
&mut inode_table,
&inode,
|InodeState { handle_ref_count, unlinked, .. }| {
- if count > *handle_ref_count {
+ let current = handle_ref_count.get_mut();
+ if count > *current {
error!(
"Trying to decrease refcount of inode {} by {} (> current {})",
- inode, count, *handle_ref_count
+ inode, count, *current
);
panic!(); // log to logcat with error!
}
- *handle_ref_count = handle_ref_count.saturating_sub(count);
- Ok(*unlinked && *handle_ref_count == 0)
+ *current -= count;
+ Ok(*unlinked && *current == 0)
},
);
@@ -764,7 +767,7 @@
_handle: Option<Handle>,
valid: SetattrValid,
) -> io::Result<(libc::stat64, Duration)> {
- let mut inode_table = self.inode_table.lock().unwrap();
+ let mut inode_table = self.inode_table.write().unwrap();
handle_inode_mut_locked(&mut inode_table, &inode, |InodeState { entry, .. }| match entry {
AuthFsEntry::VerifiedNew { editor, attr } => {
check_unsupported_setattr_request(valid)?;
@@ -879,7 +882,7 @@
}
fn unlink(&self, _ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
- let mut inode_table = self.inode_table.lock().unwrap();
+ let mut inode_table = self.inode_table.write().unwrap();
handle_inode_mut_locked(
&mut inode_table,
&parent,
@@ -906,7 +909,7 @@
}
fn rmdir(&self, _ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
- let mut inode_table = self.inode_table.lock().unwrap();
+ let mut inode_table = self.inode_table.write().unwrap();
// Check before actual removal, with readonly borrow.
handle_inode_locked(&inode_table, &parent, |inode_state| match &inode_state.entry {
@@ -962,7 +965,7 @@
_size: u32,
offset: u64,
) -> io::Result<Self::DirIter> {
- let dir_handle_table = self.dir_handle_table.lock().unwrap();
+ let dir_handle_table = self.dir_handle_table.read().unwrap();
if let Some(entry) = dir_handle_table.get(&handle) {
Ok(DirEntriesSnapshotIterator {
snapshot: entry.clone(),
@@ -980,7 +983,7 @@
_flags: u32,
handle: Self::Handle,
) -> io::Result<()> {
- let mut dir_handle_table = self.dir_handle_table.lock().unwrap();
+ let mut dir_handle_table = self.dir_handle_table.write().unwrap();
if dir_handle_table.remove(&handle).is_none() {
unreachable!("Unknown directory handle {}, inode {}", handle, inode);
}
@@ -1008,7 +1011,7 @@
st.f_bfree = st.f_bavail;
st.f_ffree = st.f_favail;
// Number of inodes on the filesystem
- st.f_files = self.inode_table.lock().unwrap().len() as u64;
+ st.f_files = self.inode_table.read().unwrap().len() as u64;
Ok(st)
}
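Review note: making `handle_ref_count` an `AtomicU64` lets `lookup` bump the count while holding only the shared read lock on the inode table, whereas `forget` still takes the write lock and can use `get_mut` for plain arithmetic. A small standalone sketch of the two sides (field and function names are illustrative):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

struct InodeState {
    handle_ref_count: AtomicU64,
}

// Under the shared (read) lock: atomic increment, guarding against overflow.
fn bump(state: &InodeState) {
    if state.handle_ref_count.fetch_add(1, Ordering::Relaxed) == u64::MAX {
        panic!("Handle reference count overflow");
    }
}

// Under the exclusive (write) lock: &mut access lets us skip atomics entirely.
fn forget(state: &mut InodeState, count: u64) -> bool {
    let current = state.handle_ref_count.get_mut();
    assert!(count <= *current, "refcount underflow");
    *current -= count;
    *current == 0
}
```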
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
index 7df3b3e..5cd4af8 100644
--- a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -101,8 +101,7 @@
private ExecutorService mThreadPool = Executors.newCachedThreadPool();
@BeforeClassWithInfo
- public static void beforeClassWithDevice(TestInformation testInfo)
- throws DeviceNotAvailableException {
+ public static void beforeClassWithDevice(TestInformation testInfo) throws Exception {
assertNotNull(testInfo.getDevice());
ITestDevice androidDevice = testInfo.getDevice();
sAndroid = new CommandRunner(androidDevice);
@@ -265,8 +264,8 @@
assertTrue(copyFileOnMicrodroid(srcPath, destPath));
// Action
- // Tampering with the first 2 4K block of the backing file.
- sAndroid.run("dd if=/dev/zero of=" + backendPath + " bs=1 count=8192");
+ // Tampering with the first 2 4K-blocks of the backing file.
+ zeroizeFileOnAndroid(backendPath, /* size */ 8192, /* offset */ 0);
// Verify
// Write to a block partially requires a read back to calculate the new hash. It should fail
@@ -289,6 +288,52 @@
}
@Test
+ public void testReadFailedIfDetectsTampering() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+
+ String srcPath = "/system/bin/linker64";
+ String destPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
+ assertTrue(copyFileOnMicrodroid(srcPath, destPath));
+
+ // Action
+ // Tampering with the first 4K-block of the backing file.
+ zeroizeFileOnAndroid(backendPath, /* size */ 4096, /* offset */ 0);
+
+ // Verify
+ // Force dropping the page cache, so that the next read can be validated.
+ runOnMicrodroid("echo 1 > /proc/sys/vm/drop_caches");
+ // A read will fail if the backing data has been tampered with.
+ assertFalse(checkReadAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 0, /* number */ 4096));
+ assertTrue(checkReadAtFileOffsetOnMicrodroid(
+ destPath, /* offset */ 4096, /* number */ 4096));
+ }
+
+ @Test
+ public void testResizeFailedIfDetectsTampering() throws Exception {
+ // Setup
+ runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
+ runAuthFsOnMicrodroid("--remote-new-rw-file 3 --cid " + VMADDR_CID_HOST);
+
+ String outputPath = MOUNT_DIR + "/3";
+ String backendPath = TEST_OUTPUT_DIR + "/out.file";
+ createFileWithOnesOnMicrodroid(outputPath, 8192);
+
+ // Action
+ // Tampering with the last 4K-block of the backing file.
+ zeroizeFileOnAndroid(backendPath, /* size */ 1, /* offset */ 4096);
+
+ // Verify
+ // A resize (to a non-multiple of 4K) will fail if the last backing chunk has been
+ // tampered with. The original data is needed (and must be verified) to calculate the
+ // new hash over the shorter data.
+ assertFalse(resizeFileOnMicrodroid(outputPath, 8000));
+ }
+
+ @Test
public void testFileResize() throws Exception {
// Setup
runFdServerOnAndroid("--open-rw 3:" + TEST_OUTPUT_DIR + "/out.file", "--rw-fds 3");
@@ -304,14 +349,14 @@
backendPath,
"684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
- resizeFileOnMicrodroid(outputPath, 15000);
+ assertTrue(resizeFileOnMicrodroid(outputPath, 15000));
assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 15000);
expectBackingFileConsistency(
outputPath,
backendPath,
"567c89f62586e0d33369157afdfe99a2fa36cdffb01e91dcdc0b7355262d610d");
- resizeFileOnMicrodroid(outputPath, 5000);
+ assertTrue(resizeFileOnMicrodroid(outputPath, 5000));
assertEquals(getFileSizeInBytesOnMicrodroid(outputPath), 5000);
expectBackingFileConsistency(
outputPath,
@@ -340,7 +385,7 @@
"684ad25fdc2bbb80cbc910dd1bde6d5499ccf860ca6ee44704b77ec445271353");
// Regular file operations work, e.g. resize.
- resizeFileOnMicrodroid(authfsPath, 15000);
+ assertTrue(resizeFileOnMicrodroid(authfsPath, 15000));
assertEquals(getFileSizeInBytesOnMicrodroid(authfsPath), 15000);
expectBackingFileConsistency(
authfsPath,
@@ -698,8 +743,9 @@
assertEquals("Inconsistent mode for " + androidPath + " (android)", expected, actual);
}
- private void resizeFileOnMicrodroid(String path, long size) {
- runOnMicrodroid("truncate -c -s " + size + " " + path);
+ private boolean resizeFileOnMicrodroid(String path, long size) {
+ CommandResult result = runOnMicrodroidForResult("truncate -c -s " + size + " " + path);
+ return result.getStatus() == CommandStatus.SUCCESS;
}
private long getFileSizeInBytesOnMicrodroid(String path) {
@@ -711,12 +757,22 @@
"yes $'\\x01' | tr -d '\\n' | dd bs=1 count=" + numberOfOnes + " of=" + filePath);
}
- private boolean writeZerosAtFileOffsetOnMicrodroid(
- String filePath, long offset, long numberOfZeros, boolean writeThrough) {
- String cmd = "dd if=/dev/zero of=" + filePath + " bs=1 count=" + numberOfZeros;
+ private boolean checkReadAtFileOffsetOnMicrodroid(String filePath, long offset, long size) {
+ String cmd = "dd if=" + filePath + " of=/dev/null bs=1 count=" + size;
if (offset > 0) {
cmd += " skip=" + offset;
}
+ CommandResult result = runOnMicrodroidForResult(cmd);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
+ private boolean writeZerosAtFileOffsetOnMicrodroid(
+ String filePath, long offset, long numberOfZeros, boolean writeThrough) {
+ String cmd = "dd if=/dev/zero of=" + filePath + " bs=1 count=" + numberOfZeros
+ + " conv=notrunc";
+ if (offset > 0) {
+ cmd += " seek=" + offset;
+ }
if (writeThrough) {
cmd += " direct";
}
@@ -724,6 +780,12 @@
return result.getStatus() == CommandStatus.SUCCESS;
}
+ private void zeroizeFileOnAndroid(String filePath, long size, long offset)
+ throws DeviceNotAvailableException {
+ sAndroid.run("dd if=/dev/zero of=" + filePath + " bs=1 count=" + size + " conv=notrunc"
+ + " seek=" + offset);
+ }
+
private void runAuthFsOnMicrodroid(String flags) {
String cmd = AUTHFS_BIN + " " + MOUNT_DIR + " " + flags;
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index 63a6fd6..072b90b 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -47,9 +47,6 @@
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
-// Enough memory to complete odrefresh in the VM.
-const VM_MEMORY_MIB: i32 = 1024;
-
/// This owns an instance of the CompOS VM.
pub struct VmInstance {
#[allow(dead_code)] // Keeps the VM alive even if we don't touch it
@@ -69,6 +66,8 @@
pub cpu_set: Option<String>,
/// If present, overrides the path to the VM config JSON file
pub config_path: Option<String>,
+ /// If present, overrides the amount of RAM to give the VM
+ pub memory_mib: Option<i32>,
}
impl VmInstance {
@@ -127,7 +126,7 @@
debugLevel: debug_level,
extraIdsigs: vec![idsig_manifest_apk_fd],
protectedVm: protected_vm,
- memoryMib: VM_MEMORY_MIB,
+ memoryMib: parameters.memory_mib.unwrap_or(0), // 0 means use the default
numCpus: parameters.cpus.map_or(1, NonZeroU32::get) as i32,
cpuAffinity: parameters.cpu_set.clone(),
});
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index f1289e8..587314c 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -32,6 +32,9 @@
use std::sync::{Arc, Mutex, Weak};
use virtualizationservice::IVirtualizationService::IVirtualizationService;
+// Enough memory to complete odrefresh in the VM.
+const VM_MEMORY_MIB: i32 = 1024;
+
pub struct InstanceManager {
service: Strong<dyn IVirtualizationService>,
state: Mutex<State>,
@@ -95,7 +98,7 @@
}
};
let cpu_set = system_properties::read(DEX2OAT_CPU_SET_PROP_NAME)?;
- Ok(VmParameters { cpus, cpu_set, ..Default::default() })
+ Ok(VmParameters { cpus, cpu_set, memory_mib: Some(VM_MEMORY_MIB), ..Default::default() })
}
// Ensures we only run one instance at a time.
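Review note: moving the 1024 MiB constant into composd and making `memory_mib` an `Option` keeps the "use the platform default" value (0) out of callers' hands. A minimal sketch of the pattern with a stand-in parameter struct, not the real `VmParameters`:

```rust
#[derive(Default)]
struct Params {
    cpus: Option<u32>,
    memory_mib: Option<i32>,
}

const VM_MEMORY_MIB: i32 = 1024;

fn build() -> Params {
    // Struct-update syntax: set what we care about, default the rest.
    Params { memory_mib: Some(VM_MEMORY_MIB), ..Default::default() }
}

fn to_config_memory(p: &Params) -> i32 {
    // 0 tells the virtualization service to fall back to its default size.
    p.memory_mib.unwrap_or(0)
}
```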
diff --git a/microdroid/README.md b/microdroid/README.md
index b7c70d5..783f300 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -31,7 +31,7 @@
```sh
banchan com.android.virt aosp_arm64
-m apps_only dist
+UNBUNDLED_BUILD_SDKS_FROM_SOURCES=true m apps_only dist
adb install out/dist/com.android.virt.apex
adb reboot
```
@@ -103,27 +103,15 @@
// directory.
```
-Finally, you build and sign the APK.
+Finally, you build the APK.
```sh
TARGET_BUILD_APPS=MyApp m apps_only dist
-m apksigner
-apksigner sign --ks path_to_keystore out/dist/MyApp.apk
```
-`path_to_keystore` should be replaced with the actual path to the keystore,
-which can be created as follows:
-
-```sh
-keytool -keystore my_keystore -genkey -alias my_key
-```
-
-Make sure that `.apk.idsig` file is also generated in the same directory as the
-signed APK.
-
## Running the app on microdroid
-First of all, install the signed APK to the target device.
+First of all, install the APK to the target device.
```sh
adb install out/dist/MyApp.apk
@@ -142,14 +130,6 @@
```
It shall report a cryptic path similar to `/data/app/~~OgZq==/com.acme.app-HudMahQ==/base.apk`.
-Push idsig of the APK to the device.
-
-```sh
-TEST_ROOT=/data/local/tmp/virt
-adb shell mkdir $TEST_ROOT
-adb push out/dist/MyApp.apk.idsig $TEST_ROOT/
-```
-
Execute the following commands to launch a VM. The VM will boot to microdroid
and then automatically execute your app (the shared library
`MyMicrodroidApp.so`).
diff --git a/pvmfw/pvmfw.img b/pvmfw/pvmfw.img
index 5e99b5f..d7e64c0 100644
--- a/pvmfw/pvmfw.img
+++ b/pvmfw/pvmfw.img
Binary files differ
diff --git a/tests/common.cc b/tests/common.cc
index 9602283..5d32351 100644
--- a/tests/common.cc
+++ b/tests/common.cc
@@ -14,14 +14,22 @@
* limitations under the License.
*/
+#include <android/sysprop/HypervisorProperties.sysprop.h>
+
#include "virt/VirtualizationTest.h"
+using android::sysprop::HypervisorProperties::hypervisor_protected_vm_supported;
+using android::sysprop::HypervisorProperties::hypervisor_vm_supported;
+
namespace {
bool isVmSupported() {
- const std::array<const char *, 4> needed_files = {
- "/dev/kvm",
- "/dev/vhost-vsock",
+ bool has_capability = hypervisor_vm_supported().value_or(false) ||
+ hypervisor_protected_vm_supported().value_or(false);
+ if (!has_capability) {
+ return false;
+ }
+ const std::array<const char *, 2> needed_files = {
"/apex/com.android.virt/bin/crosvm",
"/apex/com.android.virt/bin/virtualizationservice",
};
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index fa9da9d..dfc2f2b 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -10,7 +10,6 @@
"general-tests",
],
libs: [
- "gson-prebuilt-jar",
"tradefed",
],
static_libs: [
diff --git a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
index 0f6204c..8e86fd1 100644
--- a/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
+++ b/tests/hostside/helper/java/android/virt/test/VirtualizationTestCaseBase.java
@@ -20,11 +20,13 @@
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
import com.android.compatibility.common.tradefed.build.CompatibilityBuildHelper;
import com.android.tradefed.build.IBuildInfo;
import com.android.tradefed.device.DeviceNotAvailableException;
import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.device.TestDevice;
import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
import com.android.tradefed.util.CommandResult;
@@ -97,15 +99,10 @@
}
}
- public static void testIfDeviceIsCapable(ITestDevice androidDevice)
- throws DeviceNotAvailableException {
- CommandRunner android = new CommandRunner(androidDevice);
-
- // Checks the preconditions to run microdroid. If the condition is not satisfied
- // don't run the test (instead of failing)
- android.assumeSuccess("ls /dev/kvm");
- android.assumeSuccess("ls /dev/vhost-vsock");
- android.assumeSuccess("ls /apex/com.android.virt");
+ public static void testIfDeviceIsCapable(ITestDevice androidDevice) throws Exception {
+ assumeTrue("Need an actual TestDevice", androidDevice instanceof TestDevice);
+ TestDevice testDevice = (TestDevice) androidDevice;
+ assumeTrue("Requires VM support", testDevice.supportsMicrodroid());
}
// Run an arbitrary command in the host side and returns the result
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
index 58935d8..51d62cb 100644
--- a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -35,16 +35,15 @@
import com.android.tradefed.util.FileUtil;
import com.android.tradefed.util.RunUtil;
-import com.google.gson.Gson;
-import com.google.gson.annotations.SerializedName;
-
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.io.File;
-import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -115,34 +114,8 @@
.contains("android.permission.MANAGE_VIRTUAL_MACHINE permission"));
}
- // Helper classes for (de)serialization of VM raw configs
- static class VmRawConfig {
- String bootloader;
- List<Disk> disks;
- int memory_mib;
- @SerializedName("protected")
- boolean isProtected;
- String platform_version;
- }
-
- static class Disk {
- List<Partition> partitions;
- boolean writable;
- public void addPartition(String label, String path) {
- if (partitions == null) {
- partitions = new ArrayList<Partition>();
- }
- Partition partition = new Partition();
- partition.label = label;
- partition.path = path;
- partitions.add(partition);
- }
- }
-
- static class Partition {
- String label;
- String path;
- boolean writable;
+ private static JSONObject newPartition(String label, String path) {
+ return new JSONObject(Map.of("label", label, "path", path));
}
private void resignVirtApex(File virtApexDir, File signingKey, Map<String, File> keyOverrides) {
@@ -195,7 +168,7 @@
private String runMicrodroidWithResignedImages(File key, Map<String, File> keyOverrides,
boolean isProtected, boolean daemonize, String consolePath)
- throws DeviceNotAvailableException, IOException {
+ throws DeviceNotAvailableException, IOException, JSONException {
CommandRunner android = new CommandRunner(getDevice());
File virtApexDir = FileUtil.createTempDir("virt_apex");
@@ -244,44 +217,45 @@
// - its idsig
// Load etc/microdroid.json
- Gson gson = new Gson();
File microdroidConfigFile = new File(virtApexEtcDir, "microdroid.json");
- VmRawConfig config = gson.fromJson(new FileReader(microdroidConfigFile),
- VmRawConfig.class);
+ JSONObject config = new JSONObject(FileUtil.readStringFromFile(microdroidConfigFile));
// Replace paths so that the config uses re-signed images from TEST_ROOT
- config.bootloader = config.bootloader.replace(VIRT_APEX, TEST_ROOT);
- for (Disk disk : config.disks) {
- for (Partition part : disk.partitions) {
- part.path = part.path.replace(VIRT_APEX, TEST_ROOT);
+ config.put("bootloader", config.getString("bootloader").replace(VIRT_APEX, TEST_ROOT));
+ JSONArray disks = config.getJSONArray("disks");
+ for (int diskIndex = 0; diskIndex < disks.length(); diskIndex++) {
+ JSONObject disk = disks.getJSONObject(diskIndex);
+ JSONArray partitions = disk.getJSONArray("partitions");
+ for (int partIndex = 0; partIndex < partitions.length(); partIndex++) {
+ JSONObject part = partitions.getJSONObject(partIndex);
+ part.put("path", part.getString("path").replace(VIRT_APEX, TEST_ROOT));
}
}
// Add partitions to the second disk
- Disk secondDisk = config.disks.get(1);
- secondDisk.addPartition("vbmeta",
- TEST_ROOT + "etc/fs/microdroid_vbmeta_bootconfig.img");
- secondDisk.addPartition("bootconfig",
- TEST_ROOT + "etc/microdroid_bootconfig.full_debuggable");
- secondDisk.addPartition("vm-instance", instanceImgPath);
+ final String vbmetaPath = TEST_ROOT + "etc/fs/microdroid_vbmeta_bootconfig.img";
+ final String bootconfigPath = TEST_ROOT + "etc/microdroid_bootconfig.full_debuggable";
+ disks.getJSONObject(1).getJSONArray("partitions")
+ .put(newPartition("vbmeta", vbmetaPath))
+ .put(newPartition("bootconfig", bootconfigPath))
+ .put(newPartition("vm-instance", instanceImgPath));
// Add payload image disk with partitions:
// - payload-metadata
// - apexes: com.android.os.statsd, com.android.adbd
// - apk and idsig
- Disk payloadDisk = new Disk();
- payloadDisk.addPartition("payload-metadata", payloadMetadataPath);
- payloadDisk.addPartition("microdroid-apex-0", statsdApexPath);
- payloadDisk.addPartition("microdroid-apex-1", adbdApexPath);
- payloadDisk.addPartition("microdroid-apk", apkPath);
- payloadDisk.addPartition("microdroid-apk-idsig", idSigPath);
- config.disks.add(payloadDisk);
+ disks.put(new JSONObject().put("writable", false).put("partitions", new JSONArray()
+ .put(newPartition("payload-metadata", payloadMetadataPath))
+ .put(newPartition("microdroid-apex-0", statsdApexPath))
+ .put(newPartition("microdroid-apex-1", adbdApexPath))
+ .put(newPartition("microdroid-apk", apkPath))
+ .put(newPartition("microdroid-apk-idsig", idSigPath))));
- config.isProtected = isProtected;
+ config.put("protected", isProtected);
// Write updated raw config
final String configPath = TEST_ROOT + "raw_config.json";
- getDevice().pushString(gson.toJson(config), configPath);
+ getDevice().pushString(config.toString(), configPath);
final String logPath = TEST_ROOT + "log";
final String ret = android.runWithTimeout(
diff --git a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
index 27e1846..8df853d 100644
--- a/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
+++ b/tests/testapk/src/java/com/android/microdroid/test/MicrodroidTests.java
@@ -443,6 +443,8 @@
UUID.fromString("7e8221e7-03e6-4969-948b-73a4c809a4f2");
private static final UUID U_BOOT_ENV_PARTITION_UUID =
UUID.fromString("0ab72d30-86ae-4d05-81b2-c1760be2b1f9");
+ private static final UUID PVM_FW_PARTITION_UUID =
+ UUID.fromString("90d2174a-038a-4bc6-adf3-824848fc5825");
private static final long BLOCK_SIZE = 512;
// Find the starting offset which holds the data of a partition having UUID.
@@ -486,42 +488,98 @@
return payloadStarted.getNow(false);
}
- @Test
- public void bootFailsWhenInstanceDiskIsCompromised()
+ private RandomAccessFile prepareInstanceImage(String vmName)
throws VirtualMachineException, InterruptedException, IOException {
- assume().withMessage("Skip on Cuttlefish. b/195765441")
- .that(android.os.Build.DEVICE)
- .isNotEqualTo("vsoc_x86_64");
-
VirtualMachineConfig config = mInner.newVmConfigBuilder("assets/vm_config.json")
.debugLevel(DebugLevel.NONE)
.build();
// Remove any existing VM so we can start from scratch
- VirtualMachine oldVm = mInner.mVmm.getOrCreate("test_vm_integrity", config);
+ VirtualMachine oldVm = mInner.mVmm.getOrCreate(vmName, config);
oldVm.delete();
- mInner.mVmm.getOrCreate("test_vm_integrity", config);
+ mInner.mVmm.getOrCreate(vmName, config);
- assertThat(tryBootVm("test_vm_integrity")).isTrue();
+ assertThat(tryBootVm(vmName)).isTrue();
- // Launch the same VM after flipping a bit of the instance image.
- // Flip actual data, as flipping trivial bits like the magic string isn't interesting.
File vmRoot = new File(mInner.mContext.getFilesDir(), "vm");
- File vmDir = new File(vmRoot, "test_vm_integrity");
+ File vmDir = new File(vmRoot, vmName);
File instanceImgPath = new File(vmDir, "instance.img");
- RandomAccessFile instanceFile = new RandomAccessFile(instanceImgPath, "rw");
+ return new RandomAccessFile(instanceImgPath, "rw");
- // partitions may or may not exist
- for (UUID uuid :
- new UUID[] {
- MICRODROID_PARTITION_UUID, U_BOOT_AVB_PARTITION_UUID, U_BOOT_ENV_PARTITION_UUID
- }) {
- OptionalLong offset = findPartitionDataOffset(instanceFile, uuid);
- if (!offset.isPresent()) continue;
+ }
- flipBit(instanceFile, offset.getAsLong());
- assertThat(tryBootVm("test_vm_integrity")).isFalse();
- flipBit(instanceFile, offset.getAsLong());
+ private void assertThatPartitionIsMissing(UUID partitionUuid)
+ throws VirtualMachineException, InterruptedException, IOException {
+ RandomAccessFile instanceFile = prepareInstanceImage("test_vm_integrity");
+ assertThat(findPartitionDataOffset(instanceFile, partitionUuid).isPresent())
+ .isFalse();
+ }
+
+ // Flips a bit of the given partition, then checks that the VM fails to boot.
+ private void assertThatBootFailsAfterCompromisingPartition(UUID partitionUuid)
+ throws VirtualMachineException, InterruptedException, IOException {
+ RandomAccessFile instanceFile = prepareInstanceImage("test_vm_integrity");
+ OptionalLong offset = findPartitionDataOffset(instanceFile, partitionUuid);
+ assertThat(offset.isPresent()).isTrue();
+
+ flipBit(instanceFile, offset.getAsLong());
+ assertThat(tryBootVm("test_vm_integrity")).isFalse();
+ }
+
+ @Test
+ public void bootFailsWhenMicrodroidDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ assertThatBootFailsAfterCompromisingPartition(MICRODROID_PARTITION_UUID);
+ }
+
+ @Test
+ public void bootFailsWhenUBootAvbDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ if (mProtectedVm) {
+ // TODO(b/218461230): uncomment this after u-boot update
+ // assertThatBootFailsAfterCompromisingPartition(U_BOOT_AVB_PARTITION_UUID);
+ } else {
+ // non-protected VM shouldn't have u-boot avb data
+ assertThatPartitionIsMissing(U_BOOT_AVB_PARTITION_UUID);
+ }
+ }
+
+ @Test
+ public void bootFailsWhenUBootEnvDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ if (mProtectedVm) {
+ // TODO(b/218461230): uncomment this after u-boot update
+ // assertThatBootFailsAfterCompromisingPartition(U_BOOT_ENV_PARTITION_UUID);
+ } else {
+ // non-protected VM shouldn't have u-boot env data
+ assertThatPartitionIsMissing(U_BOOT_ENV_PARTITION_UUID);
+ }
+ }
+
+ @Test
+ public void bootFailsWhenPvmFwDataIsCompromised()
+ throws VirtualMachineException, InterruptedException, IOException {
+ assume().withMessage("Skip on Cuttlefish. b/195765441")
+ .that(android.os.Build.DEVICE)
+ .isNotEqualTo("vsoc_x86_64");
+
+ if (mProtectedVm) {
+ assertThatBootFailsAfterCompromisingPartition(PVM_FW_PARTITION_UUID);
+ } else {
+ // non-protected VM shouldn't have pvmfw data
+ assertThatPartitionIsMissing(PVM_FW_PARTITION_UUID);
}
}
}
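Review note: each of the refactored tests reduces to "flip one bit at the partition's data offset and expect boot to fail"; the Java side does this with `RandomAccessFile`. A hedged Rust equivalent of just the bit-flip step, with placeholder path and offset:

```rust
use std::fs::OpenOptions;
use std::io::{self, Read, Seek, SeekFrom, Write};

/// Flip the lowest bit of the byte at `offset`, corrupting the partition data.
fn flip_bit(path: &str, offset: u64) -> io::Result<()> {
    let mut file = OpenOptions::new().read(true).write(true).open(path)?;
    file.seek(SeekFrom::Start(offset))?;
    let mut byte = [0u8; 1];
    file.read_exact(&mut byte)?;
    byte[0] ^= 1;
    file.seek(SeekFrom::Start(offset))?;
    file.write_all(&byte)?;
    Ok(())
}
```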
diff --git a/virtualizationservice/Android.bp b/virtualizationservice/Android.bp
index 981a041..9b2b740 100644
--- a/virtualizationservice/Android.bp
+++ b/virtualizationservice/Android.bp
@@ -34,6 +34,7 @@
"liblog_rust",
"libmicrodroid_metadata",
"libmicrodroid_payload_config",
+ "libnix",
"libonce_cell",
"libregex",
"librustutils",
diff --git a/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl b/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl
index d736f1b..7b80fc9 100644
--- a/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl
+++ b/virtualizationservice/aidl/android/system/virtualizationservice/DeathReason.aidl
@@ -34,4 +34,12 @@
REBOOT = 5,
/** The VM or crosvm crashed. */
CRASH = 6,
+ /** The pVM firmware failed to verify the VM because the public key doesn't match. */
+ PVM_FIRMWARE_PUBLIC_KEY_MISMATCH = 7,
+ /** The pVM firmware failed to verify the VM because the instance image changed. */
+ PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED = 8,
+ /** The bootloader failed to verify the VM because the public key doesn't match. */
+ BOOTLOADER_PUBLIC_KEY_MISMATCH = 9,
+ /** The bootloader failed to verify the VM because the instance image changed. */
+ BOOTLOADER_INSTANCE_IMAGE_CHANGED = 10,
}
diff --git a/virtualizationservice/src/crosvm.rs b/virtualizationservice/src/crosvm.rs
index 94cb78f..f1b179e 100644
--- a/virtualizationservice/src/crosvm.rs
+++ b/virtualizationservice/src/crosvm.rs
@@ -20,12 +20,13 @@
use command_fds::CommandFdExt;
use log::{debug, error, info};
use semver::{Version, VersionReq};
+use nix::{fcntl::OFlag, unistd::pipe2};
use shared_child::SharedChild;
use std::fs::{remove_dir_all, File};
-use std::io;
+use std::io::{self, Read};
use std::mem;
use std::num::NonZeroU32;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, RawFd, FromRawFd};
use std::path::PathBuf;
use std::process::{Command, ExitStatus};
use std::sync::{Arc, Mutex};
@@ -114,12 +115,14 @@
fn start(&mut self, instance: Arc<VmInstance>) -> Result<(), Error> {
let state = mem::replace(self, VmState::Failed);
if let VmState::NotStarted { config } = state {
+ let (failure_pipe_read, failure_pipe_write) = create_pipe()?;
+
// If this fails and returns an error, `self` will be left in the `Failed` state.
- let child = Arc::new(run_vm(config)?);
+ let child = Arc::new(run_vm(config, failure_pipe_write)?);
let child_clone = child.clone();
thread::spawn(move || {
- instance.monitor(child_clone);
+ instance.monitor(child_clone, failure_pipe_read);
});
// If it started correctly, update the state.
@@ -198,7 +201,7 @@
///
/// This takes a separate reference to the `SharedChild` rather than using the one in
/// `self.vm_state` to avoid holding the lock on `vm_state` while it is running.
- fn monitor(&self, child: Arc<SharedChild>) {
+ fn monitor(&self, child: Arc<SharedChild>, mut failure_pipe_read: File) {
let result = child.wait();
match &result {
Err(e) => error!("Error waiting for crosvm({}) instance to die: {}", child.id(), e),
@@ -210,7 +213,16 @@
// Ensure that the mutex is released before calling the callbacks.
drop(vm_state);
- self.callbacks.callback_on_died(self.cid, death_reason(&result));
+ let mut failure_string = String::new();
+ let failure_read_result = failure_pipe_read.read_to_string(&mut failure_string);
+ if let Err(e) = &failure_read_result {
+ error!("Error reading VM failure reason from pipe: {}", e);
+ }
+ if !failure_string.is_empty() {
+ info!("VM returned failure reason '{}'", failure_string);
+ }
+
+ self.callbacks.callback_on_died(self.cid, death_reason(&result, &failure_string));
// Delete temporary files.
if let Err(e) = remove_dir_all(&self.temporary_directory) {
@@ -250,8 +262,21 @@
}
}
-fn death_reason(result: &Result<ExitStatus, io::Error>) -> DeathReason {
+fn death_reason(result: &Result<ExitStatus, io::Error>, failure_reason: &str) -> DeathReason {
if let Ok(status) = result {
+ match failure_reason {
+ "PVM_FIRMWARE_PUBLIC_KEY_MISMATCH" => {
+ return DeathReason::PVM_FIRMWARE_PUBLIC_KEY_MISMATCH
+ }
+ "PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED" => {
+ return DeathReason::PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED
+ }
+ "BOOTLOADER_PUBLIC_KEY_MISMATCH" => return DeathReason::BOOTLOADER_PUBLIC_KEY_MISMATCH,
+ "BOOTLOADER_INSTANCE_IMAGE_CHANGED" => {
+ return DeathReason::BOOTLOADER_INSTANCE_IMAGE_CHANGED
+ }
+ _ => {}
+ }
match status.code() {
None => DeathReason::KILLED,
Some(0) => DeathReason::SHUTDOWN,
@@ -266,7 +291,7 @@
}
/// Starts an instance of `crosvm` to manage a new VM.
-fn run_vm(config: CrosvmConfig) -> Result<SharedChild, Error> {
+fn run_vm(config: CrosvmConfig, failure_pipe_write: File) -> Result<SharedChild, Error> {
validate_config(&config)?;
let mut command = Command::new(CROSVM_PATH);
@@ -306,27 +331,25 @@
// Setup the serial devices.
// 1. uart device: used as the output device by bootloaders and as early console by linux
- // 2. virtio-console device: used as the console device where kmsg is redirected to
- // 3. virtio-console device: used as the androidboot.console device (not used currently)
- // 4. virtio-console device: used as the logcat output
- // 2. uart device: used to report the reason the VM failed.
+ // 3. virtio-console device: used as the console device where kmsg is redirected to
+ // 4. virtio-console device: used as the androidboot.console device (not used currently)
+ // 5. virtio-console device: used as the logcat output
//
// When [console|log]_fd is not specified, the devices are attached to sink, which means what's
// written there is discarded.
- let mut format_serial_arg = |fd: &Option<File>| {
- let path = fd.as_ref().map(|fd| add_preserved_fd(&mut preserved_fds, fd));
- let type_arg = path.as_ref().map_or("type=sink", |_| "type=file");
- let path_arg = path.as_ref().map_or(String::new(), |path| format!(",path={}", path));
- format!("{}{}", type_arg, path_arg)
- };
- let console_arg = format_serial_arg(&config.console_fd);
- let log_arg = format_serial_arg(&config.log_fd);
+ let console_arg = format_serial_arg(&mut preserved_fds, &config.console_fd);
+ let log_arg = format_serial_arg(&mut preserved_fds, &config.log_fd);
+ let failure_serial_path = add_preserved_fd(&mut preserved_fds, &failure_pipe_write);
// Warning: Adding more serial devices requires you to shift the PCI device ID of the boot
// disks in bootconfig.x86_64. This is because x86 crosvm puts serial devices and the block
// devices in the same PCI bus and serial devices comes before the block devices. Arm crosvm
// doesn't have the issue.
// /dev/ttyS0
- command.arg(format!("--serial={},hardware=serial", &console_arg));
+ command.arg(format!("--serial={},hardware=serial,num=1", &console_arg));
+ // /dev/ttyS1
+ command.arg(format!("--serial=type=file,path={},hardware=serial,num=2", &failure_serial_path));
// /dev/hvc0
command.arg(format!("--serial={},hardware=virtio-console,num=1", &console_arg));
// /dev/hvc1 (not used currently)
@@ -393,3 +416,22 @@
preserved_fds.push(fd);
format!("/proc/self/fd/{}", fd)
}
+
+/// Adds the file descriptor for `file` (if any) to `preserved_fds`, and returns the appropriate
+/// string for a crosvm `--serial` flag. If `file` is `None`, a dummy sink device is used.
+fn format_serial_arg(preserved_fds: &mut Vec<RawFd>, file: &Option<File>) -> String {
+ if let Some(file) = file {
+ format!("type=file,path={}", add_preserved_fd(preserved_fds, file))
+ } else {
+ "type=sink".to_string()
+ }
+}
+
+/// Creates a new pipe with the `O_CLOEXEC` flag set, and returns the read side and write side.
+fn create_pipe() -> Result<(File, File), Error> {
+ let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
+ // SAFETY: We are the sole owners of these fds as they were just created.
+ let read_fd = unsafe { File::from_raw_fd(raw_read) };
+ let write_fd = unsafe { File::from_raw_fd(raw_write) };
+ Ok((read_fd, write_fd))
+}
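Review note: the failure pipe gives the guest (via the second uart) a side channel to report why a pVM died; virtualizationservice keeps the read end and maps the string onto the new `DeathReason` values. A trimmed sketch of the flow using `nix::unistd::pipe2`, with error handling and the real `--serial` plumbing omitted:

```rust
use nix::fcntl::OFlag;
use nix::unistd::pipe2;
use std::fs::File;
use std::io::Read;
use std::os::unix::io::FromRawFd;

fn create_failure_pipe() -> nix::Result<(File, File)> {
    // O_CLOEXEC so only the FD we deliberately preserve reaches crosvm.
    let (raw_read, raw_write) = pipe2(OFlag::O_CLOEXEC)?;
    // SAFETY: both FDs were just created and are owned by nobody else.
    Ok(unsafe { (File::from_raw_fd(raw_read), File::from_raw_fd(raw_write)) })
}

fn read_failure_reason(mut read_end: File) -> String {
    // Called after crosvm exits; the write end is closed, so this hits EOF.
    let mut reason = String::new();
    let _ = read_end.read_to_string(&mut reason);
    reason
}
```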
diff --git a/virtualizationservice/src/main.rs b/virtualizationservice/src/main.rs
index 43e46a3..7bfb531 100644
--- a/virtualizationservice/src/main.rs
+++ b/virtualizationservice/src/main.rs
@@ -40,7 +40,10 @@
fn main() {
android_logger::init_once(
- android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Info),
+ android_logger::Config::default()
+ .with_tag(LOG_TAG)
+ .with_min_level(Level::Info)
+ .with_log_id(android_logger::LogId::System),
);
clear_temporary_files().expect("Failed to delete old temporary files");