Merge "set_requesting_sid is now on IBinderInternal."
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index a6b1f95..00f34b9 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -4,6 +4,7 @@
bpfmt = true
clang_format = true
jsonlint = true
+google_java_format = true
pylint3 = true
rustfmt = true
xmllint = true
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 4a20a0c..85f2abb 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -52,7 +52,13 @@
name: "authfs_device_test_src_lib",
defaults: ["authfs_defaults"],
test_suites: ["device-tests"],
- data: [
+ data: [":authfs_test_files"],
+}
+
+filegroup {
+ name: "authfs_test_files",
+ srcs: [
+ "testdata/cert.der",
"testdata/input.4k",
"testdata/input.4k.fsv_sig",
"testdata/input.4k.merkle_dump",
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index 628ee3c..189f43a 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -45,4 +45,10 @@
/** Returns the fs-verity signature of the given file ID. */
byte[] readFsveritySignature(int id);
+
+ /**
+ * Writes the buffer to the given file ID from the file's offset. Returns the number of bytes
+ * written.
+ */
+ int writeFile(int id, in byte[] buf, long offset);
}
diff --git a/authfs/fd_server/Android.bp b/authfs/fd_server/Android.bp
index 6f010ce..9c810a8 100644
--- a/authfs/fd_server/Android.bp
+++ b/authfs/fd_server/Android.bp
@@ -7,6 +7,7 @@
srcs: ["src/main.rs"],
rustlibs: [
"authfs_aidl_interface-rust",
+ "libandroid_logger",
"libanyhow",
"libbinder_rs",
"libclap",
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index b078846..44817d5 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -42,7 +42,8 @@
BnVirtFdService, IVirtFdService, ERROR_IO, ERROR_UNKNOWN_FD, MAX_REQUESTING_DATA,
};
use authfs_aidl_interface::binder::{
- add_service, ExceptionCode, Interface, ProcessState, Result as BinderResult, Status, Strong,
+ add_service, ExceptionCode, Interface, ProcessState, Result as BinderResult, Status,
+ StatusCode, Strong,
};
const SERVICE_NAME: &str = "authfs_fd_server";
@@ -70,38 +71,41 @@
}
}
-/// Configuration of a read-only file to serve by this server. The file is supposed to be verifiable
-/// with the associated fs-verity metadata.
-struct ReadonlyFdConfig {
- /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
- file: File,
+/// Configuration of a file descriptor to be served/exposed/shared.
+enum FdConfig {
+ /// A read-only file to serve by this server. The file is supposed to be verifiable with the
+ /// associated fs-verity metadata.
+ Readonly {
+ /// The file to read from. fs-verity metadata can be retrieved from this file's FD.
+ file: File,
- /// Alternative Merkle tree stored in another file.
- alt_merkle_file: Option<File>,
+ /// Alternative Merkle tree stored in another file.
+ alt_merkle_tree: Option<File>,
- /// Alternative signature stored in another file.
- alt_signature_file: Option<File>,
+ /// Alternative signature stored in another file.
+ alt_signature: Option<File>,
+ },
+
+ /// A readable/writable file to serve by this server. This backing file should just be a
+ /// regular file and does not have any specific property.
+ ReadWrite(File),
}
struct FdService {
- /// A pool of read-only files
- fd_pool: BTreeMap<i32, ReadonlyFdConfig>,
+ /// A pool of opened files, may be readonly or read-writable.
+ fd_pool: BTreeMap<i32, FdConfig>,
}
impl FdService {
- pub fn new_binder(fd_pool: BTreeMap<i32, ReadonlyFdConfig>) -> Strong<dyn IVirtFdService> {
+ pub fn new_binder(fd_pool: BTreeMap<i32, FdConfig>) -> Strong<dyn IVirtFdService> {
let result = BnVirtFdService::new_binder(FdService { fd_pool });
result.as_binder().set_requesting_sid(false);
result
}
- fn get_file_config(&self, id: i32) -> BinderResult<&ReadonlyFdConfig> {
+ fn get_file_config(&self, id: i32) -> BinderResult<&FdConfig> {
self.fd_pool.get(&id).ok_or_else(|| Status::from(ERROR_UNKNOWN_FD))
}
-
- fn get_file(&self, id: i32) -> BinderResult<&File> {
- Ok(&self.get_file_config(id)?.file)
- }
}
impl Interface for FdService {}
@@ -111,38 +115,88 @@
let size: usize = validate_and_cast_size(size)?;
let offset: u64 = validate_and_cast_offset(offset)?;
- read_into_buf(self.get_file(id)?, size, offset).map_err(|e| {
- error!("readFile: read error: {}", e);
- Status::from(ERROR_IO)
- })
+ match self.get_file_config(id)? {
+ FdConfig::Readonly { file, .. } | FdConfig::ReadWrite(file) => {
+ read_into_buf(&file, size, offset).map_err(|e| {
+ error!("readFile: read error: {}", e);
+ Status::from(ERROR_IO)
+ })
+ }
+ }
}
fn readFsverityMerkleTree(&self, id: i32, offset: i64, size: i32) -> BinderResult<Vec<u8>> {
let size: usize = validate_and_cast_size(size)?;
let offset: u64 = validate_and_cast_offset(offset)?;
- if let Some(file) = &self.get_file_config(id)?.alt_merkle_file {
- read_into_buf(&file, size, offset).map_err(|e| {
- error!("readFsverityMerkleTree: read error: {}", e);
- Status::from(ERROR_IO)
- })
- } else {
- // TODO(victorhsieh) retrieve from the fd when the new ioctl is ready
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Not implemented yet"))
+ match &self.get_file_config(id)? {
+ FdConfig::Readonly { alt_merkle_tree, .. } => {
+ if let Some(file) = &alt_merkle_tree {
+ read_into_buf(&file, size, offset).map_err(|e| {
+ error!("readFsverityMerkleTree: read error: {}", e);
+ Status::from(ERROR_IO)
+ })
+ } else {
+ // TODO(victorhsieh) retrieve from the fd when the new ioctl is ready
+ Err(new_binder_exception(
+ ExceptionCode::UNSUPPORTED_OPERATION,
+ "Not implemented yet",
+ ))
+ }
+ }
+ FdConfig::ReadWrite(_file) => {
+ // For a writable file, Merkle tree is not expected to be served since Auth FS
+ // doesn't trust it anyway. Auth FS may keep the Merkle tree privately for its own
+ // use.
+ Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+ }
}
}
fn readFsveritySignature(&self, id: i32) -> BinderResult<Vec<u8>> {
- if let Some(file) = &self.get_file_config(id)?.alt_signature_file {
- // Supposedly big enough buffer size to store signature.
- let size = MAX_REQUESTING_DATA as usize;
- read_into_buf(&file, size, 0).map_err(|e| {
- error!("readFsveritySignature: read error: {}", e);
- Status::from(ERROR_IO)
- })
- } else {
- // TODO(victorhsieh) retrieve from the fd when the new ioctl is ready
- Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Not implemented yet"))
+ match &self.get_file_config(id)? {
+ FdConfig::Readonly { alt_signature, .. } => {
+ if let Some(file) = &alt_signature {
+ // Supposedly big enough buffer size to store signature.
+ let size = MAX_REQUESTING_DATA as usize;
+ read_into_buf(&file, size, 0).map_err(|e| {
+ error!("readFsveritySignature: read error: {}", e);
+ Status::from(ERROR_IO)
+ })
+ } else {
+ // TODO(victorhsieh) retrieve from the fd when the new ioctl is ready
+ Err(new_binder_exception(
+ ExceptionCode::UNSUPPORTED_OPERATION,
+ "Not implemented yet",
+ ))
+ }
+ }
+ FdConfig::ReadWrite(_file) => {
+ // There is no signature for a writable file.
+ Err(new_binder_exception(ExceptionCode::UNSUPPORTED_OPERATION, "Unsupported"))
+ }
+ }
+ }
+
+ fn writeFile(&self, id: i32, buf: &[u8], offset: i64) -> BinderResult<i32> {
+ match &self.get_file_config(id)? {
+ FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+ FdConfig::ReadWrite(file) => {
+ let offset: u64 = offset.try_into().map_err(|_| {
+ new_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT, "Invalid offset")
+ })?;
+ // Check buffer size just to make `as i32` safe below.
+ if buf.len() > i32::MAX as usize {
+ return Err(new_binder_exception(
+ ExceptionCode::ILLEGAL_ARGUMENT,
+ "Buffer size is too big",
+ ));
+ }
+ Ok(file.write_at(buf, offset).map_err(|e| {
+ error!("writeFile: write error: {}", e);
+ Status::from(ERROR_IO)
+ })? as i32)
+ }
}
}
}
@@ -169,29 +223,42 @@
Ok(unsafe { File::from_raw_fd(fd) })
}
-fn parse_arg_ro_fds(arg: &str) -> Result<(i32, ReadonlyFdConfig)> {
+fn parse_arg_ro_fds(arg: &str) -> Result<(i32, FdConfig)> {
let result: Result<Vec<i32>, _> = arg.split(':').map(|x| x.parse::<i32>()).collect();
let fds = result?;
if fds.len() > 3 {
bail!("Too many options: {}", arg);
}
-
Ok((
fds[0],
- ReadonlyFdConfig {
+ FdConfig::Readonly {
file: fd_to_file(fds[0])?,
- alt_merkle_file: fds.get(1).map(|fd| fd_to_file(*fd)).transpose()?,
- alt_signature_file: fds.get(2).map(|fd| fd_to_file(*fd)).transpose()?,
+ // Alternative Merkle tree, if provided
+ alt_merkle_tree: fds.get(1).map(|fd| fd_to_file(*fd)).transpose()?,
+ // Alternative signature, if provided
+ alt_signature: fds.get(2).map(|fd| fd_to_file(*fd)).transpose()?,
},
))
}
-fn parse_args() -> Result<BTreeMap<i32, ReadonlyFdConfig>> {
+fn parse_arg_rw_fds(arg: &str) -> Result<(i32, FdConfig)> {
+ let fd = arg.parse::<i32>()?;
+ let file = fd_to_file(fd)?;
+ if file.metadata()?.len() > 0 {
+ bail!("File is expected to be empty");
+ }
+ Ok((fd, FdConfig::ReadWrite(file)))
+}
+
+fn parse_args() -> Result<BTreeMap<i32, FdConfig>> {
#[rustfmt::skip]
let matches = clap::App::new("fd_server")
.arg(clap::Arg::with_name("ro-fds")
.long("ro-fds")
- .required(true)
+ .multiple(true)
+ .number_of_values(1))
+ .arg(clap::Arg::with_name("rw-fds")
+ .long("rw-fds")
.multiple(true)
.number_of_values(1))
.get_matches();
@@ -203,10 +270,20 @@
fd_pool.insert(fd, config);
}
}
+ if let Some(args) = matches.values_of("rw-fds") {
+ for arg in args {
+ let (fd, config) = parse_arg_rw_fds(arg)?;
+ fd_pool.insert(fd, config);
+ }
+ }
Ok(fd_pool)
}
fn main() -> Result<()> {
+ android_logger::init_once(
+ android_logger::Config::default().with_tag("fd_server").with_min_level(log::Level::Debug),
+ );
+
let fd_pool = parse_args()?;
ProcessState::start_thread_pool();
diff --git a/authfs/src/common.rs b/authfs/src/common.rs
index 522397f..6556fde 100644
--- a/authfs/src/common.rs
+++ b/authfs/src/common.rs
@@ -20,3 +20,59 @@
pub fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
(dividend + divisor - 1) / divisor
}
+
+/// Given `offset` and `length`, generates (offset, size) tuples that together form the same length,
+/// and aligned to `alignment`.
+pub struct ChunkedSizeIter {
+ remaining: usize,
+ offset: u64,
+ alignment: usize,
+}
+
+impl ChunkedSizeIter {
+ pub fn new(remaining: usize, offset: u64, alignment: usize) -> Self {
+ ChunkedSizeIter { remaining, offset, alignment }
+ }
+}
+
+impl Iterator for ChunkedSizeIter {
+ type Item = (u64, usize);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.remaining == 0 {
+ return None;
+ }
+ let chunk_data_size = std::cmp::min(
+ self.remaining,
+ self.alignment - (self.offset % self.alignment as u64) as usize,
+ );
+ let retval = (self.offset, chunk_data_size);
+ self.offset += chunk_data_size as u64;
+ self.remaining = self.remaining.saturating_sub(chunk_data_size);
+ Some(retval)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn collect_chunk_read_iter(remaining: usize, offset: u64) -> Vec<(u64, usize)> {
+ ChunkedSizeIter::new(remaining, offset, 4096).collect::<Vec<_>>()
+ }
+
+ #[test]
+ fn test_chunk_read_iter() {
+ assert_eq!(collect_chunk_read_iter(4096, 0), [(0, 4096)]);
+ assert_eq!(collect_chunk_read_iter(8192, 0), [(0, 4096), (4096, 4096)]);
+ assert_eq!(collect_chunk_read_iter(8192, 4096), [(4096, 4096), (8192, 4096)]);
+
+ assert_eq!(
+ collect_chunk_read_iter(16384, 1),
+ [(1, 4095), (4096, 4096), (8192, 4096), (12288, 4096), (16384, 1)]
+ );
+
+ assert_eq!(collect_chunk_read_iter(0, 0), []);
+ assert_eq!(collect_chunk_read_iter(0, 100), []);
+ }
+}
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
new file mode 100644
index 0000000..89fbd9d
--- /dev/null
+++ b/authfs/src/file.rs
@@ -0,0 +1,49 @@
+mod local_file;
+mod remote_file;
+
+pub use local_file::LocalFileReader;
+pub use remote_file::{RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
+
+use std::io;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
+use authfs_aidl_interface::binder::{get_interface, Strong};
+
+// TODO(victorhsieh): use remote binder.
+pub fn get_local_binder() -> Strong<dyn IVirtFdService::IVirtFdService> {
+ let service_name = "authfs_fd_server";
+ get_interface(&service_name).expect("Cannot reach authfs_fd_server binder service")
+}
+
+/// A trait for reading data by chunks. The data is assumed readonly and has fixed length. Chunks
+/// can be read by specifying the chunk index. Only the last chunk may have incomplete chunk size.
+pub trait ReadOnlyDataByChunk {
+ /// Read the `chunk_index`-th chunk to `buf`. Each slice/chunk has size `CHUNK_SIZE` except for
+ /// the last one, which can be an incomplete chunk. `buf` is currently required to be large
+ /// enough to hold a full chunk of data. Reading beyond the file size (including empty file)
+ /// will crash.
+ fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize>;
+}
+
+/// A trait to write a buffer to the destination at a given offset. The implementation does not
+/// necessarily own or maintain the destination state.
+///
+/// NB: The trait is required in a member of `fusefs::AuthFs`, which is required to be Sync and
+/// immutable (this the member).
+pub trait RandomWrite {
+ /// Writes `buf` to the destination at `offset`. Returns the written size, which may not be the
+ /// full buffer.
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>;
+
+ /// Writes the full `buf` to the destination at `offset`.
+ fn write_all_at(&self, buf: &[u8], offset: u64) -> io::Result<()> {
+ let mut input_offset = 0;
+ let mut output_offset = offset;
+ while input_offset < buf.len() {
+ let size = self.write_at(&buf[input_offset..], output_offset)?;
+ input_offset += size;
+ output_offset += size as u64;
+ }
+ Ok(())
+ }
+}
diff --git a/authfs/src/reader.rs b/authfs/src/file/local_file.rs
similarity index 64%
rename from authfs/src/reader.rs
rename to authfs/src/file/local_file.rs
index 0242afa..0692767 100644
--- a/authfs/src/reader.rs
+++ b/authfs/src/file/local_file.rs
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,24 +14,13 @@
* limitations under the License.
*/
-//! A module for reading data by chunks.
-
use std::fs::File;
use std::io::Result;
use std::os::unix::fs::FileExt;
+use super::ReadOnlyDataByChunk;
use crate::common::CHUNK_SIZE;
-/// A trait for reading data by chunks. The data is assumed readonly and has fixed length. Chunks
-/// can be read by specifying the chunk index. Only the last chunk may have incomplete chunk size.
-pub trait ReadOnlyDataByChunk {
- /// Read the `chunk_index`-th chunk to `buf`. Each slice/chunk has size `CHUNK_SIZE` except for
- /// the last one, which can be an incomplete chunk. `buf` is currently required to be large
- /// enough to hold a full chunk of data. Reading beyond the file size (including empty file)
- /// will crash.
- fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize>;
-}
-
fn chunk_index_to_range(size: u64, chunk_index: u64) -> Result<(u64, u64)> {
let start = chunk_index * CHUNK_SIZE;
assert!(start < size);
@@ -40,16 +29,16 @@
}
/// A read-only file that can be read by chunks.
-pub struct ChunkedFileReader {
+pub struct LocalFileReader {
file: File,
size: u64,
}
-impl ChunkedFileReader {
- /// Creates a `ChunkedFileReader` to read from for the specified `path`.
- pub fn new(file: File) -> Result<ChunkedFileReader> {
+impl LocalFileReader {
+ /// Creates a `LocalFileReader` to read from the specified `file`.
+ pub fn new(file: File) -> Result<LocalFileReader> {
let size = file.metadata()?.len();
- Ok(ChunkedFileReader { file, size })
+ Ok(LocalFileReader { file, size })
}
pub fn len(&self) -> u64 {
@@ -57,7 +46,7 @@
}
}
-impl ReadOnlyDataByChunk for ChunkedFileReader {
+impl ReadOnlyDataByChunk for LocalFileReader {
fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize> {
debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
let (start, end) = chunk_index_to_range(self.size, chunk_index)?;
@@ -73,7 +62,7 @@
#[test]
fn test_read_4k_file() -> Result<()> {
- let file_reader = ChunkedFileReader::new(File::open("testdata/input.4k")?)?;
+ let file_reader = LocalFileReader::new(File::open("testdata/input.4k")?)?;
let mut buf = [0u8; 4096];
let size = file_reader.read_chunk(0, &mut buf)?;
assert_eq!(size, buf.len());
@@ -82,7 +71,7 @@
#[test]
fn test_read_4k1_file() -> Result<()> {
- let file_reader = ChunkedFileReader::new(File::open("testdata/input.4k1")?)?;
+ let file_reader = LocalFileReader::new(File::open("testdata/input.4k1")?)?;
let mut buf = [0u8; 4096];
let size = file_reader.read_chunk(0, &mut buf)?;
assert_eq!(size, buf.len());
@@ -93,7 +82,7 @@
#[test]
fn test_read_4m_file() -> Result<()> {
- let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
+ let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
for index in 0..file_reader.len() / 4096 {
let mut buf = [0u8; 4096];
let size = file_reader.read_chunk(index, &mut buf)?;
@@ -105,7 +94,7 @@
#[test]
#[should_panic]
fn test_read_beyond_file_size() {
- let file_reader = ChunkedFileReader::new(File::open("testdata/input.4k").unwrap()).unwrap();
+ let file_reader = LocalFileReader::new(File::open("testdata/input.4k").unwrap()).unwrap();
let mut buf = [0u8; 4096];
let _ = file_reader.read_chunk(1u64, &mut buf); // should panic
}
@@ -115,7 +104,7 @@
fn test_read_empty_file() {
let mut temp_file = temp_dir();
temp_file.push("authfs_test_empty_file");
- let file_reader = ChunkedFileReader::new(File::create(temp_file).unwrap()).unwrap();
+ let file_reader = LocalFileReader::new(File::create(temp_file).unwrap()).unwrap();
let mut buf = [0u8; 4096];
let _ = file_reader.read_chunk(0, &mut buf); // should panic
}
diff --git a/authfs/src/file/remote_file.rs b/authfs/src/file/remote_file.rs
new file mode 100644
index 0000000..dbf6bd9
--- /dev/null
+++ b/authfs/src/file/remote_file.rs
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::convert::TryFrom;
+use std::io;
+use std::io::Write;
+use std::sync::{Arc, Mutex};
+
+use super::{RandomWrite, ReadOnlyDataByChunk};
+use crate::common::CHUNK_SIZE;
+
+use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
+use authfs_aidl_interface::binder::Strong;
+
+type VirtFdService = Strong<dyn IVirtFdService::IVirtFdService>;
+
+fn remote_read_chunk(
+ service: &Arc<Mutex<VirtFdService>>,
+ remote_fd: i32,
+ chunk_index: u64,
+ mut buf: &mut [u8],
+) -> io::Result<usize> {
+ let offset = i64::try_from(chunk_index * CHUNK_SIZE)
+ .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+
+ let chunk = service
+ .lock()
+ .unwrap()
+ .readFile(remote_fd, offset, buf.len() as i32)
+ .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+ buf.write(&chunk)
+}
+
+pub struct RemoteFileReader {
+ // This needs to have Sync trait to be used in fuse::worker::start_message_loop.
+ service: Arc<Mutex<VirtFdService>>,
+ file_fd: i32,
+}
+
+impl RemoteFileReader {
+ pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
+ RemoteFileReader { service, file_fd }
+ }
+}
+
+impl ReadOnlyDataByChunk for RemoteFileReader {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+ remote_read_chunk(&self.service, self.file_fd, chunk_index, buf)
+ }
+}
+
+pub struct RemoteMerkleTreeReader {
+ // This needs to be Sync to be used in fuse::worker::start_message_loop.
+ // TODO(victorhsieh): change to Strong<> once binder supports it.
+ service: Arc<Mutex<VirtFdService>>,
+ file_fd: i32,
+}
+
+impl RemoteMerkleTreeReader {
+ pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
+ RemoteMerkleTreeReader { service, file_fd }
+ }
+}
+
+impl ReadOnlyDataByChunk for RemoteMerkleTreeReader {
+ fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
+ let offset = i64::try_from(chunk_index * CHUNK_SIZE)
+ .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+
+ let chunk = self
+ .service
+ .lock()
+ .unwrap()
+ .readFsverityMerkleTree(self.file_fd, offset, buf.len() as i32)
+ .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+ buf.write(&chunk)
+ }
+}
+
+pub struct RemoteFileEditor {
+ // This needs to have Sync trait to be used in fuse::worker::start_message_loop.
+ service: Arc<Mutex<VirtFdService>>,
+ file_fd: i32,
+}
+
+impl RemoteFileEditor {
+ pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
+ RemoteFileEditor { service, file_fd }
+ }
+}
+
+impl RandomWrite for RemoteFileEditor {
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ let offset =
+ i64::try_from(offset).map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+ let size = self
+ .service
+ .lock()
+ .unwrap()
+ .writeFile(self.file_fd, &buf, offset)
+ .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+ Ok(size as usize) // within range because size is supposed to be <= buf.len(), which is a usize
+ }
+}
+
+impl ReadOnlyDataByChunk for RemoteFileEditor {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+ remote_read_chunk(&self.service, self.file_fd, chunk_index, buf)
+ }
+}
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index 37d96c1..1515574 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -16,8 +16,9 @@
mod builder;
mod common;
+mod editor;
mod sys;
mod verifier;
-pub use self::builder::MerkleLeaves;
-pub use self::verifier::FsverityChunkedFileReader;
+pub use editor::VerifiedFileEditor;
+pub use verifier::VerifiedFileReader;
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index 607d3a7..94b9718 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -47,13 +47,17 @@
.collect()
}
-#[allow(dead_code)]
impl MerkleLeaves {
/// Creates a `MerkleLeaves` instance with empty data.
pub fn new() -> Self {
Self { leaves: Vec::new(), file_size: 0 }
}
+ /// Gets size of the file represented by `MerkleLeaves`.
+ pub fn file_size(&self) -> u64 {
+ self.file_size
+ }
+
/// Updates the hash of the `index`-th leaf, and increase the size to `size_at_least` if the
/// current size is smaller.
pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash, size_at_least: u64) {
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
new file mode 100644
index 0000000..543e9ac
--- /dev/null
+++ b/authfs/src/fsverity/editor.rs
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! A module for writing to a file from a trusted world to an untrusted storage.
+//!
+//! Architectural Model:
+//! * Trusted world: the writer, a signing secret, has some memory, but NO persistent storage.
+//! * Untrusted world: persistent storage, assuming untrusted.
+//! * IPC mechanism between trusted and untrusted world
+//!
+//! Use cases:
+//! * In the trusted world, we want to generate a large file, sign it, and share the signature for
+//! a third party to verify the file.
+//! * In the trusted world, we want to read a previously signed file back with signature check
+//! without having to touch the whole file.
+//!
+//! Requirements:
+//! * Communication between trusted and untrusted world is not cheap, and files can be large.
+//! * A file write pattern may not be sequential, neither does read.
+//!
+//! Considering the above, a technique similar to fs-verity is used. fs-verity uses an alternative
+//! hash function, a Merkle tree, to calculate the hash of file content. A file update at any
+//! location will propagate the hash update from the leaf to the root node. Unlike fs-verity, which
+//! assumes static files, to support write operation, we need to allow the file (thus tree) to
+//! update.
+//!
+//! For the trusted world to generate a large file with random write and hash it, the writer needs
+//! to hold some private information and update the Merkle tree during a file write (or even when
+//! the Merkle tree needs to be stashed to the untrusted storage).
+//!
+//! A write to a file must update the root hash. In order for the root hash to update, a tree
+//! walk to update from the write location to the root node is necessary. Importantly, in case when
+//! (part of) the Merkle tree needs to be read from the untrusted storage (e.g. not yet verified in
+//! cache), the original path must be verified by the trusted signature before the update to happen.
+//!
+//! Denial-of-service is a known weakness if the untrusted storage decides to simply remove the
+//! file. But there is nothing we can do in this architecture.
+//!
+//! Rollback attack is another possible attack, but can be addressed with a rollback counter when
+//! possible.
+
+use std::io;
+use std::sync::{Arc, RwLock};
+
+use super::builder::MerkleLeaves;
+use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
+use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use crate::file::{RandomWrite, ReadOnlyDataByChunk};
+
+// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
+// mapping below.
+impl From<CryptoError> for io::Error {
+ fn from(error: CryptoError) -> Self {
+ io::Error::new(io::ErrorKind::Other, error)
+ }
+}
+
+/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
+/// not be stored in a trusted environment. Only new, empty files are currently supported.
+pub struct VerifiedFileEditor<F: ReadOnlyDataByChunk + RandomWrite> {
+ file: F,
+ merkle_tree: Arc<RwLock<MerkleLeaves>>,
+}
+
+impl<F: ReadOnlyDataByChunk + RandomWrite> VerifiedFileEditor<F> {
+ /// Wraps a supposedly new file for integrity protection.
+ pub fn new(file: F) -> Self {
+ Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
+ }
+
+ /// Calculates the fs-verity digest of the current file.
+ #[allow(dead_code)]
+ pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
+ let merkle_tree = self.merkle_tree.read().unwrap();
+ merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
+ }
+
+ fn new_hash_for_incomplete_write(
+ &self,
+ source: &[u8],
+ offset_from_alignment: usize,
+ output_chunk_index: usize,
+ merkle_tree: &mut MerkleLeaves,
+ ) -> io::Result<Sha256Hash> {
+ // The buffer is initialized to 0 purposely. To calculate the block hash, the data is
+ // 0-padded to the block size. When a chunk read is less than a chunk, the initial value
+ // conveniently serves the padding purpose.
+ let mut orig_data = [0u8; CHUNK_SIZE as usize];
+
+ // If previous data exists, read back and verify against the known hash (since the
+ // storage / remote server is not trusted).
+ if merkle_tree.is_index_valid(output_chunk_index) {
+ self.read_chunk(output_chunk_index as u64, &mut orig_data)?;
+
+ // Verify original content
+ let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
+ if !merkle_tree.is_consistent(output_chunk_index, &hash) {
+ return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
+ }
+ }
+
+ Ok(Sha256Hasher::new()?
+ .update(&orig_data[..offset_from_alignment])?
+ .update(source)?
+ .update(&orig_data[offset_from_alignment + source.len()..])?
+ .finalize()?)
+ }
+
+ fn new_chunk_hash(
+ &self,
+ source: &[u8],
+ offset_from_alignment: usize,
+ current_size: usize,
+ output_chunk_index: usize,
+ merkle_tree: &mut MerkleLeaves,
+ ) -> io::Result<Sha256Hash> {
+ if current_size as u64 == CHUNK_SIZE {
+ // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
+ // write location.
+ Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
+ } else {
+ // Case 2: For an incomplete write, calculate the hash based on previous data (if
+ // any).
+ self.new_hash_for_incomplete_write(
+ source,
+ offset_from_alignment,
+ output_chunk_index,
+ merkle_tree,
+ )
+ }
+ }
+
+ pub fn size(&self) -> u64 {
+ self.merkle_tree.read().unwrap().file_size()
+ }
+}
+
+impl<F: ReadOnlyDataByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ // Since we don't need to support 32-bit CPU, make an assert to make conversion between
+ // u64 and usize easy below. Otherwise, we need to check `divide_roundup(offset + buf.len()
+ // <= usize::MAX` or handle `TryInto` errors.
+ debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
+
+ // The write range may not be well-aligned with the chunk boundary. There are various cases
+ // to deal with:
+ // 1. A write of a full 4K chunk.
+ // 2. A write of an incomplete chunk, possibly beyond the original EOF.
+ //
+ // Note that a write beyond EOF can create a hole. But we don't need to handle it here
+ // because holes are zeros, and leaves in MerkleLeaves are hashes of 4096-zeros by
+ // default.
+
+ // Now iterate on the input data, considering the alignment at the destination.
+ for (output_offset, current_size) in
+ ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)
+ {
+ // Lock the tree for the whole write for now. There may be room to improve to increase
+ // throughput.
+ let mut merkle_tree = self.merkle_tree.write().unwrap();
+
+ let offset_in_buf = (output_offset - offset) as usize;
+ let source = &buf[offset_in_buf as usize..offset_in_buf as usize + current_size];
+ let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;
+ let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;
+
+ let new_hash = match self.new_chunk_hash(
+ source,
+ offset_from_alignment,
+ current_size,
+ output_chunk_index,
+ &mut merkle_tree,
+ ) {
+ Ok(hash) => hash,
+ Err(e) => {
+ // Return early when any error happens before the write. Even if the hash is not
+ // consistent for the current chunk, we can still consider the earlier writes
+ // successful. Note that nothing persistent has been done in this iteration.
+ let written = output_offset - offset;
+ if written > 0 {
+ return Ok(written as usize);
+ }
+ return Err(e);
+ }
+ };
+
+ // A failed, partial write here will make the backing file inconsistent to the (old)
+ // hash. Nothing can be done within this writer, but at least it still maintains the
+ // (original) integrity for the file. To match what write(2) describes for an error
+ // case (though it's about direct I/O), "Partial data may be written ... should be
+ // considered inconsistent", an error below is propagated.
+ self.file.write_all_at(&source, output_offset)?;
+
+ // Update the hash only after the write succeeds. Note that this only attempts to keep
+            // the tree consistent with what has been written regardless of the actual state beyond the
+ // writer.
+ let size_at_least = offset.saturating_add(buf.len() as u64);
+ merkle_tree.update_hash(output_chunk_index, &new_hash, size_at_least);
+ }
+ Ok(buf.len())
+ }
+}
+
+impl<F: ReadOnlyDataByChunk + RandomWrite> ReadOnlyDataByChunk for VerifiedFileEditor<F> {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+ self.file.read_chunk(chunk_index, buf)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ // Test data below can be generated by:
+ // $ perl -e 'print "\x{00}" x 6000' > foo
+ // $ perl -e 'print "\x{01}" x 5000' >> foo
+ // $ fsverity digest foo
+ use super::*;
+ use anyhow::Result;
+ use std::cell::RefCell;
+ use std::convert::TryInto;
+
+ struct InMemoryEditor {
+ data: RefCell<Vec<u8>>,
+ fail_read: bool,
+ }
+
+ impl InMemoryEditor {
+ pub fn new() -> InMemoryEditor {
+ InMemoryEditor { data: RefCell::new(Vec::new()), fail_read: false }
+ }
+ }
+
+ impl RandomWrite for InMemoryEditor {
+ fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
+ let begin: usize =
+ offset.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+ let end = begin + buf.len();
+ if end > self.data.borrow().len() {
+ self.data.borrow_mut().resize(end, 0);
+ }
+ self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(&buf);
+ Ok(buf.len())
+ }
+ }
+
+ impl ReadOnlyDataByChunk for InMemoryEditor {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+ debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+
+ if self.fail_read {
+ return Err(io::Error::new(io::ErrorKind::Other, "test!"));
+ }
+
+ let borrowed = self.data.borrow();
+ let chunk = &borrowed
+ .chunks(CHUNK_SIZE as usize)
+ .nth(chunk_index as usize)
+ .ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!("read_chunk out of bound: index {}", chunk_index),
+ )
+ })?;
+ buf[..chunk.len()].copy_from_slice(&chunk);
+ Ok(chunk.len())
+ }
+ }
+
+ #[test]
+ fn test_writer() -> Result<()> {
+ let writer = InMemoryEditor::new();
+ let buf = [1; 4096];
+ assert_eq!(writer.data.borrow().len(), 0);
+
+ assert_eq!(writer.write_at(&buf, 16384)?, 4096);
+ assert_eq!(writer.data.borrow()[16384..16384 + 4096], buf);
+
+ assert_eq!(writer.write_at(&buf, 2048)?, 4096);
+ assert_eq!(writer.data.borrow()[2048..2048 + 4096], buf);
+
+ assert_eq!(writer.data.borrow().len(), 16384 + 4096);
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_no_write() -> Result<()> {
+ // Verify fs-verity hash without any write.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_from_zero() -> Result<()> {
+ // Verify a write of a full chunk.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
+ .as_slice()
+ );
+
+ // Verify a write of across multiple chunks.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")
+ .as_slice()
+ );
+
+ // Verify another write of across multiple chunks.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_unaligned() -> Result<()> {
+ // Verify small, unaligned write beyond EOF.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 5], 3)?, 5);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")
+ .as_slice()
+ );
+
+ // Verify bigger, unaligned write beyond EOF.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_with_hole() -> Result<()> {
+ // Verify an aligned write beyond EOF with holes.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")
+ .as_slice()
+ );
+
+ // Verify an unaligned write beyond EOF with holes.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")
+ .as_slice()
+ );
+
+ // Just another example with a small write.
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_various_writes() -> Result<()> {
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
+ assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
+ .as_slice()
+ );
+ assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
+ assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
+ .as_slice()
+ );
+ assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
+ assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
+ .as_slice()
+ );
+ assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
+ .as_slice()
+ );
+ assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
+ assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_inconsistent_read() -> Result<()> {
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
+
+ // Replace the expected hash of the first/0-th chunk. An incomplete write will fail when it
+ // detects the inconsistent read.
+ {
+ let mut merkle_tree = file.merkle_tree.write().unwrap();
+ let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+ merkle_tree.update_hash(0, &overriding_hash, 8192);
+ }
+ assert!(file.write_at(&[1; 1], 2048).is_err());
+
+        // A write of a full chunk can still succeed. This also fixes the inconsistency.
+ assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
+
+        // Replace the expected hash of the second/1-th chunk. A write range from the previous
+        // chunk can still succeed, but returns early once the inconsistent read is detected. A
+        // resumed write will fail since no bytes can be written due to the same inconsistency.
+ {
+ let mut merkle_tree = file.merkle_tree.write().unwrap();
+ let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+ merkle_tree.update_hash(1, &overriding_hash, 8192);
+ }
+ assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
+ assert!(file.write_at(&[10; 8000 - 4096], 4096).is_err());
+ Ok(())
+ }
+
+ #[test]
+ fn test_verified_writer_failed_read_back() -> Result<()> {
+ let mut writer = InMemoryEditor::new();
+ writer.fail_read = true;
+ let file = VerifiedFileEditor::new(writer);
+ assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
+
+ // When a read back is needed, a read failure will fail to write.
+ assert!(file.write_at(&[1; 1], 2048).is_err());
+ Ok(())
+ }
+
+ fn to_u8_vec(hex_str: &str) -> Vec<u8> {
+ assert!(hex_str.len() % 2 == 0);
+ (0..hex_str.len())
+ .step_by(2)
+ .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
+ .collect()
+ }
+}
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index fd108f5..4af360f 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -22,7 +22,7 @@
use crate::auth::Authenticator;
use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hasher};
-use crate::reader::ReadOnlyDataByChunk;
+use crate::file::ReadOnlyDataByChunk;
const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
@@ -125,21 +125,21 @@
Ok(formatted_digest)
}
-pub struct FsverityChunkedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
+pub struct VerifiedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
chunked_file: F,
file_size: u64,
merkle_tree: M,
root_hash: HashBuffer,
}
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> FsverityChunkedFileReader<F, M> {
+impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> VerifiedFileReader<F, M> {
pub fn new<A: Authenticator>(
authenticator: &A,
chunked_file: F,
file_size: u64,
sig: Vec<u8>,
merkle_tree: M,
- ) -> Result<FsverityChunkedFileReader<F, M>, FsverityError> {
+ ) -> Result<VerifiedFileReader<F, M>, FsverityError> {
let mut buf = [0u8; CHUNK_SIZE as usize];
let size = merkle_tree.read_chunk(0, &mut buf)?;
if buf.len() != size {
@@ -149,7 +149,7 @@
let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
let valid = authenticator.verify(&sig, &formatted_digest)?;
if valid {
- Ok(FsverityChunkedFileReader { chunked_file, file_size, merkle_tree, root_hash })
+ Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
} else {
Err(FsverityError::BadSignature)
}
@@ -157,7 +157,7 @@
}
impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> ReadOnlyDataByChunk
- for FsverityChunkedFileReader<F, M>
+ for VerifiedFileReader<F, M>
{
fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
@@ -176,13 +176,12 @@
mod tests {
use super::*;
use crate::auth::FakeAuthenticator;
- use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
+ use crate::file::{LocalFileReader, ReadOnlyDataByChunk};
use anyhow::Result;
- use std::fs::File;
+ use std::fs::{self, File};
use std::io::Read;
- type LocalFsverityChunkedFileReader =
- FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
+ type LocalVerifiedFileReader = VerifiedFileReader<LocalFileReader, LocalFileReader>;
fn total_chunk_number(file_size: u64) -> u64 {
(file_size + 4095) / 4096
@@ -193,21 +192,15 @@
content_path: &str,
merkle_tree_path: &str,
signature_path: &str,
- ) -> Result<(LocalFsverityChunkedFileReader, u64)> {
- let file_reader = ChunkedFileReader::new(File::open(content_path)?)?;
+ ) -> Result<(LocalVerifiedFileReader, u64)> {
+ let file_reader = LocalFileReader::new(File::open(content_path)?)?;
let file_size = file_reader.len();
- let merkle_tree = ChunkedFileReader::new(File::open(merkle_tree_path)?)?;
+ let merkle_tree = LocalFileReader::new(File::open(merkle_tree_path)?)?;
let mut sig = Vec::new();
let _ = File::open(signature_path)?.read_to_end(&mut sig)?;
let authenticator = FakeAuthenticator::always_succeed();
Ok((
- FsverityChunkedFileReader::new(
- &authenticator,
- file_reader,
- file_size,
- sig,
- merkle_tree,
- )?,
+ VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree)?,
file_size,
))
}
@@ -280,18 +273,12 @@
#[test]
fn invalid_signature() -> Result<()> {
let authenticator = FakeAuthenticator::always_fail();
- let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
+ let file_reader = LocalFileReader::new(File::open("testdata/input.4m")?)?;
let file_size = file_reader.len();
- let merkle_tree = ChunkedFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
- let sig = include_bytes!("../../testdata/input.4m.fsv_sig").to_vec();
- assert!(FsverityChunkedFileReader::new(
- &authenticator,
- file_reader,
- file_size,
- sig,
- merkle_tree
- )
- .is_err());
+ let merkle_tree = LocalFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
+ let sig = fs::read("testdata/input.4m.fsv_sig")?;
+ assert!(VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree)
+ .is_err());
Ok(())
}
}
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index f5dd6ec..13ec87d 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -26,31 +26,30 @@
use std::path::Path;
use std::time::Duration;
-use fuse::filesystem::{Context, DirEntry, DirectoryIterator, Entry, FileSystem, ZeroCopyWriter};
+use fuse::filesystem::{
+ Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, ZeroCopyReader,
+ ZeroCopyWriter,
+};
use fuse::mount::MountOption;
-use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::fsverity::FsverityChunkedFileReader;
-use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
-use crate::remote_file::{RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader};
+use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
+use crate::file::{
+ LocalFileReader, RandomWrite, ReadOnlyDataByChunk, RemoteFileEditor, RemoteFileReader,
+ RemoteMerkleTreeReader,
+};
+use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
pub type Inode = u64;
type Handle = u64;
-type RemoteFsverityChunkedFileReader =
- FsverityChunkedFileReader<RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader>;
-
-// A debug only type where everything are stored as local files.
-type FileBackedFsverityChunkedFileReader =
- FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
-
pub enum FileConfig {
- LocalVerifiedFile(FileBackedFsverityChunkedFileReader, u64),
- LocalUnverifiedFile(ChunkedFileReader, u64),
- RemoteVerifiedFile(RemoteFsverityChunkedFileReader, u64),
- RemoteUnverifiedFile(RemoteChunkedFileReader, u64),
+ LocalVerifiedReadonlyFile(VerifiedFileReader<LocalFileReader, LocalFileReader>, u64),
+ LocalUnverifiedReadonlyFile(LocalFileReader, u64),
+ RemoteVerifiedReadonlyFile(VerifiedFileReader<RemoteFileReader, RemoteMerkleTreeReader>, u64),
+ RemoteUnverifiedReadonlyFile(RemoteFileReader, u64),
+ RemoteVerifiedNewFile(VerifiedFileEditor<RemoteFileEditor>),
}
struct AuthFs {
@@ -92,11 +91,20 @@
}
}
-fn create_stat(ino: libc::ino_t, file_size: u64) -> io::Result<libc::stat64> {
+enum FileMode {
+ ReadOnly,
+ ReadWrite,
+}
+
+fn create_stat(ino: libc::ino_t, file_size: u64, file_mode: FileMode) -> io::Result<libc::stat64> {
let mut st = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() };
st.st_ino = ino;
- st.st_mode = libc::S_IFREG | libc::S_IRUSR | libc::S_IRGRP | libc::S_IROTH;
+ st.st_mode = match file_mode {
+ // Until needed, let's just grant the owner access.
+ FileMode::ReadOnly => libc::S_IFREG | libc::S_IRUSR,
+ FileMode::ReadWrite => libc::S_IFREG | libc::S_IRUSR | libc::S_IWUSR,
+ };
st.st_dev = 0;
st.st_nlink = 1;
st.st_uid = 0;
@@ -111,35 +119,6 @@
Ok(st)
}
-/// An iterator that generates (offset, size) for a chunked read operation, where offset is the
-/// global file offset, and size is the amount of read from the offset.
-struct ChunkReadIter {
- remaining: usize,
- offset: u64,
-}
-
-impl ChunkReadIter {
- pub fn new(remaining: usize, offset: u64) -> Self {
- ChunkReadIter { remaining, offset }
- }
-}
-
-impl Iterator for ChunkReadIter {
- type Item = (u64, usize);
-
- fn next(&mut self) -> Option<Self::Item> {
- if self.remaining == 0 {
- return None;
- }
- let chunk_data_size =
- std::cmp::min(self.remaining, (CHUNK_SIZE - self.offset % CHUNK_SIZE) as usize);
- let retval = (self.offset, chunk_data_size);
- self.offset += chunk_data_size as u64;
- self.remaining = self.remaining.saturating_sub(chunk_data_size);
- Some(retval)
- }
-}
-
fn offset_to_chunk_index(offset: u64) -> u64 {
offset / CHUNK_SIZE
}
@@ -153,7 +132,7 @@
) -> io::Result<usize> {
let remaining = file_size.saturating_sub(offset);
let size_to_read = std::cmp::min(size as usize, remaining as usize);
- let total = ChunkReadIter::new(size_to_read, offset).try_fold(
+ let total = ChunkedSizeIter::new(size_to_read, offset, CHUNK_SIZE as usize).try_fold(
0,
|total, (current_offset, planned_data_size)| {
// TODO(victorhsieh): There might be a non-trivial way to avoid this copy. For example,
@@ -197,6 +176,12 @@
self.max_write
}
+ fn init(&self, _capable: FsOptions) -> io::Result<FsOptions> {
+ // Enable writeback cache for better performance especially since our bandwidth to the
+ // backend service is limited.
+ Ok(FsOptions::WRITEBACK_CACHE)
+ }
+
fn lookup(&self, _ctx: Context, _parent: Inode, name: &CStr) -> io::Result<Entry> {
// Only accept file name that looks like an integrer. Files in the pool are simply exposed
// by their inode number. Also, there is currently no directory structure.
@@ -206,10 +191,15 @@
// be static.
let inode = num.parse::<Inode>().map_err(|_| io::Error::from_raw_os_error(libc::ENOENT))?;
let st = match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedFile(_, file_size)
- | FileConfig::LocalUnverifiedFile(_, file_size)
- | FileConfig::RemoteUnverifiedFile(_, file_size)
- | FileConfig::RemoteVerifiedFile(_, file_size) => create_stat(inode, *file_size)?,
+ FileConfig::LocalVerifiedReadonlyFile(_, file_size)
+ | FileConfig::LocalUnverifiedReadonlyFile(_, file_size)
+ | FileConfig::RemoteUnverifiedReadonlyFile(_, file_size)
+ | FileConfig::RemoteVerifiedReadonlyFile(_, file_size) => {
+ create_stat(inode, *file_size, FileMode::ReadOnly)?
+ }
+ FileConfig::RemoteVerifiedNewFile(file) => {
+ create_stat(inode, file.size(), FileMode::ReadWrite)?
+ }
};
Ok(Entry {
inode,
@@ -228,10 +218,15 @@
) -> io::Result<(libc::stat64, Duration)> {
Ok((
match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedFile(_, file_size)
- | FileConfig::LocalUnverifiedFile(_, file_size)
- | FileConfig::RemoteUnverifiedFile(_, file_size)
- | FileConfig::RemoteVerifiedFile(_, file_size) => create_stat(inode, *file_size)?,
+ FileConfig::LocalVerifiedReadonlyFile(_, file_size)
+ | FileConfig::LocalUnverifiedReadonlyFile(_, file_size)
+ | FileConfig::RemoteUnverifiedReadonlyFile(_, file_size)
+ | FileConfig::RemoteVerifiedReadonlyFile(_, file_size) => {
+ create_stat(inode, *file_size, FileMode::ReadOnly)?
+ }
+ FileConfig::RemoteVerifiedNewFile(file) => {
+ create_stat(inode, file.size(), FileMode::ReadWrite)?
+ }
},
DEFAULT_METADATA_TIMEOUT,
))
@@ -244,21 +239,29 @@
flags: u32,
) -> io::Result<(Option<Self::Handle>, fuse::sys::OpenOptions)> {
// Since file handle is not really used in later operations (which use Inode directly),
- // return None as the handle..
+ // return None as the handle.
match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedFile(_, _) | FileConfig::RemoteVerifiedFile(_, _) => {
+ FileConfig::LocalVerifiedReadonlyFile(_, _)
+ | FileConfig::RemoteVerifiedReadonlyFile(_, _) => {
check_access_mode(flags, libc::O_RDONLY)?;
// Once verified, and only if verified, the file content can be cached. This is not
- // really needed for a local file, but is the behavior of RemoteVerifiedFile later.
+ // really needed for a local file, but is the behavior of RemoteVerifiedReadonlyFile
+ // later.
Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
}
- FileConfig::LocalUnverifiedFile(_, _) | FileConfig::RemoteUnverifiedFile(_, _) => {
+ FileConfig::LocalUnverifiedReadonlyFile(_, _)
+ | FileConfig::RemoteUnverifiedReadonlyFile(_, _) => {
check_access_mode(flags, libc::O_RDONLY)?;
// Do not cache the content. This type of file is supposed to be verified using
// dm-verity. The filesystem mount over dm-verity already is already cached, so use
// direct I/O here to avoid double cache.
Ok((None, fuse::sys::OpenOptions::DIRECT_IO))
}
+ FileConfig::RemoteVerifiedNewFile(_) => {
+ // No need to check access modes since all the modes are allowed for the
+ // read-writable file.
+ Ok((None, fuse::sys::OpenOptions::KEEP_CACHE))
+ }
}
}
@@ -274,18 +277,45 @@
_flags: u32,
) -> io::Result<usize> {
match self.get_file_config(&inode)? {
- FileConfig::LocalVerifiedFile(file, file_size) => {
+ FileConfig::LocalVerifiedReadonlyFile(file, file_size) => {
read_chunks(w, file, *file_size, offset, size)
}
- FileConfig::LocalUnverifiedFile(file, file_size) => {
+ FileConfig::LocalUnverifiedReadonlyFile(file, file_size) => {
read_chunks(w, file, *file_size, offset, size)
}
- FileConfig::RemoteVerifiedFile(file, file_size) => {
+ FileConfig::RemoteVerifiedReadonlyFile(file, file_size) => {
read_chunks(w, file, *file_size, offset, size)
}
- FileConfig::RemoteUnverifiedFile(file, file_size) => {
+ FileConfig::RemoteUnverifiedReadonlyFile(file, file_size) => {
read_chunks(w, file, *file_size, offset, size)
}
+ FileConfig::RemoteVerifiedNewFile(file) => {
+ // Note that with FsOptions::WRITEBACK_CACHE, it's possible for the kernel to
+ // request a read even if the file is open with O_WRONLY.
+ read_chunks(w, file, file.size(), offset, size)
+ }
+ }
+ }
+
+ fn write<R: io::Read + ZeroCopyReader>(
+ &self,
+ _ctx: Context,
+ inode: Self::Inode,
+ _handle: Self::Handle,
+ mut r: R,
+ size: u32,
+ offset: u64,
+ _lock_owner: Option<u64>,
+ _delayed_write: bool,
+ _flags: u32,
+ ) -> io::Result<usize> {
+ match self.get_file_config(&inode)? {
+ FileConfig::RemoteVerifiedNewFile(file) => {
+ let mut buf = vec![0; size as usize];
+ r.read_exact(&mut buf)?;
+ file.write_at(&buf, offset)
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
}
}
}
@@ -325,27 +355,3 @@
AuthFs::new(file_pool, max_write),
)
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- fn collect_chunk_read_iter(remaining: usize, offset: u64) -> Vec<(u64, usize)> {
- ChunkReadIter::new(remaining, offset).collect::<Vec<_>>()
- }
-
- #[test]
- fn test_chunk_read_iter() {
- assert_eq!(collect_chunk_read_iter(4096, 0), [(0, 4096)]);
- assert_eq!(collect_chunk_read_iter(8192, 0), [(0, 4096), (4096, 4096)]);
- assert_eq!(collect_chunk_read_iter(8192, 4096), [(4096, 4096), (8192, 4096)]);
-
- assert_eq!(
- collect_chunk_read_iter(16384, 1),
- [(1, 4095), (4096, 4096), (8192, 4096), (12288, 4096), (16384, 1)]
- );
-
- assert_eq!(collect_chunk_read_iter(0, 0), []);
- assert_eq!(collect_chunk_read_iter(0, 100), []);
- }
-}
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 41b922d..a4b0d40 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -38,16 +38,14 @@
mod auth;
mod common;
mod crypto;
+mod file;
mod fsverity;
mod fusefs;
-mod reader;
-mod remote_file;
use auth::FakeAuthenticator;
-use fsverity::FsverityChunkedFileReader;
+use file::{LocalFileReader, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader};
+use fsverity::{VerifiedFileEditor, VerifiedFileReader};
use fusefs::{FileConfig, Inode};
-use reader::ChunkedFileReader;
-use remote_file::{RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader};
#[derive(StructOpt)]
struct Args {
@@ -55,31 +53,38 @@
#[structopt(parse(from_os_str))]
mount_point: PathBuf,
- /// A verifiable read-only file. Can be multiple.
+ /// A read-only remote file with integrity check. Can be multiple.
///
/// For example, `--remote-verified-file 5:10:1234:/path/to/cert` tells the filesystem to
/// associate entry 5 with a remote file 10 of size 1234 bytes, and need to be verified against
/// the /path/to/cert.
- #[structopt(long, parse(try_from_str = parse_remote_verified_file_option))]
- remote_verified_file: Vec<RemoteVerifiedFileConfig>,
+ #[structopt(long, parse(try_from_str = parse_remote_ro_file_option))]
+ remote_ro_file: Vec<OptionRemoteRoFile>,
- /// An unverifiable read-only file. Can be multiple.
+ /// A read-only remote file without integrity check. Can be multiple.
///
/// For example, `--remote-unverified-file 5:10:1234` tells the filesystem to associate entry 5
/// with a remote file 10 of size 1234 bytes.
- #[structopt(long, parse(try_from_str = parse_remote_unverified_file_option))]
- remote_unverified_file: Vec<RemoteUnverifiedFileConfig>,
+ #[structopt(long, parse(try_from_str = parse_remote_ro_file_unverified_option))]
+ remote_ro_file_unverified: Vec<OptionRemoteRoFileUnverified>,
- /// Debug only. A readonly file to be protected by fs-verity. Can be multiple.
- #[structopt(long, parse(try_from_str = parse_local_verified_file_option))]
- local_verified_file: Vec<LocalVerifiedFileConfig>,
+ /// A new read-writable remote file with integrity check. Can be multiple.
+ ///
+ /// For example, `--remote-new-rw-file 12:34` tells the filesystem to associate entry 12
+ /// with a remote file 34.
+ #[structopt(long, parse(try_from_str = parse_remote_new_rw_file_option))]
+ remote_new_rw_file: Vec<OptionRemoteRwFile>,
- /// Debug only. An unverified read-only file. Can be multiple.
- #[structopt(long, parse(try_from_str = parse_local_unverified_file_option))]
- local_unverified_file: Vec<LocalUnverifiedFileConfig>,
+ /// Debug only. A read-only local file with integrity check. Can be multiple.
+ #[structopt(long, parse(try_from_str = parse_local_file_ro_option))]
+ local_ro_file: Vec<OptionLocalFileRo>,
+
+ /// Debug only. A read-only local file without integrity check. Can be multiple.
+ #[structopt(long, parse(try_from_str = parse_local_ro_file_unverified_ro_option))]
+ local_ro_file_unverified: Vec<OptionLocalRoFileUnverified>,
}
-struct RemoteVerifiedFileConfig {
+struct OptionRemoteRoFile {
ino: Inode,
/// ID to refer to the remote file.
@@ -94,7 +99,7 @@
_certificate_path: PathBuf,
}
-struct RemoteUnverifiedFileConfig {
+struct OptionRemoteRoFileUnverified {
ino: Inode,
/// ID to refer to the remote file.
@@ -104,7 +109,14 @@
file_size: u64,
}
-struct LocalVerifiedFileConfig {
+struct OptionRemoteRwFile {
+ ino: Inode,
+
+ /// ID to refer to the remote file.
+ remote_id: i32,
+}
+
+struct OptionLocalFileRo {
ino: Inode,
/// Local path of the backing file.
@@ -121,19 +133,19 @@
_certificate_path: PathBuf,
}
-struct LocalUnverifiedFileConfig {
+struct OptionLocalRoFileUnverified {
ino: Inode,
/// Local path of the backing file.
file_path: PathBuf,
}
-fn parse_remote_verified_file_option(option: &str) -> Result<RemoteVerifiedFileConfig> {
+fn parse_remote_ro_file_option(option: &str) -> Result<OptionRemoteRoFile> {
let strs: Vec<&str> = option.split(':').collect();
if strs.len() != 4 {
bail!("Invalid option: {}", option);
}
- Ok(RemoteVerifiedFileConfig {
+ Ok(OptionRemoteRoFile {
ino: strs[0].parse::<Inode>()?,
remote_id: strs[1].parse::<i32>()?,
file_size: strs[2].parse::<u64>()?,
@@ -141,24 +153,35 @@
})
}
-fn parse_remote_unverified_file_option(option: &str) -> Result<RemoteUnverifiedFileConfig> {
+fn parse_remote_ro_file_unverified_option(option: &str) -> Result<OptionRemoteRoFileUnverified> {
let strs: Vec<&str> = option.split(':').collect();
if strs.len() != 3 {
bail!("Invalid option: {}", option);
}
- Ok(RemoteUnverifiedFileConfig {
+ Ok(OptionRemoteRoFileUnverified {
ino: strs[0].parse::<Inode>()?,
remote_id: strs[1].parse::<i32>()?,
file_size: strs[2].parse::<u64>()?,
})
}
-fn parse_local_verified_file_option(option: &str) -> Result<LocalVerifiedFileConfig> {
+fn parse_remote_new_rw_file_option(option: &str) -> Result<OptionRemoteRwFile> {
+ let strs: Vec<&str> = option.split(':').collect();
+ if strs.len() != 2 {
+ bail!("Invalid option: {}", option);
+ }
+ Ok(OptionRemoteRwFile {
+ ino: strs[0].parse::<Inode>()?,
+ remote_id: strs[1].parse::<i32>()?,
+ })
+}
+
+fn parse_local_file_ro_option(option: &str) -> Result<OptionLocalFileRo> {
let strs: Vec<&str> = option.split(':').collect();
if strs.len() != 5 {
bail!("Invalid option: {}", option);
}
- Ok(LocalVerifiedFileConfig {
+ Ok(OptionLocalFileRo {
ino: strs[0].parse::<Inode>()?,
file_path: PathBuf::from(strs[1]),
merkle_tree_dump_path: PathBuf::from(strs[2]),
@@ -167,92 +190,95 @@
})
}
-fn parse_local_unverified_file_option(option: &str) -> Result<LocalUnverifiedFileConfig> {
+fn parse_local_ro_file_unverified_ro_option(option: &str) -> Result<OptionLocalRoFileUnverified> {
let strs: Vec<&str> = option.split(':').collect();
if strs.len() != 2 {
bail!("Invalid option: {}", option);
}
- Ok(LocalUnverifiedFileConfig {
+ Ok(OptionLocalRoFileUnverified {
ino: strs[0].parse::<Inode>()?,
file_path: PathBuf::from(strs[1]),
})
}
fn new_config_remote_verified_file(remote_id: i32, file_size: u64) -> Result<FileConfig> {
- let service = remote_file::server::get_local_service();
+ let service = file::get_local_binder();
let signature = service.readFsveritySignature(remote_id).context("Failed to read signature")?;
let service = Arc::new(Mutex::new(service));
let authenticator = FakeAuthenticator::always_succeed();
- Ok(FileConfig::RemoteVerifiedFile(
- FsverityChunkedFileReader::new(
+ Ok(FileConfig::RemoteVerifiedReadonlyFile(
+ VerifiedFileReader::new(
&authenticator,
- RemoteChunkedFileReader::new(Arc::clone(&service), remote_id),
+ RemoteFileReader::new(Arc::clone(&service), remote_id),
file_size,
signature,
- RemoteFsverityMerkleTreeReader::new(Arc::clone(&service), remote_id),
+ RemoteMerkleTreeReader::new(Arc::clone(&service), remote_id),
)?,
file_size,
))
}
fn new_config_remote_unverified_file(remote_id: i32, file_size: u64) -> Result<FileConfig> {
- let file_reader = RemoteChunkedFileReader::new(
- Arc::new(Mutex::new(remote_file::server::get_local_service())),
- remote_id,
- );
- Ok(FileConfig::RemoteUnverifiedFile(file_reader, file_size))
+ let file_reader =
+ RemoteFileReader::new(Arc::new(Mutex::new(file::get_local_binder())), remote_id);
+ Ok(FileConfig::RemoteUnverifiedReadonlyFile(file_reader, file_size))
}
-fn new_config_local_verified_file(
+fn new_config_local_ro_file(
protected_file: &PathBuf,
merkle_tree_dump: &PathBuf,
signature: &PathBuf,
) -> Result<FileConfig> {
let file = File::open(&protected_file)?;
let file_size = file.metadata()?.len();
- let file_reader = ChunkedFileReader::new(file)?;
- let merkle_tree_reader = ChunkedFileReader::new(File::open(merkle_tree_dump)?)?;
+ let file_reader = LocalFileReader::new(file)?;
+ let merkle_tree_reader = LocalFileReader::new(File::open(merkle_tree_dump)?)?;
let authenticator = FakeAuthenticator::always_succeed();
let mut sig = Vec::new();
let _ = File::open(signature)?.read_to_end(&mut sig)?;
- let file_reader = FsverityChunkedFileReader::new(
- &authenticator,
- file_reader,
- file_size,
- sig,
- merkle_tree_reader,
- )?;
- Ok(FileConfig::LocalVerifiedFile(file_reader, file_size))
+ let file_reader =
+ VerifiedFileReader::new(&authenticator, file_reader, file_size, sig, merkle_tree_reader)?;
+ Ok(FileConfig::LocalVerifiedReadonlyFile(file_reader, file_size))
}
-fn new_config_local_unverified_file(file_path: &PathBuf) -> Result<FileConfig> {
- let file_reader = ChunkedFileReader::new(File::open(file_path)?)?;
+fn new_config_local_ro_file_unverified(file_path: &PathBuf) -> Result<FileConfig> {
+ let file_reader = LocalFileReader::new(File::open(file_path)?)?;
let file_size = file_reader.len();
- Ok(FileConfig::LocalUnverifiedFile(file_reader, file_size))
+ Ok(FileConfig::LocalUnverifiedReadonlyFile(file_reader, file_size))
+}
+
+fn new_config_remote_new_verified_file(remote_id: i32) -> Result<FileConfig> {
+ let remote_file =
+ RemoteFileEditor::new(Arc::new(Mutex::new(file::get_local_binder())), remote_id);
+ Ok(FileConfig::RemoteVerifiedNewFile(VerifiedFileEditor::new(remote_file)))
}
fn prepare_file_pool(args: &Args) -> Result<BTreeMap<Inode, FileConfig>> {
let mut file_pool = BTreeMap::new();
- for config in &args.remote_verified_file {
+ for config in &args.remote_ro_file {
file_pool.insert(
config.ino,
new_config_remote_verified_file(config.remote_id, config.file_size)?,
);
}
- for config in &args.remote_unverified_file {
+ for config in &args.remote_ro_file_unverified {
file_pool.insert(
config.ino,
new_config_remote_unverified_file(config.remote_id, config.file_size)?,
);
}
- for config in &args.local_verified_file {
+ for config in &args.remote_new_rw_file {
+ file_pool.insert(config.ino, new_config_remote_new_verified_file(config.remote_id)?);
+ }
+
+ for config in &args.local_ro_file {
file_pool.insert(
config.ino,
- new_config_local_verified_file(
+ new_config_local_ro_file(
&config.file_path,
&config.merkle_tree_dump_path,
&config.signature_path,
@@ -260,8 +286,8 @@
);
}
- for config in &args.local_unverified_file {
- file_pool.insert(config.ino, new_config_local_unverified_file(&config.file_path)?);
+ for config in &args.local_ro_file_unverified {
+ file_pool.insert(config.ino, new_config_local_ro_file_unverified(&config.file_path)?);
}
Ok(file_pool)
diff --git a/authfs/src/remote_file.rs b/authfs/src/remote_file.rs
deleted file mode 100644
index 01e803c..0000000
--- a/authfs/src/remote_file.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::convert::TryFrom;
-use std::io;
-use std::io::Write;
-use std::sync::{Arc, Mutex};
-
-use crate::common::CHUNK_SIZE;
-use crate::reader::ReadOnlyDataByChunk;
-
-use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
-use authfs_aidl_interface::binder::Strong;
-
-type VirtFdService = Strong<dyn IVirtFdService::IVirtFdService>;
-
-pub mod server {
- // TODO(victorhsieh): use remote binder.
- pub fn get_local_service() -> super::VirtFdService {
- let service_name = "authfs_fd_server";
- authfs_aidl_interface::binder::get_interface(&service_name)
- .expect("Cannot reach authfs_fd_server binder service")
- }
-}
-
-pub struct RemoteChunkedFileReader {
- // This needs to have Sync trait to be used in fuse::worker::start_message_loop.
- service: Arc<Mutex<VirtFdService>>,
- file_fd: i32,
-}
-
-impl RemoteChunkedFileReader {
- pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
- RemoteChunkedFileReader { service, file_fd }
- }
-}
-
-impl ReadOnlyDataByChunk for RemoteChunkedFileReader {
- fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
- let offset = i64::try_from(chunk_index * CHUNK_SIZE)
- .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
-
- let service = Arc::clone(&self.service);
- let chunk = service
- .lock()
- .unwrap()
- .readFile(self.file_fd, offset, buf.len() as i32)
- .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
- buf.write(&chunk)
- }
-}
-
-pub struct RemoteFsverityMerkleTreeReader {
- // This needs to be a Sync to be used in fuse::worker::start_message_loop.
- // TODO(victorhsieh): change to Strong<> once binder supports it.
- service: Arc<Mutex<VirtFdService>>,
- file_fd: i32,
-}
-
-impl RemoteFsverityMerkleTreeReader {
- pub fn new(service: Arc<Mutex<VirtFdService>>, file_fd: i32) -> Self {
- RemoteFsverityMerkleTreeReader { service, file_fd }
- }
-}
-
-impl ReadOnlyDataByChunk for RemoteFsverityMerkleTreeReader {
- fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
- let offset = i64::try_from(chunk_index * CHUNK_SIZE)
- .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
-
- let service = Arc::clone(&self.service);
- let chunk = service
- .lock()
- .unwrap()
- .readFsverityMerkleTree(self.file_fd, offset, buf.len() as i32)
- .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
- buf.write(&chunk)
- }
-}
diff --git a/authfs/tests/Android.bp b/authfs/tests/Android.bp
new file mode 100644
index 0000000..56e54f2
--- /dev/null
+++ b/authfs/tests/Android.bp
@@ -0,0 +1,11 @@
+java_test_host {
+ name: "AuthFsHostTest",
+ srcs: ["java/**/*.java"],
+ libs: [
+ "tradefed",
+ "compatibility-tradefed",
+ "compatibility-host-util",
+ ],
+ test_suites: ["general-tests"],
+ data: [":authfs_test_files"],
+}
diff --git a/authfs/tests/AndroidTest.xml b/authfs/tests/AndroidTest.xml
new file mode 100644
index 0000000..485e392
--- /dev/null
+++ b/authfs/tests/AndroidTest.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration description="Config for authfs tests">
+ <!-- Since Android does not support user namespace, we need root to access /dev/fuse and also
+ to set up the mount. -->
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
+
+ <!-- Basic checks that the device has all the prerequisites. -->
+ <target_preparer class="com.android.tradefed.targetprep.RunCommandTargetPreparer">
+ <option name="throw-if-cmd-fail" value="true" />
+ <!-- Make sure kernel has FUSE enabled. -->
+ <option name="run-command" value="ls /dev/fuse" />
+ <!-- Make sure necessary executables are installed. -->
+ <option name="run-command" value="ls /apex/com.android.virt/bin/fd_server" />
+ <option name="run-command" value="ls /apex/com.android.virt/bin/authfs" />
+ <!-- Prepare test directory. -->
+ <option name="run-command" value="mkdir -p /data/local/tmp/authfs/mnt" />
+ <option name="teardown-command" value="rm -rf /data/local/tmp/authfs" />
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="abort-on-push-failure" value="true" />
+ <option name="push-file" key="cert.der" value="/data/local/tmp/authfs/cert.der" />
+ <option name="push-file" key="input.4m" value="/data/local/tmp/authfs/input.4m" />
+ <option name="push-file" key="input.4k1" value="/data/local/tmp/authfs/input.4k1" />
+ <option name="push-file" key="input.4k" value="/data/local/tmp/authfs/input.4k" />
+ <option name="push-file" key="input.4m.fsv_sig"
+ value="/data/local/tmp/authfs/input.4m.fsv_sig" />
+ <option name="push-file" key="input.4k1.fsv_sig"
+ value="/data/local/tmp/authfs/input.4k1.fsv_sig" />
+ <option name="push-file" key="input.4k.fsv_sig"
+ value="/data/local/tmp/authfs/input.4k.fsv_sig" />
+ <option name="push-file" key="input.4m.merkle_dump"
+ value="/data/local/tmp/authfs/input.4m.merkle_dump" />
+ <option name="push-file" key="input.4m.merkle_dump.bad"
+ value="/data/local/tmp/authfs/input.4m.merkle_dump.bad" />
+ <option name="push-file" key="input.4k1.merkle_dump"
+ value="/data/local/tmp/authfs/input.4k1.merkle_dump" />
+ <option name="push-file" key="input.4k.merkle_dump"
+ value="/data/local/tmp/authfs/input.4k.merkle_dump" />
+ </target_preparer>
+
+ <test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
+ <option name="jar" value="AuthFsHostTest.jar" />
+ </test>
+</configuration>
diff --git a/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
new file mode 100644
index 0000000..3837dd3
--- /dev/null
+++ b/authfs/tests/java/src/com/android/fs/AuthFsHostTest.java
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.virt.fs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import android.platform.test.annotations.RootPermissionTest;
+
+import com.android.tradefed.device.DeviceNotAvailableException;
+import com.android.tradefed.device.ITestDevice;
+import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
+import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
+import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.CommandStatus;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+@RootPermissionTest
+@RunWith(DeviceJUnit4ClassRunner.class)
+public final class AuthFsHostTest extends BaseHostJUnit4Test {
+
+ /** Test directory where data are located */
+ private static final String TEST_DIR = "/data/local/tmp/authfs";
+
+ /** Mount point of authfs during the test */
+ private static final String MOUNT_DIR = "/data/local/tmp/authfs/mnt";
+
+ private static final String FD_SERVER_BIN = "/apex/com.android.virt/bin/fd_server";
+ private static final String AUTHFS_BIN = "/apex/com.android.virt/bin/authfs";
+
+ /** Plenty of time for authfs to get ready */
+ private static final int TIME_BUDGET_AUTHFS_SETUP = 1500; // ms
+
+ private ITestDevice mDevice;
+ private ExecutorService mThreadPool;
+
+ @Before
+ public void setUp() {
+ mDevice = getDevice();
+ mThreadPool = Executors.newCachedThreadPool();
+ }
+
+ @After
+ public void tearDown() throws DeviceNotAvailableException {
+ mDevice.executeShellV2Command("killall authfs fd_server");
+ mDevice.executeShellV2Command("umount " + MOUNT_DIR);
+ mDevice.executeShellV2Command("rm -f " + TEST_DIR);
+ }
+
+ @Test
+ public void testReadWithFsverityVerification_LocalFile()
+ throws DeviceNotAvailableException, InterruptedException {
+ // Setup
+ runAuthFsInBackground(
+ "--local-ro-file-unverified 3:input.4m"
+ + " --local-ro-file 4:input.4m:input.4m.merkle_dump:input.4m.fsv_sig:cert.der"
+ + " --local-ro-file 5:input.4k1:input.4k1.merkle_dump:input.4k1.fsv_sig:cert.der"
+ + " --local-ro-file 6:input.4k:input.4k.merkle_dump:input.4k.fsv_sig:cert.der"
+ );
+ Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+ // Action
+ String actualHashUnverified4m = computeFileHashInGuest(MOUNT_DIR + "/3");
+ String actualHash4m = computeFileHashInGuest(MOUNT_DIR + "/4");
+ String actualHash4k1 = computeFileHashInGuest(MOUNT_DIR + "/5");
+ String actualHash4k = computeFileHashInGuest(MOUNT_DIR + "/6");
+
+ // Verify
+ String expectedHash4m = computeFileHash(TEST_DIR + "/input.4m");
+ String expectedHash4k1 = computeFileHash(TEST_DIR + "/input.4k1");
+ String expectedHash4k = computeFileHash(TEST_DIR + "/input.4k");
+
+ assertEquals("Inconsistent hash from /authfs/3: ", expectedHash4m, actualHashUnverified4m);
+ assertEquals("Inconsistent hash from /authfs/4: ", expectedHash4m, actualHash4m);
+ assertEquals("Inconsistent hash from /authfs/5: ", expectedHash4k1, actualHash4k1);
+ assertEquals("Inconsistent hash from /authfs/6: ", expectedHash4k, actualHash4k);
+ }
+
+ @Test
+ public void testReadWithFsverityVerification_RemoteFile()
+ throws DeviceNotAvailableException, InterruptedException {
+ // Setup
+ runFdServerInBackground(
+ "3<input.4m 4<input.4m.merkle_dump 5<input.4m.fsv_sig 6<input.4m",
+ "--ro-fds 3:4:5 --ro-fds 6"
+ );
+ runAuthFsInBackground(
+ "--remote-ro-file-unverified 10:6:4194304 --remote-ro-file 11:3:4194304:cert.der"
+ );
+ Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+ // Action
+ String actualHashUnverified4m = computeFileHashInGuest(MOUNT_DIR + "/10");
+ String actualHash4m = computeFileHashInGuest(MOUNT_DIR + "/11");
+
+ // Verify
+ String expectedHash4m = computeFileHash(TEST_DIR + "/input.4m");
+
+ assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4m, actualHashUnverified4m);
+ assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4m, actualHash4m);
+ }
+
+ // Separate the test from the above simply because exec in shell does not allow opening too
+ // many files.
+ @Test
+ public void testReadWithFsverityVerification_RemoteSmallerFile()
+ throws DeviceNotAvailableException, InterruptedException {
+ // Setup
+ runFdServerInBackground(
+ "3<input.4k 4<input.4k.merkle_dump 5<input.4k.fsv_sig"
+ + " 6<input.4k1 7<input.4k1.merkle_dump 8<input.4k1.fsv_sig",
+ "--ro-fds 3:4:5 --ro-fds 6:7:8"
+ );
+ runAuthFsInBackground(
+ "--remote-ro-file 10:3:4096:cert.der --remote-ro-file 11:6:4097:cert.der"
+ );
+ Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+ // Action
+ String actualHash4k = computeFileHashInGuest(MOUNT_DIR + "/10");
+ String actualHash4k1 = computeFileHashInGuest(MOUNT_DIR + "/11");
+
+ // Verify
+ String expectedHash4k = computeFileHash(TEST_DIR + "/input.4k");
+ String expectedHash4k1 = computeFileHash(TEST_DIR + "/input.4k1");
+
+ assertEquals("Inconsistent hash from /authfs/10: ", expectedHash4k, actualHash4k);
+ assertEquals("Inconsistent hash from /authfs/11: ", expectedHash4k1, actualHash4k1);
+ }
+
+ @Test
+ public void testReadWithFsverityVerification_TamperedMerkleTree()
+ throws DeviceNotAvailableException, InterruptedException {
+ // Setup
+ runFdServerInBackground(
+ "3<input.4m 4<input.4m.merkle_dump.bad 5<input.4m.fsv_sig",
+ "--ro-fds 3:4:5"
+ );
+ runAuthFsInBackground("--remote-ro-file 10:3:4096:cert.der");
+ Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+ // Verify
+ assertFalse(copyFileInGuest(MOUNT_DIR + "/10", "/dev/null"));
+ }
+
+ @Test
+ public void testWriteThroughCorrectly()
+ throws DeviceNotAvailableException, InterruptedException {
+ // Setup
+ runFdServerInBackground("3<>output", "--rw-fds 3");
+ runAuthFsInBackground("--remote-new-rw-file 20:3");
+ Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+ // Action
+ String srcPath = "/system/bin/linker";
+ String destPath = MOUNT_DIR + "/20";
+ String backendPath = TEST_DIR + "/output";
+ assertTrue(copyFileInGuest(srcPath, destPath));
+
+ // Verify
+ String expectedHash = computeFileHashInGuest(srcPath);
+ String actualHash = computeFileHash(backendPath);
+ assertEquals("Inconsistent file hash on the backend storage", expectedHash, actualHash);
+
+ String actualHashFromAuthFs = computeFileHashInGuest(destPath);
+ assertEquals("Inconsistent file hash when reads from authfs", expectedHash,
+ actualHashFromAuthFs);
+ }
+
+ @Test
+ public void testWriteFailedIfDetectsTampering()
+ throws DeviceNotAvailableException, InterruptedException {
+ // Setup
+ runFdServerInBackground("3<>/output", "--rw-fds 3");
+ runAuthFsInBackground("--remote-new-rw-file 20:3");
+ Thread.sleep(TIME_BUDGET_AUTHFS_SETUP);
+
+ String srcPath = "/system/bin/linker";
+ String destPath = MOUNT_DIR + "/20";
+ String backendPath = TEST_DIR + "/output";
+ assertTrue(copyFileInGuest(srcPath, destPath));
+
+ // Action
+ // Tamper with the first two 4K blocks of the backing file.
+ expectRemoteCommandToSucceed("dd if=/dev/zero of=" + backendPath + " bs=1 count=8192");
+
+ // Verify
+ // Writing to a block partially requires a read back to calculate the new hash. It should
+ // fail when the content is inconsistent with the known hash. Use direct I/O to avoid simply
+ // writing to the filesystem cache.
+ expectRemoteCommandToFail("dd if=/dev/zero of=" + destPath + " bs=1 count=1024 direct");
+
+ // A full 4K write does not require a read back, so the write can succeed even if the
+ // backing block has already been tampered with.
+ expectRemoteCommandToSucceed(
+ "dd if=/dev/zero of=" + destPath + " bs=1 count=4096 skip=4096");
+
+ // Otherwise, a partial write with a correct backing file should still succeed.
+ expectRemoteCommandToSucceed(
+ "dd if=/dev/zero of=" + destPath + " bs=1 count=1024 skip=8192");
+ }
+
+ // TODO(b/178874539): This does not really run in the guest VM. Send the shell command to the
+ // guest VM when authfs works across VM boundary.
+ private String computeFileHashInGuest(String path) throws DeviceNotAvailableException {
+ return computeFileHash(path);
+ }
+
+ private boolean copyFileInGuest(String src, String dest) throws DeviceNotAvailableException {
+ // TODO(b/182576497): cp returns an error because close(2) returns ENOSYS in the current authfs
+ // implementation. We should probably fix that since programs can expect close(2) to return 0.
+ String cmd = "cat " + src + " > " + dest;
+ CommandResult result = mDevice.executeShellV2Command(cmd);
+ return result.getStatus() == CommandStatus.SUCCESS;
+ }
+
+ private String computeFileHash(String path) throws DeviceNotAvailableException {
+ String result = expectRemoteCommandToSucceed("sha256sum " + path);
+ String[] tokens = result.split("\\s");
+ if (tokens.length > 0) {
+ return tokens[0];
+ } else {
+ CLog.e("Unrecognized output by sha256sum: " + result);
+ return "";
+ }
+ }
+
+ private void runAuthFsInBackground(String flags) throws DeviceNotAvailableException {
+ String cmd = "cd " + TEST_DIR + " && " + AUTHFS_BIN + " " + MOUNT_DIR + " " + flags;
+
+ mThreadPool.submit(() -> {
+ try {
+ CLog.i("Starting authfs");
+ expectRemoteCommandToSucceed(cmd);
+ } catch (DeviceNotAvailableException e) {
+ CLog.e("Error running authfs", e);
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ private void runFdServerInBackground(String execParamsForOpeningFds, String flags)
+ throws DeviceNotAvailableException {
+ String cmd = "cd " + TEST_DIR + " && exec " + execParamsForOpeningFds + " " + FD_SERVER_BIN
+ + " " + flags;
+ mThreadPool.submit(() -> {
+ try {
+ CLog.i("Starting fd_server");
+ expectRemoteCommandToSucceed(cmd);
+ } catch (DeviceNotAvailableException e) {
+ CLog.e("Error running fd_server", e);
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ private String expectRemoteCommandToSucceed(String cmd) throws DeviceNotAvailableException {
+ CommandResult result = mDevice.executeShellV2Command(cmd);
+ assertEquals("`" + cmd + "` failed: " + result.getStderr(), CommandStatus.SUCCESS,
+ result.getStatus());
+ CLog.d("Stdout: " + result.getStdout());
+ return result.getStdout().trim();
+ }
+
+ private void expectRemoteCommandToFail(String cmd) throws DeviceNotAvailableException {
+ CommandResult result = mDevice.executeShellV2Command(cmd);
+ assertNotEquals("Unexpected success from `" + cmd + "`: " + result.getStdout(),
+ result.getStatus(), CommandStatus.SUCCESS);
+ }
+}
diff --git a/authfs/tools/device-test.sh b/authfs/tools/device-test.sh
deleted file mode 100755
index 5cf5f10..0000000
--- a/authfs/tools/device-test.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/system/bin/sh
-
-# TODO(victorhsieh): Create a standard Android test for continuous integration.
-#
-# How to run this test:
-#
-# Setup:
-# $ adb push testdata/input.4m* /data/local/tmp
-#
-# Shell 1:
-# $ adb shell 'cd /data/local/tmp && exec 9</system/bin/sh 8<input.4m 7<input.4m.merkle_dump 6<input.4m.fsv_sig 5<input.4m 4<input.4m.merkle_dump.bad 3<input.4m.fsv_sig fd_server --ro-fds 9 --ro-fds 8:7:6 --ro-fds 5:4:3'
-#
-# Shell 2:
-# $ adb push tools/device-test.sh /data/local/tmp/ && adb shell /data/local/tmp/device-test.sh
-
-# Run with -u to enter new namespace.
-if [[ $1 == "-u" ]]; then
- exec unshare -mUr $0
-fi
-
-cd /data/local/tmp
-
-MOUNTPOINT=/data/local/tmp/authfs
-trap "umount ${MOUNTPOINT}" EXIT;
-mkdir -p ${MOUNTPOINT}
-
-size=$(du -b /system/bin/sh |awk '{print $1}')
-size2=$(du -b input.4m |awk '{print $1}')
-
-echo "Mounting authfs in background ..."
-
-# TODO(170494765): Replace /dev/null (currently not used) with a valid
-# certificate.
-authfs \
- ${MOUNTPOINT} \
- --local-verified-file 2:input.4m:input.4m.merkle_dump:input.4m.fsv_sig:/dev/null \
- --local-verified-file 3:input.4k1:input.4k1.merkle_dump:input.4k1.fsv_sig:/dev/null \
- --local-verified-file 4:input.4k:input.4k.merkle_dump:input.4k.fsv_sig:/dev/null \
- --local-unverified-file 5:/system/bin/sh \
- --remote-unverified-file 6:9:${size} \
- --remote-verified-file 7:8:${size2}:/dev/null \
- --remote-verified-file 8:5:${size2}:/dev/null \
- &
-sleep 0.1
-
-echo "Accessing files in authfs ..."
-md5sum ${MOUNTPOINT}/2 input.4m
-echo
-md5sum ${MOUNTPOINT}/3 input.4k1
-echo
-md5sum ${MOUNTPOINT}/4 input.4k
-echo
-md5sum ${MOUNTPOINT}/5 /system/bin/sh
-md5sum ${MOUNTPOINT}/6
-echo
-md5sum ${MOUNTPOINT}/7 input.4m
-echo
-echo Checking error cases...
-cat /data/local/tmp/authfs/8 2>&1 |grep -q ": I/O error" || echo "Failed to catch the problem"
-echo "Done!"
diff --git a/authfs/tools/test.sh b/authfs/tools/test.sh
deleted file mode 100755
index 9ed3a99..0000000
--- a/authfs/tools/test.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Run with -u to enter new namespace.
-if [[ $1 == "-u" ]]; then
- exec unshare -m -U -r $0
-fi
-
-trap "umount /tmp/mnt" EXIT;
-mkdir -p /tmp/mnt
-
-echo "Mounting authfs in background ..."
-strace -o authfs.strace target/debug/authfs \
- /tmp/mnt \
- --local-verified-file 2:testdata/input.4m:testdata/input.4m.merkle_dump:testdata/input.4m.fsv_sig \
- --local-verified-file 3:testdata/input.4k1:testdata/input.4k1.merkle_dump:testdata/input.4k1.fsv_sig \
- --local-verified-file 4:testdata/input.4k:testdata/input.4k.merkle_dump:testdata/input.4k.fsv_sig \
- --local-unverified-file 5:testdata/input.4k \
- &
-sleep 0.1
-
-echo "Accessing files in authfs ..."
-echo
-md5sum /tmp/mnt/2 testdata/input.4m
-echo
-md5sum /tmp/mnt/3 testdata/input.4k1
-echo
-md5sum /tmp/mnt/4 /tmp/mnt/5 testdata/input.4k
-echo
-dd if=/tmp/mnt/2 bs=1000 skip=100 count=50 status=none |md5sum
-dd if=testdata/input.4m bs=1000 skip=100 count=50 status=none |md5sum
-echo
-tac /tmp/mnt/4 |md5sum
-tac /tmp/mnt/5 |md5sum
-tac testdata/input.4k |md5sum
-echo
-test -f /tmp/mnt/2 || echo 'FAIL: an expected file is missing'
-test -f /tmp/mnt/0 && echo 'FAIL: unexpected file presents'
-test -f /tmp/mnt/1 && echo 'FAIL: unexpected file presents, 1 is root dir'
-test -f /tmp/mnt/100 && echo 'FAIL: unexpected file presents'
-test -f /tmp/mnt/foo && echo 'FAIL: unexpected file presents'
-test -f /tmp/mnt/dir/3 && echo 'FAIL: unexpected file presents'
-echo "Done!"
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 3eee6f4..e818420 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -37,6 +37,7 @@
use_avb: true,
avb_private_key: "microdroid.pem",
avb_algorithm: "SHA256_RSA4096",
+ partition_name: "system",
deps: [
"init_second_stage",
"microdroid_init_rc",
@@ -162,6 +163,8 @@
dtb_prebuilt: "dummy_dtb.img",
header_version: "4",
partition_name: "boot",
+ use_avb: true,
+ avb_private_key: "microdroid.pem",
}
android_filesystem {
@@ -189,6 +192,8 @@
header_version: "4",
vendor_boot: true,
partition_name: "vendor_boot",
+ use_avb: true,
+ avb_private_key: "microdroid.pem",
}
android_filesystem {
@@ -287,3 +292,34 @@
relative_install_path: "selinux",
installable: false,
}
+
+vbmeta {
+ name: "microdroid_vbmeta",
+ partition_name: "vbmeta",
+ private_key: "microdroid.pem",
+ partitions: [
+ "microdroid_vendor",
+ "microdroid_vendor_boot-5.10",
+ ],
+ chained_partitions: [
+ {
+ name: "vbmeta_system",
+ rollback_index_location: 1,
+ private_key: "microdroid.pem",
+ },
+ {
+ name: "boot",
+ rollback_index_location: 2,
+ private_key: "microdroid.pem",
+ },
+ ],
+}
+
+vbmeta {
+ name: "microdroid_vbmeta_system",
+ partition_name: "vbmeta_system",
+ private_key: "microdroid.pem",
+ partitions: [
+ "microdroid",
+ ],
+}
diff --git a/microdroid/README.md b/microdroid/README.md
index fe0843f..363d623 100644
--- a/microdroid/README.md
+++ b/microdroid/README.md
@@ -22,6 +22,8 @@
$ m microdroid_boot-5.10
$ m microdroid_vendor_boot-5.10
$ m microdroid_uboot_env
+$ m microdroid_vbmeta
+$ m microdroid_vbmeta_system
```
## Installing
@@ -34,6 +36,8 @@
$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_super.img /data/local/tmp/super.img
$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_boot-5.10.img /data/local/tmp/boot.img
$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_vendor_boot-5.10.img /data/local/tmp/vendor_boot.img
+$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_vbmeta.img /data/local/tmp/vbmeta.img
+$ adb push $ANDROID_PRODUCT_OUT/system/etc/microdroid_vbmeta_system.img /data/local/tmp/vbmeta_system.img
$ adb shell mkdir /data/local/tmp/cuttlefish_runtime.1/
$ adb push $ANDROID_PRODUCT_OUT/system/etc/uboot_env.img /data/local/tmp/cuttlefish_runtime.1/
$ adb shell mkdir -p /data/local/tmp/etc/cvd_config
@@ -41,8 +45,6 @@
$ dd if=/dev/zero of=empty.img bs=4k count=600
$ mkfs.ext4 -F empty.img
$ adb push empty.img /data/local/tmp/userdata.img
-$ adb push empty.img /data/local/tmp/vbmeta.img
-$ adb push empty.img /data/local/tmp/vbmeta_system.img
$ adb push empty.img /data/local/tmp/cache.img
```
@@ -52,7 +54,7 @@
future, this shall be done via [`virtmanager`](../virtmanager/).
```
-$ adb shell 'HOME=/data/local/tmp; PATH=$PATH:/apex/com.android.virt/bin; assemble_cvd < /dev/null'
+$ adb shell 'HOME=/data/local/tmp; PATH=$PATH:/apex/com.android.virt/bin; assemble_cvd -protected_vm < /dev/null'
$ adb shell 'cd /data/local/tmp; /apex/com.android.virt/bin/crosvm run --cid=5 --disable-sandbox --bios=bootloader --serial=type=stdout --disk=cuttlefish_runtime/composite.img'
```
diff --git a/tests/hostside/Android.bp b/tests/hostside/Android.bp
index e07459a..c030e8d 100644
--- a/tests/hostside/Android.bp
+++ b/tests/hostside/Android.bp
@@ -14,6 +14,8 @@
":microdroid_uboot_env",
":cuttlefish_crosvm_bootloader",
":MicrodroidHostTestCase_EmptyImage",
+ ":microdroid_vbmeta",
+ ":microdroid_vbmeta_system",
],
}
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
index 7a45a8c..32b3c11 100644
--- a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -75,8 +75,8 @@
pushFile("microdroid_vendor_boot-5.10.img", "vendor_boot.img");
pushFile("uboot_env.img", "cuttlefish_runtime.1/uboot_env.img");
pushFile("empty.img", "userdata.img");
- pushFile("empty.img", "vbmeta.img");
- pushFile("empty.img", "vbmeta_system.img");
+ pushFile("microdroid_vbmeta.img", "vbmeta.img");
+ pushFile("microdroid_vbmeta_system.img", "vbmeta_system.img");
pushFile("empty.img", "cache.img");
getDevice().executeShellCommand("mkdir -p " + TEST_ROOT + "etc/cvd_config");
getDevice().pushString("{}", TEST_ROOT + "etc/cvd_config/cvd_config_phone.json");
@@ -84,7 +84,7 @@
// Run assemble_cvd to create composite.img
getDevice().executeShellCommand("HOME=" + TEST_ROOT + "; "
+ "PATH=$PATH:" + VIRT_APEX + "bin; "
- + VIRT_APEX + "bin/assemble_cvd < /dev/null");
+ + VIRT_APEX + "bin/assemble_cvd -protected_vm < /dev/null");
// Make sure that composite.img is created
final String compositeImg = TEST_ROOT + "cuttlefish_runtime/composite.img";
diff --git a/tests/vsock_test.cc b/tests/vsock_test.cc
index 74e984f..57a03ca 100644
--- a/tests/vsock_test.cc
+++ b/tests/vsock_test.cc
@@ -21,6 +21,7 @@
#include <linux/vm_sockets.h>
#include <iostream>
+#include <optional>
#include "android-base/file.h"
#include "android-base/logging.h"
@@ -57,7 +58,7 @@
ASSERT_EQ(ret, 0) << strerror(errno);
sp<IVirtualMachine> vm;
- status = mVirtManager->startVm(String16(kVmConfigPath), &vm);
+ status = mVirtManager->startVm(String16(kVmConfigPath), std::nullopt, &vm);
ASSERT_TRUE(status.isOk()) << "Error starting VM: " << status;
int32_t cid;
diff --git a/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl b/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl
index a401fe6..79010da 100644
--- a/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl
+++ b/virtmanager/aidl/android/system/virtmanager/IVirtManager.aidl
@@ -19,8 +19,11 @@
import android.system.virtmanager.VirtualMachineDebugInfo;
interface IVirtManager {
- /** Start the VM with the given config file, and return a handle to it. */
- IVirtualMachine startVm(String configPath);
+ /**
+ * Start the VM with the given config file, and return a handle to it. If `logFd` is provided
+ * then console logs from the VM will be sent to it.
+ */
+ IVirtualMachine startVm(String configPath, in @nullable ParcelFileDescriptor logFd);
/**
* Get a list of all currently running VMs. This method is only intended for debug purposes,
diff --git a/virtmanager/src/aidl.rs b/virtmanager/src/aidl.rs
index b7595a9..8105051 100644
--- a/virtmanager/src/aidl.rs
+++ b/virtmanager/src/aidl.rs
@@ -22,8 +22,11 @@
BnVirtualMachine, IVirtualMachine,
};
use android_system_virtmanager::aidl::android::system::virtmanager::VirtualMachineDebugInfo::VirtualMachineDebugInfo;
-use android_system_virtmanager::binder::{self, Interface, StatusCode, Strong, ThreadState};
+use android_system_virtmanager::binder::{
+ self, Interface, ParcelFileDescriptor, StatusCode, Strong, ThreadState,
+};
use log::error;
+use std::fs::File;
use std::sync::{Arc, Mutex, Weak};
pub const BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtmanager";
@@ -44,10 +47,17 @@
/// Create and start a new VM with the given configuration, assigning it the next available CID.
///
/// Returns a binder `IVirtualMachine` object referring to it, as a handle for the client.
- fn startVm(&self, config_path: &str) -> binder::Result<Strong<dyn IVirtualMachine>> {
+ fn startVm(
+ &self,
+ config_path: &str,
+ log_fd: Option<&ParcelFileDescriptor>,
+ ) -> binder::Result<Strong<dyn IVirtualMachine>> {
let state = &mut *self.state.lock().unwrap();
let cid = state.next_cid;
- let instance = Arc::new(start_vm(config_path, cid)?);
+ let log_fd = log_fd
+ .map(|fd| fd.as_ref().try_clone().map_err(|_| StatusCode::UNKNOWN_ERROR))
+ .transpose()?;
+ let instance = Arc::new(start_vm(config_path, cid, log_fd)?);
// TODO(qwandor): keep track of which CIDs are currently in use so that we can reuse them.
state.next_cid = state.next_cid.checked_add(1).ok_or(StatusCode::UNKNOWN_ERROR)?;
state.add_vm(Arc::downgrade(&instance));
@@ -140,12 +150,12 @@
/// Start a new VM instance from the given VM config filename. This assumes the VM is not already
/// running.
-fn start_vm(config_path: &str, cid: Cid) -> binder::Result<VmInstance> {
+fn start_vm(config_path: &str, cid: Cid, log_fd: Option<File>) -> binder::Result<VmInstance> {
let config = VmConfig::load(config_path).map_err(|e| {
error!("Failed to load VM config {}: {:?}", config_path, e);
StatusCode::BAD_VALUE
})?;
- Ok(VmInstance::start(&config, cid, config_path).map_err(|e| {
+ Ok(VmInstance::start(&config, cid, config_path, log_fd).map_err(|e| {
error!("Failed to start VM {}: {:?}", config_path, e);
StatusCode::UNKNOWN_ERROR
})?)
diff --git a/virtmanager/src/crosvm.rs b/virtmanager/src/crosvm.rs
index 4ae1fcd..814a1a7 100644
--- a/virtmanager/src/crosvm.rs
+++ b/virtmanager/src/crosvm.rs
@@ -18,6 +18,7 @@
use crate::Cid;
use anyhow::Error;
use log::{debug, error, info};
+use std::fs::File;
use std::process::{Child, Command};
const CROSVM_PATH: &str = "/apex/com.android.virt/bin/crosvm";
@@ -42,8 +43,13 @@
/// Start an instance of `crosvm` to manage a new VM. The `crosvm` instance will be killed when
/// the `VmInstance` is dropped.
- pub fn start(config: &VmConfig, cid: Cid, config_path: &str) -> Result<VmInstance, Error> {
- let child = run_vm(config, cid)?;
+ pub fn start(
+ config: &VmConfig,
+ cid: Cid,
+ config_path: &str,
+ log_fd: Option<File>,
+ ) -> Result<VmInstance, Error> {
+ let child = run_vm(config, cid, log_fd)?;
Ok(VmInstance::new(child, cid, config_path))
}
}
@@ -64,14 +70,18 @@
}
/// Start an instance of `crosvm` to manage a new VM.
-fn run_vm(config: &VmConfig, cid: Cid) -> Result<Child, Error> {
+fn run_vm(config: &VmConfig, cid: Cid, log_fd: Option<File>) -> Result<Child, Error> {
config.validate()?;
let mut command = Command::new(CROSVM_PATH);
// TODO(qwandor): Remove --disable-sandbox.
command.arg("run").arg("--disable-sandbox").arg("--cid").arg(cid.to_string());
- // TODO(jiyong): Don't redirect console to the host syslog
- command.arg("--serial=type=syslog");
+ if let Some(log_fd) = log_fd {
+ command.stdout(log_fd);
+ } else {
+ // Ignore console output.
+ command.arg("--serial=type=sink");
+ }
if let Some(bootloader) = &config.bootloader {
command.arg("--bios").arg(bootloader);
}
diff --git a/virtmanager/src/main.rs b/virtmanager/src/main.rs
index 3ea33d9..486efeb 100644
--- a/virtmanager/src/main.rs
+++ b/virtmanager/src/main.rs
@@ -33,9 +33,9 @@
type Cid = u32;
fn main() {
- android_logger::init_once(android_logger::Config::default().with_tag(LOG_TAG).with_min_level(
- if env!("TARGET_BUILD_VARIANT") == "user" { Level::Info } else { Level::Trace },
- ));
+ android_logger::init_once(
+ android_logger::Config::default().with_tag(LOG_TAG).with_min_level(Level::Trace),
+ );
let virt_manager = VirtManager::default();
let virt_manager = BnVirtManager::new_binder(virt_manager);
diff --git a/vm/Android.bp b/vm/Android.bp
index 0de6cae..248af4d 100644
--- a/vm/Android.bp
+++ b/vm/Android.bp
@@ -12,7 +12,9 @@
"libanyhow",
"libbinder_rs",
"libenv_logger",
+ "liblibc",
"liblog_rust",
+ "libstructopt",
],
apex_available: [
"com.android.virt",
diff --git a/vm/src/main.rs b/vm/src/main.rs
index df375e4..34031f7 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -17,27 +17,39 @@
mod sync;
use android_system_virtmanager::aidl::android::system::virtmanager::IVirtManager::IVirtManager;
-use android_system_virtmanager::binder::{get_interface, ProcessState, Strong};
-use anyhow::{bail, Context, Error};
+use android_system_virtmanager::binder::{
+ get_interface, ParcelFileDescriptor, ProcessState, Strong,
+};
+use anyhow::{Context, Error};
// TODO: Import these via android_system_virtmanager::binder once https://r.android.com/1619403 is
// submitted.
use binder::{DeathRecipient, IBinder};
-use std::env;
-use std::process::exit;
+use std::fs::File;
+use std::io;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::path::PathBuf;
+use structopt::clap::AppSettings;
+use structopt::StructOpt;
use sync::AtomicFlag;
const VIRT_MANAGER_BINDER_SERVICE_IDENTIFIER: &str = "android.system.virtmanager";
+#[derive(StructOpt)]
+#[structopt(no_version, global_settings = &[AppSettings::DisableVersion])]
+enum Opt {
+ /// Run a virtual machine
+ Run {
+ /// Path to VM config JSON
+ #[structopt(parse(from_os_str))]
+ config: PathBuf,
+ },
+ /// List running virtual machines
+ List,
+}
+
fn main() -> Result<(), Error> {
env_logger::init();
-
- let args: Vec<_> = env::args().collect();
- if args.len() < 2 {
- eprintln!("Usage:");
- eprintln!(" {} run <vm_config.json>", args[0]);
- eprintln!(" {} list", args[0]);
- exit(1);
- }
+ let opt = Opt::from_args();
// We need to start the thread pool for Binder to work properly, especially link_to_death.
ProcessState::start_thread_pool();
@@ -45,16 +57,18 @@
let virt_manager = get_interface(VIRT_MANAGER_BINDER_SERVICE_IDENTIFIER)
.context("Failed to find Virt Manager service")?;
- match args[1].as_ref() {
- "run" if args.len() == 3 => command_run(virt_manager, &args[2]),
- "list" if args.len() == 2 => command_list(virt_manager),
- command => bail!("Invalid command '{}' or wrong number of arguments", command),
+ match opt {
+ Opt::Run { config } => command_run(virt_manager, &config),
+ Opt::List => command_list(virt_manager),
}
}
/// Run a VM from the given configuration file.
-fn command_run(virt_manager: Strong<dyn IVirtManager>, config_filename: &str) -> Result<(), Error> {
- let vm = virt_manager.startVm(config_filename).context("Failed to start VM")?;
+fn command_run(virt_manager: Strong<dyn IVirtManager>, config_path: &PathBuf) -> Result<(), Error> {
+ let config_filename = config_path.to_str().context("Failed to parse VM config path")?;
+ let stdout_file = ParcelFileDescriptor::new(duplicate_stdout()?);
+ let vm =
+ virt_manager.startVm(config_filename, Some(&stdout_file)).context("Failed to start VM")?;
let cid = vm.getCid().context("Failed to get CID")?;
println!("Started VM from {} with CID {}.", config_filename, cid);
@@ -85,3 +99,18 @@
dead.wait();
Ok(())
}
+
+/// Safely duplicate the standard output file descriptor.
+fn duplicate_stdout() -> io::Result<File> {
+ let stdout_fd = io::stdout().as_raw_fd();
+ // Safe because this just duplicates a file descriptor which we know to be valid, and we check
+ // for an error.
+ let dup_fd = unsafe { libc::dup(stdout_fd) };
+ if dup_fd < 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ // Safe because we have just duplicated the file descriptor so we own it, and `from_raw_fd`
+ // takes ownership of it.
+ Ok(unsafe { File::from_raw_fd(dup_fd) })
+ }
+}