authfs: support resizing file
There are three major changes:
1. Add resize() to MerkleLeaves as the simple hash container.
2. Handle resize for a writable file. The special case is that the hash
of the new last chunk can change, and the only way to calculate it is to
read the original data back.
3. Handle the size change in FUSE.
Bug: 186265793
Test: atest
Test: dex2oat succeeds (with more local changes)
Change-Id: I1ec20b6f0c69fd3ec24a6d04fc34583962265479
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 85f2abb..8dabbd9 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -11,11 +11,13 @@
edition: "2018",
rustlibs: [
"authfs_aidl_interface-rust",
+ "libandroid_logger",
"libanyhow",
"libauthfs_crypto_bindgen",
"libcfg_if",
"libfuse_rust",
"liblibc",
+ "liblog_rust",
"libstructopt",
"libthiserror",
],
diff --git a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
index 189f43a..d3c0979 100644
--- a/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
+++ b/authfs/aidl/com/android/virt/fs/IVirtFdService.aidl
@@ -51,4 +51,7 @@
* written.
*/
int writeFile(int id, in byte[] buf, long offset);
+
+ /** Resizes the file backed by the given file ID to the new size. */
+ void resize(int id, long size);
}
diff --git a/authfs/fd_server/src/main.rs b/authfs/fd_server/src/main.rs
index 99b6e9e..d35feb1 100644
--- a/authfs/fd_server/src/main.rs
+++ b/authfs/fd_server/src/main.rs
@@ -209,6 +209,24 @@
}
}
}
+
+ fn resize(&self, id: i32, size: i64) -> BinderResult<()> {
+ match &self.get_file_config(id)? {
+ FdConfig::Readonly { .. } => Err(StatusCode::INVALID_OPERATION.into()),
+ FdConfig::ReadWrite(file) => {
+ if size < 0 {
+ return Err(new_binder_exception(
+ ExceptionCode::ILLEGAL_ARGUMENT,
+ "Invalid size to resize to",
+ ));
+ }
+ file.set_len(size as u64).map_err(|e| {
+ error!("resize: set_len error: {}", e);
+ Status::from(ERROR_IO)
+ })
+ }
+ }
+ }
}
fn read_into_buf(file: &File, max_size: usize, offset: u64) -> io::Result<Vec<u8>> {
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 9ff8ea5..4b43786 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -49,4 +49,7 @@
}
Ok(())
}
+
+ /// Resizes the file to the new size.
+ fn resize(&self, size: u64) -> io::Result<()>;
}
diff --git a/authfs/src/file/remote_file.rs b/authfs/src/file/remote_file.rs
index 9d614f5..bd99893 100644
--- a/authfs/src/file/remote_file.rs
+++ b/authfs/src/file/remote_file.rs
@@ -118,6 +118,17 @@
.map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
Ok(size as usize) // within range because size is supposed to <= buf.len(), which is a usize
}
+
+ fn resize(&self, size: u64) -> io::Result<()> {
+ let size =
+ i64::try_from(size).map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
+ self.service
+ .lock()
+ .unwrap()
+ .resize(self.file_fd, size)
+ .map_err(|e| io::Error::new(io::ErrorKind::Other, e.get_description()))?;
+ Ok(())
+ }
}
impl ReadByChunk for RemoteFileEditor {
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index dd1bd04..1842425 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -15,7 +15,7 @@
*/
use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
-use crate::common::CHUNK_SIZE;
+use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
@@ -55,6 +55,19 @@
self.file_size
}
+ /// Grows the `MerkleLeaves` to the new file size. For consistency, if any new leaves
+ /// are added, the leaves/hashes are as if the extended content is all zero.
+ ///
+ /// However, when the change shrinks the leaf number, `MerkleLeaves` does not know if the hash
+ /// of the last chunk has changed, or what the new value should be. As a result, it is up to
+ /// the caller to fix the last leaf if needed.
+ pub fn resize(&mut self, new_file_size: usize) {
+ let new_file_size = new_file_size as u64;
+ let leaves_size = divide_roundup(new_file_size, CHUNK_SIZE);
+ self.leaves.resize(leaves_size as usize, Sha256Hasher::HASH_OF_4096_ZEROS);
+ self.file_size = new_file_size;
+ }
+
/// Updates the hash of the `index`-th leaf, and increase the size to `size_at_least` if the
/// current size is smaller.
pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash, size_at_least: u64) {
@@ -196,6 +209,41 @@
Ok(())
}
+ #[test]
+ fn merkle_tree_grow_leaves() -> Result<()> {
+ let mut tree = MerkleLeaves::new();
+ tree.update_hash(0, &[42; HASH_SIZE], CHUNK_SIZE); // fake hash of a CHUNK_SIZE block
+
+ // Grows the leaves
+ tree.resize(4096 * 3 - 100);
+
+ assert!(tree.is_index_valid(0));
+ assert!(tree.is_index_valid(1));
+ assert!(tree.is_index_valid(2));
+ assert!(!tree.is_index_valid(3));
+ assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(2, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ Ok(())
+ }
+
+ #[test]
+ fn merkle_tree_shrink_leaves() -> Result<()> {
+ let mut tree = MerkleLeaves::new();
+ tree.update_hash(0, &[42; HASH_SIZE], CHUNK_SIZE);
+ tree.update_hash(0, &[42; HASH_SIZE], CHUNK_SIZE * 3);
+
+ // Shrink the leaves
+ tree.resize(CHUNK_SIZE as usize * 2 - 100);
+
+ assert!(tree.is_index_valid(0));
+ assert!(tree.is_index_valid(1));
+ assert!(!tree.is_index_valid(2));
+ // The second chunk is a hole and full of zero. When shrunk, with zero padding, the hash
+ // happens to be consistent to a full-zero chunk.
+ assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ Ok(())
+ }
+
fn generate_fsverity_digest_sequentially(test_data: &[u8]) -> Result<Sha256Hash> {
let mut tree = MerkleLeaves::new();
for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 81ccd53..8468cc9 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -68,6 +68,13 @@
}
}
+fn debug_assert_usize_is_u64() {
+ // Since we don't need to support 32-bit CPU, make an assert to make conversion between
+ // u64 and usize easy below. Otherwise, we need to check `divide_roundup(offset + buf.len())
+ // <= usize::MAX` or handle `TryInto` errors.
+ debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
+}
+
/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
/// not be stored in a trusted environment. Only new, empty files are currently supported.
pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
@@ -150,10 +157,7 @@
impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
- // Since we don't need to support 32-bit CPU, make an assert to make conversion between
- // u64 and usize easy below. Otherwise, we need to check `divide_roundup(offset + buf.len()
- // <= usize::MAX` or handle `TryInto` errors.
- debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
+ debug_assert_usize_is_u64();
// The write range may not be well-aligned with the chunk boundary. There are various cases
// to deal with:
@@ -212,6 +216,42 @@
}
Ok(buf.len())
}
+
+ fn resize(&self, size: u64) -> io::Result<()> {
+ debug_assert_usize_is_u64();
+
+ let mut merkle_tree = self.merkle_tree.write().unwrap();
+ // When truncating the file, we may need to recalculate the hash of the (new)
+ // last chunk. Since the content is provided by the untrusted backend, we need to read the
+ // data back first, verify it, then override the truncated portion with 0-padding for
+ // hashing. As an optimization, we only need to read the data back if the new size isn't a
+ // multiple of CHUNK_SIZE (since the hash is already correct).
+ //
+ // The same thing does not need to happen when the size is growing. Since the new extended
+ // data is always 0, we can just resize the `MerkleLeaves`, where a new hash is always
+ // calculated from 4096 zeros.
+ if size < merkle_tree.file_size() && size % CHUNK_SIZE > 0 {
+ let new_tail_size = (size % CHUNK_SIZE) as usize;
+ let chunk_index = size / CHUNK_SIZE;
+ if new_tail_size > 0 {
+ let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
+ let s = self.read_chunk(chunk_index, &mut buf)?;
+ debug_assert!(new_tail_size <= s);
+
+ let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
+ let new_hash = Sha256Hasher::new()?
+ .update(&buf[..new_tail_size])?
+ .update(&zeros)?
+ .finalize()?;
+ merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
+ }
+ }
+
+ self.file.resize(size)?;
+ merkle_tree.resize(size as usize);
+
+ Ok(())
+ }
}
impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
@@ -253,6 +293,13 @@
self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(&buf);
Ok(buf.len())
}
+
+ fn resize(&self, size: u64) -> io::Result<()> {
+ let size: usize =
+ size.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+ self.data.borrow_mut().resize(size, 0);
+ Ok(())
+ }
}
impl ReadByChunk for InMemoryEditor {
@@ -470,6 +517,88 @@
Ok(())
}
+ #[test]
+ fn test_resize_to_same_size() -> Result<()> {
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
+
+ assert!(file.resize(2048).is_ok());
+ assert_eq!(file.size(), 2048);
+
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_resize_to_grow() -> Result<()> {
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
+
+ // Resize should grow with 0s.
+ assert!(file.resize(4096).is_ok());
+ assert_eq!(file.size(), 4096);
+
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_resize_to_shrink() -> Result<()> {
+ let file = VerifiedFileEditor::new(InMemoryEditor::new());
+ assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
+
+ // Truncate.
+ file.resize(2048)?;
+ assert_eq!(file.size(), 2048);
+
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
+ .as_slice()
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn test_resize_to_shrink_with_read_failure() -> Result<()> {
+ let mut writer = InMemoryEditor::new();
+ writer.fail_read = true;
+ let file = VerifiedFileEditor::new(writer);
+ assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
+
+ // A truncate needs a read back. If the read fails, the resize should fail.
+ assert!(file.resize(2048).is_err());
+ Ok(())
+ }
+
+ #[test]
+ fn test_resize_to_shrink_to_chunk_boundary() -> Result<()> {
+ let mut writer = InMemoryEditor::new();
+ writer.fail_read = true;
+ let file = VerifiedFileEditor::new(writer);
+ assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
+
+ // Truncate to a chunk boundary. A read error doesn't matter since we won't need to
+ // recalculate the leaf hash.
+ file.resize(4096)?;
+ assert_eq!(file.size(), 4096);
+
+ assert_eq!(
+ file.calculate_fsverity_digest()?,
+ to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
+ .as_slice()
+ );
+ Ok(())
+ }
+
fn to_u8_vec(hex_str: &str) -> Vec<u8> {
assert!(hex_str.len() % 2 == 0);
(0..hex_str.len())
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 0cd8b68..d2948c7 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -15,6 +15,7 @@
*/
use anyhow::Result;
+use log::{debug, warn};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ffi::CStr;
@@ -27,8 +28,8 @@
use std::time::Duration;
use fuse::filesystem::{
- Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, ZeroCopyReader,
- ZeroCopyWriter,
+ Context, DirEntry, DirectoryIterator, Entry, FileSystem, FsOptions, SetattrValid,
+ ZeroCopyReader, ZeroCopyWriter,
};
use fuse::mount::MountOption;
@@ -325,6 +326,54 @@
_ => Err(io::Error::from_raw_os_error(libc::EBADF)),
}
}
+
+ fn setattr(
+ &self,
+ _ctx: Context,
+ inode: Inode,
+ attr: libc::stat64,
+ _handle: Option<Handle>,
+ valid: SetattrValid,
+ ) -> io::Result<(libc::stat64, Duration)> {
+ match self.get_file_config(&inode)? {
+ FileConfig::RemoteVerifiedNewFile { editor } => {
+ // Initialize the default stat.
+ let mut new_attr = create_stat(inode, editor.size(), FileMode::ReadWrite)?;
+ // `valid` indicates what fields in `attr` are valid. Update to return correctly.
+ if valid.contains(SetattrValid::SIZE) {
+ // st_size is i64, but the cast should be safe since kernel should not give a
+ // negative size.
+ debug_assert!(attr.st_size >= 0);
+ new_attr.st_size = attr.st_size;
+ editor.resize(attr.st_size as u64)?;
+ }
+
+ if valid.contains(SetattrValid::MODE) {
+ warn!("Changing st_mode is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::UID) {
+ warn!("Changing st_uid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::GID) {
+ warn!("Changing st_gid is not currently supported");
+ return Err(io::Error::from_raw_os_error(libc::ENOSYS));
+ }
+ if valid.contains(SetattrValid::CTIME) {
+ debug!("Ignoring ctime change as authfs does not maintain timestamp currently");
+ }
+ if valid.intersects(SetattrValid::ATIME | SetattrValid::ATIME_NOW) {
+ debug!("Ignoring atime change as authfs does not maintain timestamp currently");
+ }
+ if valid.intersects(SetattrValid::MTIME | SetattrValid::MTIME_NOW) {
+ debug!("Ignoring mtime change as authfs does not maintain timestamp currently");
+ }
+ Ok((new_attr, DEFAULT_METADATA_TIMEOUT))
+ }
+ _ => Err(io::Error::from_raw_os_error(libc::EBADF)),
+ }
+ }
}
/// Mount and start the FUSE instance. This requires CAP_SYS_ADMIN.
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 60f603f..b30195a 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -82,6 +82,10 @@
/// Debug only. A read-only local file without integrity check. Can be multiple.
#[structopt(long, parse(try_from_str = parse_local_ro_file_unverified_ro_option))]
local_ro_file_unverified: Vec<OptionLocalRoFileUnverified>,
+
+ /// Enable debugging features.
+ #[structopt(long)]
+ debug: bool,
}
struct OptionRemoteRoFile {
@@ -294,6 +298,12 @@
fn main() -> Result<()> {
let args = Args::from_args();
+
+ let log_level = if args.debug { log::Level::Debug } else { log::Level::Info };
+ android_logger::init_once(
+ android_logger::Config::default().with_tag("authfs").with_min_level(log_level),
+ );
+
let file_pool = prepare_file_pool(&args)?;
fusefs::loop_forever(file_pool, &args.mount_point)?;
bail!("Unexpected exit after the handler loop")