Read Merkle tree once per file
EagerChunkReader is added to wrap the Merkle tree reader and read the
whole tree eagerly. Also add a function to calculate the tree size, in
order to know how many bytes to read.
Separately, add a check in fd_server to prevent crash when the read size
is 0.
Bug: 182827266
Test: atest AuthFsHostTest authfs_device_test_src_lib
Change-Id: Ibff8c9ac091f1449aec8f4a52cd148e6f41d7b33
diff --git a/authfs/src/file.rs b/authfs/src/file.rs
index 9bbf3ef..44e60d8 100644
--- a/authfs/src/file.rs
+++ b/authfs/src/file.rs
@@ -8,10 +8,11 @@
use binder::unstable_api::{new_spibinder, AIBinder};
use binder::FromIBinder;
+use std::convert::TryFrom;
use std::io;
use std::path::{Path, MAIN_SEPARATOR};
-use crate::common::CHUNK_SIZE;
+use crate::common::{divide_roundup, CHUNK_SIZE};
use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService::IVirtFdService;
use authfs_aidl_interface::binder::{Status, Strong};
@@ -83,3 +84,39 @@
Err(io::Error::from_raw_os_error(libc::EINVAL))
}
}
+
+pub struct EagerChunkReader {
+ buffer: Vec<u8>,
+}
+
+impl EagerChunkReader {
+ pub fn new<F: ReadByChunk>(chunked_file: F, file_size: u64) -> io::Result<EagerChunkReader> {
+ let last_index = divide_roundup(file_size, CHUNK_SIZE);
+ let file_size = usize::try_from(file_size).unwrap();
+ let mut buffer = Vec::with_capacity(file_size);
+ let mut chunk_buffer = [0; CHUNK_SIZE as usize];
+ for index in 0..last_index {
+ let size = chunked_file.read_chunk(index, &mut chunk_buffer)?;
+ buffer.extend_from_slice(&chunk_buffer[..size]);
+ }
+ if buffer.len() < file_size {
+ Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("Insufficient data size ({} < {})", buffer.len(), file_size),
+ ))
+ } else {
+ Ok(EagerChunkReader { buffer })
+ }
+ }
+}
+
+impl ReadByChunk for EagerChunkReader {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
+ if let Some(chunk) = &self.buffer.chunks(CHUNK_SIZE as usize).nth(chunk_index as usize) {
+ buf[..chunk.len()].copy_from_slice(chunk);
+ Ok(chunk.len())
+ } else {
+ Ok(0) // Read beyond EOF is normal
+ }
+ }
+}
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index 1515574..61ae928 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -20,5 +20,6 @@
mod sys;
mod verifier;
+pub use common::merkle_tree_size;
pub use editor::VerifiedFileEditor;
pub use verifier::VerifiedFileReader;
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
index 8889f5c..83e8ae4 100644
--- a/authfs/src/fsverity/common.rs
+++ b/authfs/src/fsverity/common.rs
@@ -52,6 +52,18 @@
log128_ceil(hash_pages)
}
+/// Returns the size of Merkle tree for `data_size` bytes amount of data.
+pub fn merkle_tree_size(mut data_size: u64) -> u64 {
+ let mut total = 0;
+ while data_size > CHUNK_SIZE {
+ let hash_size = divide_roundup(data_size, CHUNK_SIZE) * Sha256Hasher::HASH_SIZE as u64;
+ let hash_storage_size = divide_roundup(hash_size, CHUNK_SIZE) * CHUNK_SIZE;
+ total += hash_storage_size;
+ data_size = hash_storage_size;
+ }
+ total
+}
+
pub fn build_fsverity_digest(
root_hash: &Sha256Hash,
file_size: u64,
@@ -75,3 +87,22 @@
.update(&[0u8; 16])? // reserved
.finalize()
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_merkle_tree_size() {
+ // To produce groundtruth:
+ // dd if=/dev/zero of=zeros bs=1 count=524289 && \
+ // fsverity digest --out-merkle-tree=tree zeros && \
+ // du -b tree
+ assert_eq!(merkle_tree_size(0), 0);
+ assert_eq!(merkle_tree_size(1), 0);
+ assert_eq!(merkle_tree_size(4096), 0);
+ assert_eq!(merkle_tree_size(4097), 4096);
+ assert_eq!(merkle_tree_size(524288), 4096);
+ assert_eq!(merkle_tree_size(524289), 12288);
+ }
+}
diff --git a/authfs/src/fsverity/metadata/metadata.rs b/authfs/src/fsverity/metadata/metadata.rs
index 0092bee..073e044 100644
--- a/authfs/src/fsverity/metadata/metadata.rs
+++ b/authfs/src/fsverity/metadata/metadata.rs
@@ -40,12 +40,17 @@
/// Read the raw Merkle tree from the metadata, if it exists. The API semantics is similar to a
/// regular pread(2), and may not return full requested buffer.
pub fn read_merkle_tree(&self, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
+ let file_size = self.metadata_file.metadata()?.size();
let start = self.merkle_tree_offset + offset;
- let end = min(self.metadata_file.metadata()?.size(), start + buf.len() as u64);
+ let end = min(file_size, start + buf.len() as u64);
let read_size = (end - start) as usize;
debug_assert!(read_size <= buf.len());
- self.metadata_file.read_exact_at(&mut buf[..read_size], start)?;
- Ok(read_size)
+ if read_size == 0 {
+ Ok(0)
+ } else {
+ self.metadata_file.read_exact_at(&mut buf[..read_size], start)?;
+ Ok(read_size)
+ }
}
}
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index 1add37a..17a0a2a 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -49,6 +49,12 @@
let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
+ // When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
+ // hash of the file content, plus padding.
+ if file_size <= CHUNK_SIZE {
+ return Ok(chunk_hash);
+ }
+
fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
chunk_hash,
|actual_hash, result| {
@@ -141,9 +147,14 @@
merkle_tree: M,
) -> Result<VerifiedFileReader<F, M>, FsverityError> {
let mut buf = [0u8; CHUNK_SIZE as usize];
- let size = merkle_tree.read_chunk(0, &mut buf)?;
- if buf.len() != size {
- return Err(FsverityError::InsufficientData(size));
+ if file_size <= CHUNK_SIZE {
+ let _size = chunked_file.read_chunk(0, &mut buf)?;
+ // The rest of buffer is 0-padded.
+ } else {
+ let size = merkle_tree.read_chunk(0, &mut buf)?;
+ if buf.len() != size {
+ return Err(FsverityError::InsufficientData(size));
+ }
}
let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index cbd24a9..03f832d 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -37,8 +37,8 @@
use crate::common::{divide_roundup, ChunkedSizeIter, CHUNK_SIZE};
use crate::file::{
- validate_basename, Attr, InMemoryDir, RandomWrite, ReadByChunk, RemoteDirEditor,
- RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
+ validate_basename, Attr, EagerChunkReader, InMemoryDir, RandomWrite, ReadByChunk,
+ RemoteDirEditor, RemoteFileEditor, RemoteFileReader,
};
use crate::fsstat::RemoteFsStatsReader;
use crate::fsverity::{VerifiedFileEditor, VerifiedFileReader};
@@ -59,7 +59,7 @@
/// A file type that is verified against fs-verity signature (thus read-only). The file is
/// served from a remote server.
VerifiedReadonly {
- reader: VerifiedFileReader<RemoteFileReader, RemoteMerkleTreeReader>,
+ reader: VerifiedFileReader<RemoteFileReader, EagerChunkReader>,
file_size: u64,
},
/// A file type that is a read-only passthrough from a file on a remote server.
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 0fa3db7..3561b8f 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -45,10 +45,11 @@
use auth::FakeAuthenticator;
use file::{
- Attr, InMemoryDir, RemoteDirEditor, RemoteFileEditor, RemoteFileReader, RemoteMerkleTreeReader,
+ Attr, EagerChunkReader, InMemoryDir, RemoteDirEditor, RemoteFileEditor, RemoteFileReader,
+ RemoteMerkleTreeReader,
};
use fsstat::RemoteFsStatsReader;
-use fsverity::{VerifiedFileEditor, VerifiedFileReader};
+use fsverity::{merkle_tree_size, VerifiedFileEditor, VerifiedFileReader};
use fsverity_digests_proto::fsverity_digests::FSVerityDigests;
use fusefs::{AuthFs, AuthFsEntry};
@@ -176,7 +177,10 @@
RemoteFileReader::new(service.clone(), remote_fd),
file_size,
signature.as_deref(),
- RemoteMerkleTreeReader::new(service.clone(), remote_fd),
+ EagerChunkReader::new(
+ RemoteMerkleTreeReader::new(service.clone(), remote_fd),
+ merkle_tree_size(file_size),
+ )?,
)?,
file_size,
})