Read Merkle tree once per file
EagerChunkReader is added to read the wrapper Merkle tree reader. Also
add a function to calculate the tree size, in order to know how many
bytes to read.
Separately, add a check in fd_server to prevent crash when the read size
is 0.
Bug: 182827266
Test: atest AuthFsHostTest authfs_device_test_src_lib
Change-Id: Ibff8c9ac091f1449aec8f4a52cd148e6f41d7b33
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
index 8889f5c..83e8ae4 100644
--- a/authfs/src/fsverity/common.rs
+++ b/authfs/src/fsverity/common.rs
@@ -52,6 +52,18 @@
log128_ceil(hash_pages)
}
+/// Returns the size of Merkle tree for `data_size` bytes amount of data.
+pub fn merkle_tree_size(mut data_size: u64) -> u64 {
+ let mut total = 0;
+ while data_size > CHUNK_SIZE {
+ let hash_size = divide_roundup(data_size, CHUNK_SIZE) * Sha256Hasher::HASH_SIZE as u64;
+ let hash_storage_size = divide_roundup(hash_size, CHUNK_SIZE) * CHUNK_SIZE;
+ total += hash_storage_size;
+ data_size = hash_storage_size;
+ }
+ total
+}
+
pub fn build_fsverity_digest(
root_hash: &Sha256Hash,
file_size: u64,
@@ -75,3 +87,22 @@
.update(&[0u8; 16])? // reserved
.finalize()
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_merkle_tree_size() {
+ // To produce groundtruth:
+ // dd if=/dev/zero of=zeros bs=1 count=524289 && \
+ // fsverity digest --out-merkle-tree=tree zeros && \
+ // du -b tree
+ assert_eq!(merkle_tree_size(0), 0);
+ assert_eq!(merkle_tree_size(1), 0);
+ assert_eq!(merkle_tree_size(4096), 0);
+ assert_eq!(merkle_tree_size(4097), 4096);
+ assert_eq!(merkle_tree_size(524288), 4096);
+ assert_eq!(merkle_tree_size(524289), 12288);
+ }
+}
diff --git a/authfs/src/fsverity/metadata/metadata.rs b/authfs/src/fsverity/metadata/metadata.rs
index 0092bee..073e044 100644
--- a/authfs/src/fsverity/metadata/metadata.rs
+++ b/authfs/src/fsverity/metadata/metadata.rs
@@ -40,12 +40,17 @@
/// Read the raw Merkle tree from the metadata, if it exists. The API semantics is similar to a
/// regular pread(2), and may not return full requested buffer.
pub fn read_merkle_tree(&self, offset: u64, buf: &mut [u8]) -> io::Result<usize> {
+ let file_size = self.metadata_file.metadata()?.size();
let start = self.merkle_tree_offset + offset;
- let end = min(self.metadata_file.metadata()?.size(), start + buf.len() as u64);
+ let end = min(file_size, start + buf.len() as u64);
let read_size = (end - start) as usize;
debug_assert!(read_size <= buf.len());
- self.metadata_file.read_exact_at(&mut buf[..read_size], start)?;
- Ok(read_size)
+ if read_size == 0 {
+ Ok(0)
+ } else {
+ self.metadata_file.read_exact_at(&mut buf[..read_size], start)?;
+ Ok(read_size)
+ }
}
}
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index 1add37a..17a0a2a 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -49,6 +49,12 @@
let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
+ // When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
+ // hash of the file content, plus padding.
+ if file_size <= CHUNK_SIZE {
+ return Ok(chunk_hash);
+ }
+
fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
chunk_hash,
|actual_hash, result| {
@@ -141,9 +147,14 @@
merkle_tree: M,
) -> Result<VerifiedFileReader<F, M>, FsverityError> {
let mut buf = [0u8; CHUNK_SIZE as usize];
- let size = merkle_tree.read_chunk(0, &mut buf)?;
- if buf.len() != size {
- return Err(FsverityError::InsufficientData(size));
+ if file_size <= CHUNK_SIZE {
+ let _size = chunked_file.read_chunk(0, &mut buf)?;
+ // The rest of buffer is 0-padded.
+ } else {
+ let size = merkle_tree.read_chunk(0, &mut buf)?;
+ if buf.len() != size {
+ return Err(FsverityError::InsufficientData(size));
+ }
}
let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;