authfs: refine/rename trait ReadOnlyDataByChunk
- ReadOnlyDataByChunk is renamed to ReadByChunk
- `fn read_chunk` now takes a fixed-size array of the chunk size
- Behavior change:
  1. Reading beyond the file size should no longer crash; it should return 0.
  2. `read_chunk` should return a full chunk unless it is the last one
     (this was not clearly defined previously).
Bug: 181674212
Bug: 182173887
Test: atest
Change-Id: I7017b5ed986d0bfc594da58a1ed9f59ac555643e
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 543e9ac..81ccd53 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -58,7 +58,7 @@
use super::builder::MerkleLeaves;
use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
-use crate::file::{RandomWrite, ReadOnlyDataByChunk};
+use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
// mapping below.
@@ -70,12 +70,12 @@
/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
/// not be stored in a trusted environment. Only new, empty files are currently supported.
-pub struct VerifiedFileEditor<F: ReadOnlyDataByChunk + RandomWrite> {
+pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
file: F,
merkle_tree: Arc<RwLock<MerkleLeaves>>,
}
-impl<F: ReadOnlyDataByChunk + RandomWrite> VerifiedFileEditor<F> {
+impl<F: ReadByChunk + RandomWrite> VerifiedFileEditor<F> {
/// Wraps a supposedly new file for integrity protection.
pub fn new(file: F) -> Self {
Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
@@ -148,7 +148,7 @@
}
}
-impl<F: ReadOnlyDataByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
+impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
// Since we don't need to support 32-bit CPU, make an assert to make conversion between
// u64 and usize easy below. Otherwise, we need to check `divide_roundup(offset + buf.len()
@@ -214,8 +214,8 @@
}
}
-impl<F: ReadOnlyDataByChunk + RandomWrite> ReadOnlyDataByChunk for VerifiedFileEditor<F> {
- fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
self.file.read_chunk(chunk_index, buf)
}
}
@@ -255,10 +255,8 @@
}
}
- impl ReadOnlyDataByChunk for InMemoryEditor {
- fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
- debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
-
+ impl ReadByChunk for InMemoryEditor {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
if self.fail_read {
return Err(io::Error::new(io::ErrorKind::Other, "test!"));
}
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index 4af360f..13de42a 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -22,7 +22,7 @@
use crate::auth::Authenticator;
use crate::common::{divide_roundup, CHUNK_SIZE};
use crate::crypto::{CryptoError, Sha256Hasher};
-use crate::file::ReadOnlyDataByChunk;
+use crate::file::{ChunkBuffer, ReadByChunk};
const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
@@ -36,14 +36,14 @@
Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
}
-fn verity_check<T: ReadOnlyDataByChunk>(
+fn verity_check<T: ReadByChunk>(
chunk: &[u8],
chunk_index: u64,
file_size: u64,
merkle_tree: &T,
) -> Result<HashBuffer, FsverityError> {
// The caller should not be able to produce a chunk at the first place if `file_size` is 0. The
- // current implementation expects to crash when a `ReadOnlyDataByChunk` implementation reads
+ // current implementation expects to crash when a `ReadByChunk` implementation reads
// beyond the file size, including empty file.
assert_ne!(file_size, 0);
@@ -68,7 +68,7 @@
/// offset of the child node's hash. It is up to the iterator user to use the node and hash,
/// e.g. for the actual verification.
#[allow(clippy::needless_collect)]
-fn fsverity_walk<T: ReadOnlyDataByChunk>(
+fn fsverity_walk<T: ReadByChunk>(
chunk_index: u64,
file_size: u64,
merkle_tree: &T,
@@ -125,14 +125,14 @@
Ok(formatted_digest)
}
-pub struct VerifiedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
+pub struct VerifiedFileReader<F: ReadByChunk, M: ReadByChunk> {
chunked_file: F,
file_size: u64,
merkle_tree: M,
root_hash: HashBuffer,
}
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> VerifiedFileReader<F, M> {
+impl<F: ReadByChunk, M: ReadByChunk> VerifiedFileReader<F, M> {
pub fn new<A: Authenticator>(
authenticator: &A,
chunked_file: F,
@@ -156,11 +156,8 @@
}
}
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> ReadOnlyDataByChunk
- for VerifiedFileReader<F, M>
-{
- fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
- debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+impl<F: ReadByChunk, M: ReadByChunk> ReadByChunk for VerifiedFileReader<F, M> {
+ fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
let size = self.chunked_file.read_chunk(chunk_index, buf)?;
let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
.map_err(|_| io::Error::from_raw_os_error(EIO))?;
@@ -176,7 +173,7 @@
mod tests {
use super::*;
use crate::auth::FakeAuthenticator;
- use crate::file::{LocalFileReader, ReadOnlyDataByChunk};
+ use crate::file::{LocalFileReader, ReadByChunk};
use anyhow::Result;
use std::fs::{self, File};
use std::io::Read;
@@ -215,7 +212,7 @@
for i in 0..total_chunk_number(file_size) {
let mut buf = [0u8; 4096];
- assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+ assert!(file_reader.read_chunk(i, &mut buf).is_ok());
}
Ok(())
}
@@ -230,7 +227,7 @@
for i in 0..total_chunk_number(file_size) {
let mut buf = [0u8; 4096];
- assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+ assert!(file_reader.read_chunk(i, &mut buf).is_ok());
}
Ok(())
}
@@ -245,7 +242,7 @@
for i in 0..total_chunk_number(file_size) {
let mut buf = [0u8; 4096];
- assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+ assert!(file_reader.read_chunk(i, &mut buf).is_ok());
}
Ok(())
}
@@ -264,9 +261,9 @@
let num_hashes = 4096 / 32;
let last_index = num_hashes;
for i in 0..last_index {
- assert!(file_reader.read_chunk(i, &mut buf[..]).is_err());
+ assert!(file_reader.read_chunk(i, &mut buf).is_err());
}
- assert!(file_reader.read_chunk(last_index, &mut buf[..]).is_ok());
+ assert!(file_reader.read_chunk(last_index, &mut buf).is_ok());
Ok(())
}