authfs: Add MerkleLeaves for integrity bookkeeping

MerkleLeaves will be used by a "writer" to remember the hashes of
written blocks for integrity checking. For example, when a file is
written from a trusted environment to untrusted storage or a remote
endpoint, MerkleLeaves allows the writer to verify later reads with a
cryptographically strong hash.

Besides verification, the tree can, on request, be grown from the
leaves up to generate the root hash and fs-verity digest.
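
A minimal writer-side sketch (hypothetical caller code; it assumes
whole 4K blocks and elides error handling):

  let mut tree = MerkleLeaves::new();
  for (index, block) in blocks.iter().enumerate() {
      let hash = Sha256Hasher::new()?.update(block)?.finalize()?;
      tree.update_hash(index, &hash, (index as u64 + 1) * CHUNK_SIZE);
  }
  let fsverity_digest = tree.calculate_fsverity_digest()?;
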
- fsverity/builder.rs: implements MerkleLeaves
- fsverity/verifier.rs: renamed from fsverity.rs, with minor changes
- fsverity/common.rs: common utils from the original fsverity.rs, with
  one additional error added to the enum
- fsverity/sys.rs: constants from linux/fsverity.h
- crypto.rs: more helper functions / constants

Bug: 171279640
Test: atest authfs_device_test_src_lib
Change-Id: I76e5ebd81a2f2afa017e3c670774ccbb797766df
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
new file mode 100644
index 0000000..607d3a7
--- /dev/null
+++ b/authfs/src/fsverity/builder.rs
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use crate::common::CHUNK_SIZE;
+use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+
+const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
+const HASH_PER_PAGE: usize = CHUNK_SIZE as usize / HASH_SIZE;
+
+/// `MerkleLeaves` can be used by its callers to bookkeep integrity data (block hashes) for
+/// their bytes. It can also generate the standard fs-verity digest for the source data.
+///
+/// It's in-memory because for the initial use cases, we don't need to read back an existing file,
+/// and only need to deal with new files. Also, considering that the output file won't be large at
+/// the moment, it is sufficient to simply keep the Merkle tree in memory in the trusted world. To
+/// further simplify the initial implementation, we only need to keep the leaf nodes in memory, and
+/// generate the tree / root hash when requested.
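+///
+/// # Example
+///
+/// A minimal sketch of the intended flow (`block_hash` is assumed to be the SHA-256 hash of a
+/// full 4K block):
+///
+/// ```ignore
+/// let mut tree = MerkleLeaves::new();
+/// tree.update_hash(0, &block_hash, 4096); // bookkeep block 0; file is now 4096 bytes
+/// assert!(tree.is_consistent(0, &block_hash)); // later, check a read-back hash
+/// let digest = tree.calculate_fsverity_digest()?; // root hash + file size -> digest
+/// ```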
+pub struct MerkleLeaves {
+ leaves: Vec<Sha256Hash>,
+ file_size: u64,
+}
+
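+/// Hashes one level of the tree into the next level up: the input hashes are grouped into 4K
+/// pages (the last page is zero-padded), and each page is hashed to produce one output hash.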
+fn hash_all_pages(source: &[Sha256Hash]) -> Result<Vec<Sha256Hash>, CryptoError> {
+ source
+ .chunks(HASH_PER_PAGE)
+ .map(|chunk| {
+ let padding_bytes = (HASH_PER_PAGE - chunk.len()) * HASH_SIZE;
+ Ok(Sha256Hasher::new()?
+ .update_from(chunk)?
+ .update(&vec![0u8; padding_bytes])?
+ .finalize()?)
+ })
+ .collect()
+}
+
+#[allow(dead_code)]
+impl MerkleLeaves {
+ /// Creates a `MerkleLeaves` instance with empty data.
+ pub fn new() -> Self {
+ Self { leaves: Vec::new(), file_size: 0 }
+ }
+
+ /// Updates the hash of the `index`-th leaf, and increases the file size to `size_at_least`
+ /// if the current size is smaller.
+ pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash, size_at_least: u64) {
+ // +1 since index is zero-based.
+ if self.leaves.len() < index + 1 {
+ // When resizing, fill new leaves with the hash of an all-zero block by default. This
+ // makes it easy to handle holes in a file.
+ self.leaves.resize(index + 1, Sha256Hasher::HASH_OF_4096_ZEROS);
+ }
+ self.leaves[index].clone_from_slice(hash);
+
+ if size_at_least > self.file_size {
+ self.file_size = size_at_least;
+ }
+ }
+
+ /// Returns whether `index` is within the bound of leaves.
+ pub fn is_index_valid(&self, index: usize) -> bool {
+ index < self.leaves.len()
+ }
+
+ /// Returns whether the `index`-th hash is consistent with `hash`.
+ pub fn is_consistent(&self, index: usize, hash: &Sha256Hash) -> bool {
+ if let Some(element) = self.leaves.get(index) {
+ element == hash
+ } else {
+ false
+ }
+ }
+
+ fn calculate_root_hash(&self) -> Result<Sha256Hash, FsverityError> {
+ match self.leaves.len() {
+ // Special cases per fs-verity digest definition.
+ 0 => {
+ debug_assert_eq!(self.file_size, 0);
+ Ok([0u8; HASH_SIZE])
+ }
+ 1 => {
+ debug_assert!(self.file_size <= CHUNK_SIZE && self.file_size > 0);
+ Ok(self.leaves[0])
+ }
+ n => {
+ debug_assert_eq!((self.file_size - 1) / CHUNK_SIZE + 1, n as u64);
+ let size_for_equivalent = n as u64 * CHUNK_SIZE;
+ let level = merkle_tree_height(size_for_equivalent).unwrap(); // safe since n > 0
+
+ // `self.leaves` is only borrowed, so it cannot be the fold's initial state below. Hash
+ // it once manually first to avoid a copy and to get an owned value of the right type.
+ let second_level = hash_all_pages(&self.leaves)?;
+ let hashes =
+ (1..=level).try_fold(second_level, |source, _| hash_all_pages(&source))?;
+ if hashes.len() != 1 {
+ Err(FsverityError::InvalidState)
+ } else {
+ Ok(hashes.into_iter().next().unwrap())
+ }
+ }
+ }
+ }
+
+ /// Returns the fs-verity digest based on the current tree and file size.
+ pub fn calculate_fsverity_digest(&self) -> Result<Sha256Hash, FsverityError> {
+ let root_hash = self.calculate_root_hash()?;
+ Ok(build_fsverity_digest(&root_hash, self.file_size)?)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ // Test data below can be generated by:
+ // $ perl -e 'print "\x{00}" x 6000' > foo
+ // $ perl -e 'print "\x{01}" x 5000' >> foo
+ // $ fsverity digest foo
+ use super::*;
+ use anyhow::Result;
+
+ #[test]
+ fn merkle_tree_empty_file() -> Result<()> {
+ assert_eq!(
+ to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95"),
+ generate_fsverity_digest_sequentially(&Vec::new())?
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn merkle_tree_file_size_less_than_or_equal_to_4k() -> Result<()> {
+ // Test a file that contains 4096 '\x01' bytes.
+ assert_eq!(
+ to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af"),
+ generate_fsverity_digest_sequentially(&vec![1; 4096])?
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn merkle_tree_more_sizes() -> Result<()> {
+ // Test files that contain more than 4096 '\x01' bytes.
+
+ assert_eq!(
+ to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52"),
+ generate_fsverity_digest_sequentially(&vec![1; 4097])?
+ );
+
+ assert_eq!(
+ to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d"),
+ generate_fsverity_digest_sequentially(&vec![1; 8192])?
+ );
+
+ // Test with max size that still fits in 2 levels.
+ assert_eq!(
+ to_u8_vec("26b7c190a34e19f420808ee7ec233b09fa6c34543b5a9d2950530114c205d14f"),
+ generate_fsverity_digest_sequentially(&vec![1; 524288])?
+ );
+
+ // Test with data that requires 3 levels.
+ assert_eq!(
+ to_u8_vec("316835d9be1c95b5cd55d07ae7965d651689efad186e26cbf680e40b683a3262"),
+ generate_fsverity_digest_sequentially(&vec![1; 524289])?
+ );
+ Ok(())
+ }
+
+ #[test]
+ fn merkle_tree_non_sequential() -> Result<()> {
+ let mut tree = MerkleLeaves::new();
+ let hash = Sha256Hasher::new()?.update(&vec![1u8; CHUNK_SIZE as usize])?.finalize()?;
+
+ // Update the hashes of 4 all-ones blocks, out of order.
+ tree.update_hash(1, &hash, CHUNK_SIZE * 2);
+ tree.update_hash(3, &hash, CHUNK_SIZE * 4);
+ tree.update_hash(0, &hash, CHUNK_SIZE);
+ tree.update_hash(2, &hash, CHUNK_SIZE * 3);
+
+ assert_eq!(
+ to_u8_vec("7d3c0d2e1dc54230b20ed875f5f3a4bd3f9873df601936b3ca8127d4db3548f3"),
+ tree.calculate_fsverity_digest()?
+ );
+ Ok(())
+ }
+
+ fn generate_fsverity_digest_sequentially(test_data: &[u8]) -> Result<Sha256Hash> {
+ let mut tree = MerkleLeaves::new();
+ for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
+ let hash = Sha256Hasher::new()?
+ .update(&chunk)?
+ .update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
+ .finalize()?;
+
+ tree.update_hash(index, &hash, CHUNK_SIZE * index as u64 + chunk.len() as u64);
+ }
+ Ok(tree.calculate_fsverity_digest()?)
+ }
+
+ fn to_u8_vec(hex_str: &str) -> Vec<u8> {
+ assert!(hex_str.len() % 2 == 0);
+ (0..hex_str.len())
+ .step_by(2)
+ .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
+ .collect()
+ }
+}
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
new file mode 100644
index 0000000..8889f5c
--- /dev/null
+++ b/authfs/src/fsverity/common.rs
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::io;
+
+use thiserror::Error;
+
+use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_LOG_BLOCKSIZE, FS_VERITY_VERSION};
+use crate::common::{divide_roundup, CHUNK_SIZE};
+use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+
+#[derive(Error, Debug)]
+pub enum FsverityError {
+ #[error("Cannot verify a signature")]
+ BadSignature,
+ #[error("Insufficient data, only got {0}")]
+ InsufficientData(usize),
+ #[error("Cannot verify a block")]
+ CannotVerify,
+ #[error("I/O error")]
+ Io(#[from] io::Error),
+ #[error("Crypto")]
+ UnexpectedCryptoError(#[from] CryptoError),
+ #[error("Invalid state")]
+ InvalidState,
+}
+
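+/// Returns ceil(log128(num)), or None if `num` is 0. Since a 4K tree node holds 128 SHA-256
+/// hashes, this is the number of 128-way grouping rounds needed to reduce `num` nodes to one.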
+fn log128_ceil(num: u64) -> Option<u64> {
+ match num {
+ 0 => None,
+ n => Some(divide_roundup(64 - (n - 1).leading_zeros() as u64, 7)),
+ }
+}
+
+/// Returns the Merkle tree height for our tree configuration, or None if the size is 0.
+pub fn merkle_tree_height(data_size: u64) -> Option<u64> {
+ let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+ let hash_pages = divide_roundup(data_size, hashes_per_node * CHUNK_SIZE);
+ log128_ceil(hash_pages)
+}
+
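+/// Computes the fs-verity digest, which is defined as the hash of the fsverity_descriptor
+/// struct embedding `root_hash` and `file_size`.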
+pub fn build_fsverity_digest(
+ root_hash: &Sha256Hash,
+ file_size: u64,
+) -> Result<Sha256Hash, CryptoError> {
+ // Little-endian byte representation of fsverity_descriptor from linux/fsverity.h
+ // Not FFI-ed as it seems easier to deal with the raw bytes manually.
+ Sha256Hasher::new()?
+ .update(&FS_VERITY_VERSION.to_le_bytes())? // version
+ .update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes())? // hash_algorithm
+ .update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes())? // log_blocksize
+ .update(&0u8.to_le_bytes())? // salt_size
+ .update(&0u32.to_le_bytes())? // sig_size
+ .update(&file_size.to_le_bytes())? // data_size
+ .update(root_hash)? // root_hash, first 32 bytes
+ .update(&[0u8; 32])? // root_hash, last 32 bytes, always 0 because we are using sha256.
+ .update(&[0u8; 32])? // salt
+ .update(&[0u8; 32])? // reserved
+ .update(&[0u8; 32])? // reserved
+ .update(&[0u8; 32])? // reserved
+ .update(&[0u8; 32])? // reserved
+ .update(&[0u8; 16])? // reserved
+ .finalize()
+}
diff --git a/authfs/src/fsverity/sys.rs b/authfs/src/fsverity/sys.rs
new file mode 100644
index 0000000..b3222db
--- /dev/null
+++ b/authfs/src/fsverity/sys.rs
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/// Magic used in fs-verity digest
+pub const FS_VERITY_MAGIC: &[u8; 8] = b"FSVerity";
+
+/// fs-verity version that we are using
+pub const FS_VERITY_VERSION: u8 = 1;
+
+/// Hash algorithm to use from linux/fsverity.h
+pub const FS_VERITY_HASH_ALG_SHA256: u8 = 1;
+
+/// Log 2 of the block size (only 4096 is supported now)
+pub const FS_VERITY_LOG_BLOCKSIZE: u8 = 12;
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
new file mode 100644
index 0000000..fd108f5
--- /dev/null
+++ b/authfs/src/fsverity/verifier.rs
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use libc::EIO;
+use std::io;
+
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_MAGIC};
+use crate::auth::Authenticator;
+use crate::common::{divide_roundup, CHUNK_SIZE};
+use crate::crypto::{CryptoError, Sha256Hasher};
+use crate::reader::ReadOnlyDataByChunk;
+
+const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
+
+// The size of `struct fsverity_formatted_digest` in Linux with SHA-256.
+const SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256: usize = 12 + Sha256Hasher::HASH_SIZE;
+
+type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
+
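+/// Hashes `chunk` as if it were zero-padded to `pad_to` bytes.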
+fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
+ let padding_size = pad_to - chunk.len();
+ Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
+}
+
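+/// Verifies `chunk` (the `chunk_index`-th chunk of a `file_size`-byte file) against
+/// `merkle_tree` by hashing up from the leaf, and returns the computed root hash on success.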
+fn verity_check<T: ReadOnlyDataByChunk>(
+ chunk: &[u8],
+ chunk_index: u64,
+ file_size: u64,
+ merkle_tree: &T,
+) -> Result<HashBuffer, FsverityError> {
+ // The caller should not be able to produce a chunk in the first place if `file_size` is 0.
+ // The current implementation expects to crash when a `ReadOnlyDataByChunk` implementation
+ // reads beyond the file size, including for an empty file.
+ assert_ne!(file_size, 0);
+
+ let chunk_hash = hash_with_padding(&chunk, CHUNK_SIZE as usize)?;
+
+ fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
+ chunk_hash,
+ |actual_hash, result| {
+ let (merkle_chunk, hash_offset_in_chunk) = result?;
+ let expected_hash =
+ &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
+ if actual_hash != expected_hash {
+ return Err(FsverityError::CannotVerify);
+ }
+ Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
+ },
+ )
+}
+
+/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
+/// from the leaf to the root. Each item carries the data of a tree node, along with the offset
+/// within that node of the child node's hash. It is up to the iterator user to use the node and
+/// hash, e.g. for the actual verification.
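+///
+/// The Merkle tree is expected to be laid out with the root level at offset 0, followed by the
+/// lower levels down to the leaf level, each level padded to the 4K chunk size.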
+#[allow(clippy::needless_collect)]
+fn fsverity_walk<T: ReadOnlyDataByChunk>(
+ chunk_index: u64,
+ file_size: u64,
+ merkle_tree: &T,
+) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
+ let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+ debug_assert_eq!(hashes_per_node, 128u64);
+ let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
+ let root_to_leaf_steps = (0..=max_level)
+ .rev()
+ .map(|x| {
+ let leaves_per_hash = hashes_per_node.pow(x);
+ let leaves_size_per_hash = CHUNK_SIZE * leaves_per_hash;
+ let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
+ let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
+ let level_size = nodes_at_level * CHUNK_SIZE;
+ let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
+ (level_size, offset_in_level)
+ })
+ .scan(0, |level_offset, (level_size, offset_in_level)| {
+ let this_level_offset = *level_offset;
+ *level_offset += level_size;
+ let global_hash_offset = this_level_offset + offset_in_level;
+ Some(global_hash_offset)
+ })
+ .map(|global_hash_offset| {
+ let chunk_index = global_hash_offset / CHUNK_SIZE;
+ let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
+ (chunk_index, hash_offset_in_chunk)
+ })
+ .collect::<Vec<_>>(); // Needs to collect first to be able to reverse below.
+
+ Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
+ let mut merkle_chunk = [0u8; 4096];
+ // read_chunk is supposed to return a full chunk, or an incomplete one at the end of the
+ // file. In the incomplete case, the hash is calculated with 0-padding to the chunk size.
+ // Therefore, we don't need to check the returned size here.
+ let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
+ Ok((merkle_chunk, hash_offset_in_chunk))
+ }))
+}
+
+fn build_fsverity_formatted_digest(
+ root_hash: &HashBuffer,
+ file_size: u64,
+) -> Result<[u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256], CryptoError> {
+ let digest = build_fsverity_digest(root_hash, file_size)?;
+ // Little-endian byte representation of fsverity_formatted_digest from linux/fsverity.h
+ // Not FFI-ed as it seems easier to deal with the raw bytes manually.
+ let mut formatted_digest = [0u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256];
+ formatted_digest[0..8].copy_from_slice(FS_VERITY_MAGIC);
+ formatted_digest[8..10].copy_from_slice(&(FS_VERITY_HASH_ALG_SHA256 as u16).to_le_bytes());
+ formatted_digest[10..12].copy_from_slice(&(Sha256Hasher::HASH_SIZE as u16).to_le_bytes());
+ formatted_digest[12..].copy_from_slice(&digest);
+ Ok(formatted_digest)
+}
+
+pub struct FsverityChunkedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
+ chunked_file: F,
+ file_size: u64,
+ merkle_tree: M,
+ root_hash: HashBuffer,
+}
+
+impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> FsverityChunkedFileReader<F, M> {
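+ /// Creates an instance that reads from `chunked_file` and verifies each chunk against
+ /// `merkle_tree`. Construction fails with `BadSignature` unless `sig` verifies against the
+ /// fs-verity formatted digest built from the tree's root hash and `file_size`.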
+ pub fn new<A: Authenticator>(
+ authenticator: &A,
+ chunked_file: F,
+ file_size: u64,
+ sig: Vec<u8>,
+ merkle_tree: M,
+ ) -> Result<FsverityChunkedFileReader<F, M>, FsverityError> {
+ let mut buf = [0u8; CHUNK_SIZE as usize];
+ let size = merkle_tree.read_chunk(0, &mut buf)?;
+ if buf.len() != size {
+ return Err(FsverityError::InsufficientData(size));
+ }
+ let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
+ let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
+ let valid = authenticator.verify(&sig, &formatted_digest)?;
+ if valid {
+ Ok(FsverityChunkedFileReader { chunked_file, file_size, merkle_tree, root_hash })
+ } else {
+ Err(FsverityError::BadSignature)
+ }
+ }
+}
+
+impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> ReadOnlyDataByChunk
+ for FsverityChunkedFileReader<F, M>
+{
+ fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+ debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+ let size = self.chunked_file.read_chunk(chunk_index, buf)?;
+ let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
+ .map_err(|_| io::Error::from_raw_os_error(EIO))?;
+ if root_hash != self.root_hash {
+ Err(io::Error::from_raw_os_error(EIO))
+ } else {
+ Ok(size)
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::auth::FakeAuthenticator;
+ use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
+ use anyhow::Result;
+ use std::fs::File;
+ use std::io::Read;
+
+ type LocalFsverityChunkedFileReader =
+ FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
+
+ fn total_chunk_number(file_size: u64) -> u64 {
+ (file_size + 4095) / 4096
+ }
+
+ // Returns a reader with fs-verity verification and the file size.
+ fn new_reader_with_fsverity(
+ content_path: &str,
+ merkle_tree_path: &str,
+ signature_path: &str,
+ ) -> Result<(LocalFsverityChunkedFileReader, u64)> {
+ let file_reader = ChunkedFileReader::new(File::open(content_path)?)?;
+ let file_size = file_reader.len();
+ let merkle_tree = ChunkedFileReader::new(File::open(merkle_tree_path)?)?;
+ let mut sig = Vec::new();
+ let _ = File::open(signature_path)?.read_to_end(&mut sig)?;
+ let authenticator = FakeAuthenticator::always_succeed();
+ Ok((
+ FsverityChunkedFileReader::new(
+ &authenticator,
+ file_reader,
+ file_size,
+ sig,
+ merkle_tree,
+ )?,
+ file_size,
+ ))
+ }
+
+ #[test]
+ fn fsverity_verify_full_read_4k() -> Result<()> {
+ let (file_reader, file_size) = new_reader_with_fsverity(
+ "testdata/input.4k",
+ "testdata/input.4k.merkle_dump",
+ "testdata/input.4k.fsv_sig",
+ )?;
+
+ for i in 0..total_chunk_number(file_size) {
+ let mut buf = [0u8; 4096];
+ assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+ }
+ Ok(())
+ }
+
+ #[test]
+ fn fsverity_verify_full_read_4k1() -> Result<()> {
+ let (file_reader, file_size) = new_reader_with_fsverity(
+ "testdata/input.4k1",
+ "testdata/input.4k1.merkle_dump",
+ "testdata/input.4k1.fsv_sig",
+ )?;
+
+ for i in 0..total_chunk_number(file_size) {
+ let mut buf = [0u8; 4096];
+ assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+ }
+ Ok(())
+ }
+
+ #[test]
+ fn fsverity_verify_full_read_4m() -> Result<()> {
+ let (file_reader, file_size) = new_reader_with_fsverity(
+ "testdata/input.4m",
+ "testdata/input.4m.merkle_dump",
+ "testdata/input.4m.fsv_sig",
+ )?;
+
+ for i in 0..total_chunk_number(file_size) {
+ let mut buf = [0u8; 4096];
+ assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+ }
+ Ok(())
+ }
+
+ #[test]
+ fn fsverity_verify_bad_merkle_tree() -> Result<()> {
+ let (file_reader, _) = new_reader_with_fsverity(
+ "testdata/input.4m",
+ "testdata/input.4m.merkle_dump.bad", // First leaf node is corrupted.
+ "testdata/input.4m.fsv_sig",
+ )?;
+
+ // A corrupted lowest-level node (a 4K chunk that contains 128 SHA-256 hashes) fails the
+ // reads of all the data chunks it covers, but not of the chunks outside its coverage.
+ let mut buf = [0u8; 4096];
+ let num_hashes = 4096 / 32;
+ let last_index = num_hashes;
+ for i in 0..last_index {
+ assert!(file_reader.read_chunk(i, &mut buf[..]).is_err());
+ }
+ assert!(file_reader.read_chunk(last_index, &mut buf[..]).is_ok());
+ Ok(())
+ }
+
+ #[test]
+ fn invalid_signature() -> Result<()> {
+ let authenticator = FakeAuthenticator::always_fail();
+ let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
+ let file_size = file_reader.len();
+ let merkle_tree = ChunkedFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
+ let sig = include_bytes!("../../testdata/input.4m.fsv_sig").to_vec();
+ assert!(FsverityChunkedFileReader::new(
+ &authenticator,
+ file_reader,
+ file_size,
+ sig,
+ merkle_tree
+ )
+ .is_err());
+ Ok(())
+ }
+}