Merge changes I76e5ebd8,I8e47acb1

* changes:
  authfs: Add MerkleLeaves for integrity bookkeeping
  authfs: Replace a trait bound const w/ a simple const
diff --git a/authfs/src/common.rs b/authfs/src/common.rs
index 2220ae7..522397f 100644
--- a/authfs/src/common.rs
+++ b/authfs/src/common.rs
@@ -14,7 +14,8 @@
  * limitations under the License.
  */
 
-pub const COMMON_PAGE_SIZE: u64 = 4096;
+/// Common block and page size in Linux.
+pub const CHUNK_SIZE: u64 = 4096;
 
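+/// Divides `dividend` by `divisor`, rounding the result up; used e.g. to convert a size in
+/// bytes to a number of chunks.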
 pub fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
     (dividend + divisor - 1) / divisor
diff --git a/authfs/src/crypto.rs b/authfs/src/crypto.rs
index 8b8905c..672dfb6 100644
--- a/authfs/src/crypto.rs
+++ b/authfs/src/crypto.rs
@@ -26,6 +26,8 @@
 
 use authfs_crypto_bindgen::{SHA256_Final, SHA256_Init, SHA256_Update, SHA256_CTX};
 
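+/// A SHA-256 hash value (32 bytes).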
+pub type Sha256Hash = [u8; Sha256Hasher::HASH_SIZE];
+
 pub struct Sha256Hasher {
     ctx: SHA256_CTX,
 }
@@ -33,6 +35,12 @@
 impl Sha256Hasher {
     pub const HASH_SIZE: usize = 32;
 
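+    /// SHA-256 hash of a 4096-byte chunk of zeros, i.e. the leaf hash of a hole in a file.
+    /// Verified by the `sha256_of_4096_zeros` test below.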
+    pub const HASH_OF_4096_ZEROS: [u8; Self::HASH_SIZE] = [
+        0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b,
+        0x02, 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89,
+        0x2c, 0xa7,
+    ];
+
     pub fn new() -> Result<Sha256Hasher, CryptoError> {
         // Safe assuming the crypto FFI should initialize the uninitialized `ctx`, which is
         // currently a pure data struct.
@@ -58,6 +66,17 @@
         }
     }
 
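+    /// Updates the hash from each item of `iter`, in order. Returns `self` so that calls can
+    /// be chained, e.g. `hasher.update_from(chunks)?.finalize()?`.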
+    pub fn update_from<I, T>(&mut self, iter: I) -> Result<&mut Self, CryptoError>
+    where
+        I: IntoIterator<Item = T>,
+        T: AsRef<[u8]>,
+    {
+        for data in iter {
+            self.update(data.as_ref())?;
+        }
+        Ok(self)
+    }
+
     pub fn finalize(&mut self) -> Result<[u8; Self::HASH_SIZE], CryptoError> {
         let mut md = [0u8; Self::HASH_SIZE];
         // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
@@ -93,4 +112,11 @@
         assert_eq!(s, "039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81");
         Ok(())
     }
+
+    #[test]
+    fn sha256_of_4096_zeros() -> Result<(), CryptoError> {
+        let hash = Sha256Hasher::new()?.update(&[0u8; 4096])?.finalize()?;
+        assert_eq!(hash, Sha256Hasher::HASH_OF_4096_ZEROS);
+        Ok(())
+    }
 }
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index 306c9d9..37d96c1 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,319 +14,10 @@
  * limitations under the License.
  */
 
-use libc::EIO;
-use std::io;
-use thiserror::Error;
+mod builder;
+mod common;
+mod sys;
+mod verifier;
 
-use crate::auth::Authenticator;
-use crate::common::divide_roundup;
-use crate::crypto::{CryptoError, Sha256Hasher};
-use crate::reader::ReadOnlyDataByChunk;
-
-const ZEROS: [u8; 4096] = [0u8; 4096];
-
-// The size of `struct fsverity_formatted_digest` in Linux with SHA-256.
-const SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256: usize = 12 + Sha256Hasher::HASH_SIZE;
-
-#[derive(Error, Debug)]
-pub enum FsverityError {
-    #[error("Cannot verify a signature")]
-    BadSignature,
-    #[error("Insufficient data, only got {0}")]
-    InsufficientData(usize),
-    #[error("Cannot verify a block")]
-    CannotVerify,
-    #[error("I/O error")]
-    Io(#[from] io::Error),
-    #[error("Crypto")]
-    UnexpectedCryptoError(#[from] CryptoError),
-}
-
-type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
-
-fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
-    let padding_size = pad_to - chunk.len();
-    Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
-}
-
-fn verity_check<T: ReadOnlyDataByChunk>(
-    chunk: &[u8],
-    chunk_index: u64,
-    file_size: u64,
-    merkle_tree: &T,
-) -> Result<HashBuffer, FsverityError> {
-    // The caller should not be able to produce a chunk at the first place if `file_size` is 0. The
-    // current implementation expects to crash when a `ReadOnlyDataByChunk` implementation reads
-    // beyone the file size, including empty file.
-    assert_ne!(file_size, 0);
-
-    let chunk_hash = hash_with_padding(&chunk, T::CHUNK_SIZE as usize)?;
-
-    fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
-        chunk_hash,
-        |actual_hash, result| {
-            let (merkle_chunk, hash_offset_in_chunk) = result?;
-            let expected_hash =
-                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
-            if actual_hash != expected_hash {
-                return Err(FsverityError::CannotVerify);
-            }
-            Ok(hash_with_padding(&merkle_chunk, T::CHUNK_SIZE as usize)?)
-        },
-    )
-}
-
-fn log128_ceil(num: u64) -> Option<u64> {
-    match num {
-        0 => None,
-        n => Some(divide_roundup(64 - (n - 1).leading_zeros() as u64, 7)),
-    }
-}
-
-/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
-/// from the leaf to the root. The iterator carries the slice of the chunk/node as well as the
-/// offset of the child node's hash. It is up to the iterator user to use the node and hash,
-/// e.g. for the actual verification.
-#[allow(clippy::needless_collect)]
-fn fsverity_walk<T: ReadOnlyDataByChunk>(
-    chunk_index: u64,
-    file_size: u64,
-    merkle_tree: &T,
-) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
-    let hashes_per_node = T::CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
-    let hash_pages = divide_roundup(file_size, hashes_per_node * T::CHUNK_SIZE);
-    debug_assert_eq!(hashes_per_node, 128u64);
-    let max_level = log128_ceil(hash_pages).expect("file should not be empty") as u32;
-    let root_to_leaf_steps = (0..=max_level)
-        .rev()
-        .map(|x| {
-            let leaves_per_hash = hashes_per_node.pow(x);
-            let leaves_size_per_hash = T::CHUNK_SIZE * leaves_per_hash;
-            let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
-            let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
-            let level_size = nodes_at_level * T::CHUNK_SIZE;
-            let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
-            (level_size, offset_in_level)
-        })
-        .scan(0, |level_offset, (level_size, offset_in_level)| {
-            let this_level_offset = *level_offset;
-            *level_offset += level_size;
-            let global_hash_offset = this_level_offset + offset_in_level;
-            Some(global_hash_offset)
-        })
-        .map(|global_hash_offset| {
-            let chunk_index = global_hash_offset / T::CHUNK_SIZE;
-            let hash_offset_in_chunk = (global_hash_offset % T::CHUNK_SIZE) as usize;
-            (chunk_index, hash_offset_in_chunk)
-        })
-        .collect::<Vec<_>>();
-
-    Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
-        let mut merkle_chunk = [0u8; 4096];
-        let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
-        Ok((merkle_chunk, hash_offset_in_chunk))
-    }))
-}
-
-fn build_fsverity_formatted_digest(
-    root_hash: &HashBuffer,
-    file_size: u64,
-) -> Result<[u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256], CryptoError> {
-    let desc_hash = Sha256Hasher::new()?
-        .update(&1u8.to_le_bytes())? // version
-        .update(&1u8.to_le_bytes())? // hash_algorithm
-        .update(&12u8.to_le_bytes())? // log_blocksize
-        .update(&0u8.to_le_bytes())? // salt_size
-        .update(&0u32.to_le_bytes())? // sig_size
-        .update(&file_size.to_le_bytes())? // data_size
-        .update(root_hash)? // root_hash, first 32 bytes
-        .update(&[0u8; 32])? // root_hash, last 32 bytes
-        .update(&[0u8; 32])? // salt
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 16])? // reserved
-        .finalize()?;
-
-    let mut fsverity_digest = [0u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256];
-    fsverity_digest[0..8].copy_from_slice(b"FSVerity");
-    fsverity_digest[8..10].copy_from_slice(&1u16.to_le_bytes());
-    fsverity_digest[10..12].copy_from_slice(&32u16.to_le_bytes());
-    fsverity_digest[12..].copy_from_slice(&desc_hash);
-    Ok(fsverity_digest)
-}
-
-pub struct FsverityChunkedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
-    chunked_file: F,
-    file_size: u64,
-    merkle_tree: M,
-    root_hash: HashBuffer,
-}
-
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> FsverityChunkedFileReader<F, M> {
-    pub fn new<A: Authenticator>(
-        authenticator: &A,
-        chunked_file: F,
-        file_size: u64,
-        sig: Vec<u8>,
-        merkle_tree: M,
-    ) -> Result<FsverityChunkedFileReader<F, M>, FsverityError> {
-        // TODO(victorhsieh): Use generic constant directly once supported. No need to assert
-        // afterward.
-        let mut buf = [0u8; 4096];
-        assert_eq!(buf.len() as u64, M::CHUNK_SIZE);
-        let size = merkle_tree.read_chunk(0, &mut buf)?;
-        if buf.len() != size {
-            return Err(FsverityError::InsufficientData(size));
-        }
-        let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
-        let fsverity_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
-        let valid = authenticator.verify(&sig, &fsverity_digest)?;
-        if valid {
-            Ok(FsverityChunkedFileReader { chunked_file, file_size, merkle_tree, root_hash })
-        } else {
-            Err(FsverityError::BadSignature)
-        }
-    }
-}
-
-impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> ReadOnlyDataByChunk
-    for FsverityChunkedFileReader<F, M>
-{
-    fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
-        debug_assert!(buf.len() as u64 >= Self::CHUNK_SIZE);
-        let size = self.chunked_file.read_chunk(chunk_index, buf)?;
-        let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
-            .map_err(|_| io::Error::from_raw_os_error(EIO))?;
-        if root_hash != self.root_hash {
-            Err(io::Error::from_raw_os_error(EIO))
-        } else {
-            Ok(size)
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::auth::FakeAuthenticator;
-    use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
-    use anyhow::Result;
-    use std::fs::File;
-    use std::io::Read;
-
-    type LocalFsverityChunkedFileReader =
-        FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
-
-    fn total_chunk_number(file_size: u64) -> u64 {
-        (file_size + 4095) / 4096
-    }
-
-    // Returns a reader with fs-verity verification and the file size.
-    fn new_reader_with_fsverity(
-        content_path: &str,
-        merkle_tree_path: &str,
-        signature_path: &str,
-    ) -> Result<(LocalFsverityChunkedFileReader, u64)> {
-        let file_reader = ChunkedFileReader::new(File::open(content_path)?)?;
-        let file_size = file_reader.len();
-        let merkle_tree = ChunkedFileReader::new(File::open(merkle_tree_path)?)?;
-        let mut sig = Vec::new();
-        let _ = File::open(signature_path)?.read_to_end(&mut sig)?;
-        let authenticator = FakeAuthenticator::always_succeed();
-        Ok((
-            FsverityChunkedFileReader::new(
-                &authenticator,
-                file_reader,
-                file_size,
-                sig,
-                merkle_tree,
-            )?,
-            file_size,
-        ))
-    }
-
-    #[test]
-    fn fsverity_verify_full_read_4k() -> Result<()> {
-        let (file_reader, file_size) = new_reader_with_fsverity(
-            "testdata/input.4k",
-            "testdata/input.4k.merkle_dump",
-            "testdata/input.4k.fsv_sig",
-        )?;
-
-        for i in 0..total_chunk_number(file_size) {
-            let mut buf = [0u8; 4096];
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
-        }
-        Ok(())
-    }
-
-    #[test]
-    fn fsverity_verify_full_read_4k1() -> Result<()> {
-        let (file_reader, file_size) = new_reader_with_fsverity(
-            "testdata/input.4k1",
-            "testdata/input.4k1.merkle_dump",
-            "testdata/input.4k1.fsv_sig",
-        )?;
-
-        for i in 0..total_chunk_number(file_size) {
-            let mut buf = [0u8; 4096];
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
-        }
-        Ok(())
-    }
-
-    #[test]
-    fn fsverity_verify_full_read_4m() -> Result<()> {
-        let (file_reader, file_size) = new_reader_with_fsverity(
-            "testdata/input.4m",
-            "testdata/input.4m.merkle_dump",
-            "testdata/input.4m.fsv_sig",
-        )?;
-
-        for i in 0..total_chunk_number(file_size) {
-            let mut buf = [0u8; 4096];
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
-        }
-        Ok(())
-    }
-
-    #[test]
-    fn fsverity_verify_bad_merkle_tree() -> Result<()> {
-        let (file_reader, _) = new_reader_with_fsverity(
-            "testdata/input.4m",
-            "testdata/input.4m.merkle_dump.bad", // First leaf node is corrupted.
-            "testdata/input.4m.fsv_sig",
-        )?;
-
-        // A lowest broken node (a 4K chunk that contains 128 sha256 hashes) will fail the read
-        // failure of the underlying chunks, but not before or after.
-        let mut buf = [0u8; 4096];
-        let num_hashes = 4096 / 32;
-        let last_index = num_hashes;
-        for i in 0..last_index {
-            assert!(file_reader.read_chunk(i, &mut buf[..]).is_err());
-        }
-        assert!(file_reader.read_chunk(last_index, &mut buf[..]).is_ok());
-        Ok(())
-    }
-
-    #[test]
-    fn invalid_signature() -> Result<()> {
-        let authenticator = FakeAuthenticator::always_fail();
-        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
-        let file_size = file_reader.len();
-        let merkle_tree = ChunkedFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
-        let sig = include_bytes!("../testdata/input.4m.fsv_sig").to_vec();
-        assert!(FsverityChunkedFileReader::new(
-            &authenticator,
-            file_reader,
-            file_size,
-            sig,
-            merkle_tree
-        )
-        .is_err());
-        Ok(())
-    }
-}
+pub use self::builder::MerkleLeaves;
+pub use self::verifier::FsverityChunkedFileReader;
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
new file mode 100644
index 0000000..607d3a7
--- /dev/null
+++ b/authfs/src/fsverity/builder.rs
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use crate::common::CHUNK_SIZE;
+use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+
+const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
+const HASH_PER_PAGE: usize = CHUNK_SIZE as usize / HASH_SIZE;
+
+/// `MerkleLeaves` lets its caller keep track of integrity data (Merkle tree leaf hashes) for the
+/// caller's bytes. It can also generate the standard fs-verity digest for the source data.
+///
+/// The tree is kept in memory because the initial use cases only deal with new files, so there is
+/// no need to read back an existing file. Also, since the output file won't be large for now, it
+/// is sufficient to simply keep the Merkle tree in memory in the trusted world. To further
+/// simplify the initial implementation, only the leaf nodes are kept in memory; the upper levels
+/// of the tree and the root hash are generated on request.
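+///
+/// # Example
+///
+/// A minimal sketch of the intended flow, mirroring the tests below:
+///
+/// ```ignore
+/// let mut tree = MerkleLeaves::new();
+/// let hash = Sha256Hasher::new()?.update(&[1u8; 4096])?.finalize()?;
+/// tree.update_hash(0, &hash, 4096);
+/// let digest = tree.calculate_fsverity_digest()?;
+/// ```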
+pub struct MerkleLeaves {
+    leaves: Vec<Sha256Hash>,
+    file_size: u64,
+}
+
+fn hash_all_pages(source: &[Sha256Hash]) -> Result<Vec<Sha256Hash>, CryptoError> {
+    source
+        .chunks(HASH_PER_PAGE)
+        .map(|chunk| {
+            let padding_bytes = (HASH_PER_PAGE - chunk.len()) * HASH_SIZE;
+            Ok(Sha256Hasher::new()?
+                .update_from(chunk)?
+                .update(&vec![0u8; padding_bytes])?
+                .finalize()?)
+        })
+        .collect()
+}
+
+#[allow(dead_code)]
+impl MerkleLeaves {
+    /// Creates a `MerkleLeaves` instance with empty data.
+    pub fn new() -> Self {
+        Self { leaves: Vec::new(), file_size: 0 }
+    }
+
+    /// Updates the hash of the `index`-th leaf, and increases the file size to `size_at_least`
+    /// if the current size is smaller.
+    pub fn update_hash(&mut self, index: usize, hash: &Sha256Hash, size_at_least: u64) {
+        // +1 since index is zero-based.
+        if self.leaves.len() < index + 1 {
+            // When resizing, fill in hash of zeros by default. This makes it easy to handle holes
+            // in a file.
+            self.leaves.resize(index + 1, Sha256Hasher::HASH_OF_4096_ZEROS);
+        }
+        self.leaves[index].clone_from_slice(hash);
+
+        if size_at_least > self.file_size {
+            self.file_size = size_at_least;
+        }
+    }
+
+    /// Returns whether `index` is within the bound of leaves.
+    pub fn is_index_valid(&self, index: usize) -> bool {
+        index < self.leaves.len()
+    }
+
+    /// Returns whether the `index`-th hash is consistent with `hash`.
+    pub fn is_consistent(&self, index: usize, hash: &Sha256Hash) -> bool {
+        if let Some(element) = self.leaves.get(index) {
+            element == hash
+        } else {
+            false
+        }
+    }
+
+    fn calculate_root_hash(&self) -> Result<Sha256Hash, FsverityError> {
+        match self.leaves.len() {
+            // Special cases per fs-verity digest definition.
+            0 => {
+                debug_assert_eq!(self.file_size, 0);
+                Ok([0u8; HASH_SIZE])
+            }
+            1 => {
+                debug_assert!(self.file_size <= CHUNK_SIZE && self.file_size > 0);
+                Ok(self.leaves[0])
+            }
+            n => {
+                debug_assert_eq!((self.file_size - 1) / CHUNK_SIZE, n as u64 - 1);
+                let size_for_equivalent = n as u64 * CHUNK_SIZE;
+                let level = merkle_tree_height(size_for_equivalent).unwrap(); // safe since n > 0
+
+                // `self.leaves` can't be used directly as `try_fold`'s initial state without a
+                // copy, so hash it once manually first; this also gives the accumulator an
+                // owned value of the right type.
+                let second_level = hash_all_pages(&self.leaves)?;
+                let hashes =
+                    (1..=level).try_fold(second_level, |source, _| hash_all_pages(&source))?;
+                if hashes.len() != 1 {
+                    Err(FsverityError::InvalidState)
+                } else {
+                    Ok(hashes.into_iter().next().unwrap())
+                }
+            }
+        }
+    }
+
+    /// Returns the fs-verity digest based on the current tree and file size.
+    pub fn calculate_fsverity_digest(&self) -> Result<Sha256Hash, FsverityError> {
+        let root_hash = self.calculate_root_hash()?;
+        Ok(build_fsverity_digest(&root_hash, self.file_size)?)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    // Test data below can be generated by:
+    //  $ perl -e 'print "\x{00}" x 6000' > foo
+    //  $ perl -e 'print "\x{01}" x 5000' >> foo
+    //  $ fsverity digest foo
+    use super::*;
+    use anyhow::Result;
+
+    #[test]
+    fn merkle_tree_empty_file() -> Result<()> {
+        assert_eq!(
+            to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95"),
+            generate_fsverity_digest_sequentially(&Vec::new())?
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn merkle_tree_file_size_less_than_or_equal_to_4k() -> Result<()> {
+        // Test a file that contains 4096 '\01's.
+        assert_eq!(
+            to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af"),
+            generate_fsverity_digest_sequentially(&vec![1; 4096])?
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn merkle_tree_more_sizes() -> Result<()> {
+        // Test files that contains >4096 '\01's.
+
+        assert_eq!(
+            to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52"),
+            generate_fsverity_digest_sequentially(&vec![1; 4097])?
+        );
+
+        assert_eq!(
+            to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d"),
+            generate_fsverity_digest_sequentially(&vec![1; 8192])?
+        );
+
+        // Test with max size that still fits in 2 levels.
+        assert_eq!(
+            to_u8_vec("26b7c190a34e19f420808ee7ec233b09fa6c34543b5a9d2950530114c205d14f"),
+            generate_fsverity_digest_sequentially(&vec![1; 524288])?
+        );
+
+        // Test with data that requires 3 levels.
+        assert_eq!(
+            to_u8_vec("316835d9be1c95b5cd55d07ae7965d651689efad186e26cbf680e40b683a3262"),
+            generate_fsverity_digest_sequentially(&vec![1; 524289])?
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn merkle_tree_non_sequential() -> Result<()> {
+        let mut tree = MerkleLeaves::new();
+        let hash = Sha256Hasher::new()?.update(&vec![1u8; CHUNK_SIZE as usize])?.finalize()?;
+
+        // Update hashes of 4 1-blocks.
+        tree.update_hash(1, &hash, CHUNK_SIZE * 2);
+        tree.update_hash(3, &hash, CHUNK_SIZE * 4);
+        tree.update_hash(0, &hash, CHUNK_SIZE);
+        tree.update_hash(2, &hash, CHUNK_SIZE * 3);
+
+        assert_eq!(
+            to_u8_vec("7d3c0d2e1dc54230b20ed875f5f3a4bd3f9873df601936b3ca8127d4db3548f3"),
+            tree.calculate_fsverity_digest()?
+        );
+        Ok(())
+    }
+
+    fn generate_fsverity_digest_sequentially(test_data: &[u8]) -> Result<Sha256Hash> {
+        let mut tree = MerkleLeaves::new();
+        for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
+            let hash = Sha256Hasher::new()?
+                .update(&chunk)?
+                .update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
+                .finalize()?;
+
+            tree.update_hash(index, &hash, CHUNK_SIZE * index as u64 + chunk.len() as u64);
+        }
+        Ok(tree.calculate_fsverity_digest()?)
+    }
+
+    fn to_u8_vec(hex_str: &str) -> Vec<u8> {
+        assert!(hex_str.len() % 2 == 0);
+        (0..hex_str.len())
+            .step_by(2)
+            .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
+            .collect()
+    }
+}
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
new file mode 100644
index 0000000..8889f5c
--- /dev/null
+++ b/authfs/src/fsverity/common.rs
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use std::io;
+
+use thiserror::Error;
+
+use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_LOG_BLOCKSIZE, FS_VERITY_VERSION};
+use crate::common::{divide_roundup, CHUNK_SIZE};
+use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+
+#[derive(Error, Debug)]
+pub enum FsverityError {
+    #[error("Cannot verify a signature")]
+    BadSignature,
+    #[error("Insufficient data, only got {0}")]
+    InsufficientData(usize),
+    #[error("Cannot verify a block")]
+    CannotVerify,
+    #[error("I/O error")]
+    Io(#[from] io::Error),
+    #[error("Crypto")]
+    UnexpectedCryptoError(#[from] CryptoError),
+    #[error("Invalid state")]
+    InvalidState,
+}
+
+fn log128_ceil(num: u64) -> Option<u64> {
+    match num {
+        0 => None,
+        n => Some(divide_roundup(64 - (n - 1).leading_zeros() as u64, 7)),
+    }
+}
+
+/// Returns the Merkle tree height for our tree configuration, or `None` if the size is 0.
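+///
+/// For example, a 1 MiB file has 256 data chunks; their 256 hashes fill 2 Merkle tree pages, so
+/// the height is 1 (a root node with one level of nodes below it).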
+pub fn merkle_tree_height(data_size: u64) -> Option<u64> {
+    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+    let hash_pages = divide_roundup(data_size, hashes_per_node * CHUNK_SIZE);
+    log128_ceil(hash_pages)
+}
+
+pub fn build_fsverity_digest(
+    root_hash: &Sha256Hash,
+    file_size: u64,
+) -> Result<Sha256Hash, CryptoError> {
+    // Little-endian byte representation of fsverity_descriptor from linux/fsverity.h
+    // Not FFI-ed as it seems easier to deal with the raw bytes manually.
+    Sha256Hasher::new()?
+        .update(&FS_VERITY_VERSION.to_le_bytes())? // version
+        .update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes())? // hash_algorithm
+        .update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes())? // log_blocksize
+        .update(&0u8.to_le_bytes())? // salt_size
+        .update(&0u32.to_le_bytes())? // sig_size
+        .update(&file_size.to_le_bytes())? // data_size
+        .update(root_hash)? // root_hash, first 32 bytes
+        .update(&[0u8; 32])? // root_hash, last 32 bytes, always 0 because we are using sha256.
+        .update(&[0u8; 32])? // salt
+        .update(&[0u8; 32])? // reserved
+        .update(&[0u8; 32])? // reserved
+        .update(&[0u8; 32])? // reserved
+        .update(&[0u8; 32])? // reserved
+        .update(&[0u8; 16])? // reserved
+        .finalize()
+}
diff --git a/authfs/src/fsverity/sys.rs b/authfs/src/fsverity/sys.rs
new file mode 100644
index 0000000..b3222db
--- /dev/null
+++ b/authfs/src/fsverity/sys.rs
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/// Magic used in fs-verity digest
+pub const FS_VERITY_MAGIC: &[u8; 8] = b"FSVerity";
+
+/// fs-verity version that we are using
+pub const FS_VERITY_VERSION: u8 = 1;
+
+/// Hash algorithm to use from linux/fsverity.h
+pub const FS_VERITY_HASH_ALG_SHA256: u8 = 1;
+
+/// Log 2 of the block size (only 4096 is supported now)
+pub const FS_VERITY_LOG_BLOCKSIZE: u8 = 12;
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
new file mode 100644
index 0000000..fd108f5
--- /dev/null
+++ b/authfs/src/fsverity/verifier.rs
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use libc::EIO;
+use std::io;
+
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_MAGIC};
+use crate::auth::Authenticator;
+use crate::common::{divide_roundup, CHUNK_SIZE};
+use crate::crypto::{CryptoError, Sha256Hasher};
+use crate::reader::ReadOnlyDataByChunk;
+
+const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
+
+// The size of `struct fsverity_formatted_digest` in Linux with SHA-256.
+const SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256: usize = 12 + Sha256Hasher::HASH_SIZE;
+
+type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
+
+fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
+    let padding_size = pad_to - chunk.len();
+    Sha256Hasher::new()?.update(&chunk)?.update(&ZEROS[..padding_size])?.finalize()
+}
+
+fn verity_check<T: ReadOnlyDataByChunk>(
+    chunk: &[u8],
+    chunk_index: u64,
+    file_size: u64,
+    merkle_tree: &T,
+) -> Result<HashBuffer, FsverityError> {
+    // The caller should not be able to produce a chunk in the first place if `file_size` is 0.
+    // The current implementation expects to crash when a `ReadOnlyDataByChunk` implementation
+    // reads beyond the file size, including for an empty file.
+    assert_ne!(file_size, 0);
+
+    let chunk_hash = hash_with_padding(&chunk, CHUNK_SIZE as usize)?;
+
+    fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
+        chunk_hash,
+        |actual_hash, result| {
+            let (merkle_chunk, hash_offset_in_chunk) = result?;
+            let expected_hash =
+                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
+            if actual_hash != expected_hash {
+                return Err(FsverityError::CannotVerify);
+            }
+            Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
+        },
+    )
+}
+
+/// Given a chunk index and the size of the file, returns an iterator that walks the Merkle tree
+/// from the leaf to the root. The iterator carries the slice of the chunk/node as well as the
+/// offset of the child node's hash. It is up to the iterator user to use the node and hash,
+/// e.g. for the actual verification.
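+///
+/// As a concrete sketch: for a 2 MiB file (512 data chunks), the Merkle tree consists of a root
+/// node followed by 4 leaf-hash nodes. Walking for `chunk_index` 5 yields the first leaf-hash
+/// node (tree chunk 1) with hash offset 160 (5 * 32 bytes), then the root node (tree chunk 0)
+/// with hash offset 0.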
+#[allow(clippy::needless_collect)]
+fn fsverity_walk<T: ReadOnlyDataByChunk>(
+    chunk_index: u64,
+    file_size: u64,
+    merkle_tree: &T,
+) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
+    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+    debug_assert_eq!(hashes_per_node, 128u64);
+    let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
+    let root_to_leaf_steps = (0..=max_level)
+        .rev()
+        .map(|x| {
+            let leaves_per_hash = hashes_per_node.pow(x);
+            let leaves_size_per_hash = CHUNK_SIZE * leaves_per_hash;
+            let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
+            let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
+            let level_size = nodes_at_level * CHUNK_SIZE;
+            let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
+            (level_size, offset_in_level)
+        })
+        .scan(0, |level_offset, (level_size, offset_in_level)| {
+            let this_level_offset = *level_offset;
+            *level_offset += level_size;
+            let global_hash_offset = this_level_offset + offset_in_level;
+            Some(global_hash_offset)
+        })
+        .map(|global_hash_offset| {
+            let chunk_index = global_hash_offset / CHUNK_SIZE;
+            let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
+            (chunk_index, hash_offset_in_chunk)
+        })
+        .collect::<Vec<_>>(); // Needs to collect first to be able to reverse below.
+
+    Ok(root_to_leaf_steps.into_iter().rev().map(move |(chunk_index, hash_offset_in_chunk)| {
+        let mut merkle_chunk = [0u8; 4096];
+        // read_chunk is supposed to return a full chunk, or an incomplete one at the end of the
+        // file. In the incomplete case, the hash is calculated with 0-padding to the chunk size.
+        // Therefore, we don't need to check the returned size here.
+        let _ = merkle_tree.read_chunk(chunk_index, &mut merkle_chunk)?;
+        Ok((merkle_chunk, hash_offset_in_chunk))
+    }))
+}
+
+fn build_fsverity_formatted_digest(
+    root_hash: &HashBuffer,
+    file_size: u64,
+) -> Result<[u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256], CryptoError> {
+    let digest = build_fsverity_digest(root_hash, file_size)?;
+    // Little-endian byte representation of fsverity_formatted_digest from linux/fsverity.h
+    // Not FFI-ed as it seems easier to deal with the raw bytes manually.
+    let mut formatted_digest = [0u8; SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256];
+    formatted_digest[0..8].copy_from_slice(FS_VERITY_MAGIC);
+    formatted_digest[8..10].copy_from_slice(&(FS_VERITY_HASH_ALG_SHA256 as u16).to_le_bytes());
+    formatted_digest[10..12].copy_from_slice(&(Sha256Hasher::HASH_SIZE as u16).to_le_bytes());
+    formatted_digest[12..].copy_from_slice(&digest);
+    Ok(formatted_digest)
+}
+
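+/// A read-only file reader that verifies each chunk it reads against the fs-verity Merkle tree
+/// before returning the data.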
+pub struct FsverityChunkedFileReader<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> {
+    chunked_file: F,
+    file_size: u64,
+    merkle_tree: M,
+    root_hash: HashBuffer,
+}
+
+impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> FsverityChunkedFileReader<F, M> {
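+    /// Creates an instance that reads from `chunked_file` and verifies reads against
+    /// `merkle_tree`. Fails unless `authenticator` accepts `sig` as a valid signature of the
+    /// file's fs-verity formatted digest.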
+    pub fn new<A: Authenticator>(
+        authenticator: &A,
+        chunked_file: F,
+        file_size: u64,
+        sig: Vec<u8>,
+        merkle_tree: M,
+    ) -> Result<FsverityChunkedFileReader<F, M>, FsverityError> {
+        let mut buf = [0u8; CHUNK_SIZE as usize];
+        let size = merkle_tree.read_chunk(0, &mut buf)?;
+        if buf.len() != size {
+            return Err(FsverityError::InsufficientData(size));
+        }
+        let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
+        let formatted_digest = build_fsverity_formatted_digest(&root_hash, file_size)?;
+        let valid = authenticator.verify(&sig, &formatted_digest)?;
+        if valid {
+            Ok(FsverityChunkedFileReader { chunked_file, file_size, merkle_tree, root_hash })
+        } else {
+            Err(FsverityError::BadSignature)
+        }
+    }
+}
+
+impl<F: ReadOnlyDataByChunk, M: ReadOnlyDataByChunk> ReadOnlyDataByChunk
+    for FsverityChunkedFileReader<F, M>
+{
+    fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
+        debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+        let size = self.chunked_file.read_chunk(chunk_index, buf)?;
+        let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
+            .map_err(|_| io::Error::from_raw_os_error(EIO))?;
+        if root_hash != self.root_hash {
+            Err(io::Error::from_raw_os_error(EIO))
+        } else {
+            Ok(size)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::auth::FakeAuthenticator;
+    use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
+    use anyhow::Result;
+    use std::fs::File;
+    use std::io::Read;
+
+    type LocalFsverityChunkedFileReader =
+        FsverityChunkedFileReader<ChunkedFileReader, ChunkedFileReader>;
+
+    fn total_chunk_number(file_size: u64) -> u64 {
+        (file_size + 4095) / 4096
+    }
+
+    // Returns a reader with fs-verity verification and the file size.
+    fn new_reader_with_fsverity(
+        content_path: &str,
+        merkle_tree_path: &str,
+        signature_path: &str,
+    ) -> Result<(LocalFsverityChunkedFileReader, u64)> {
+        let file_reader = ChunkedFileReader::new(File::open(content_path)?)?;
+        let file_size = file_reader.len();
+        let merkle_tree = ChunkedFileReader::new(File::open(merkle_tree_path)?)?;
+        let mut sig = Vec::new();
+        let _ = File::open(signature_path)?.read_to_end(&mut sig)?;
+        let authenticator = FakeAuthenticator::always_succeed();
+        Ok((
+            FsverityChunkedFileReader::new(
+                &authenticator,
+                file_reader,
+                file_size,
+                sig,
+                merkle_tree,
+            )?,
+            file_size,
+        ))
+    }
+
+    #[test]
+    fn fsverity_verify_full_read_4k() -> Result<()> {
+        let (file_reader, file_size) = new_reader_with_fsverity(
+            "testdata/input.4k",
+            "testdata/input.4k.merkle_dump",
+            "testdata/input.4k.fsv_sig",
+        )?;
+
+        for i in 0..total_chunk_number(file_size) {
+            let mut buf = [0u8; 4096];
+            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn fsverity_verify_full_read_4k1() -> Result<()> {
+        let (file_reader, file_size) = new_reader_with_fsverity(
+            "testdata/input.4k1",
+            "testdata/input.4k1.merkle_dump",
+            "testdata/input.4k1.fsv_sig",
+        )?;
+
+        for i in 0..total_chunk_number(file_size) {
+            let mut buf = [0u8; 4096];
+            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn fsverity_verify_full_read_4m() -> Result<()> {
+        let (file_reader, file_size) = new_reader_with_fsverity(
+            "testdata/input.4m",
+            "testdata/input.4m.merkle_dump",
+            "testdata/input.4m.fsv_sig",
+        )?;
+
+        for i in 0..total_chunk_number(file_size) {
+            let mut buf = [0u8; 4096];
+            assert!(file_reader.read_chunk(i, &mut buf[..]).is_ok());
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn fsverity_verify_bad_merkle_tree() -> Result<()> {
+        let (file_reader, _) = new_reader_with_fsverity(
+            "testdata/input.4m",
+            "testdata/input.4m.merkle_dump.bad", // First leaf node is corrupted.
+            "testdata/input.4m.fsv_sig",
+        )?;
+
+        // A broken node at the lowest level (a 4K chunk that contains 128 sha256 hashes) will
+        // cause read failures for all of the underlying chunks it covers, but not for chunks
+        // before or after it.
+        let mut buf = [0u8; 4096];
+        let num_hashes = 4096 / 32;
+        let last_index = num_hashes;
+        for i in 0..last_index {
+            assert!(file_reader.read_chunk(i, &mut buf[..]).is_err());
+        }
+        assert!(file_reader.read_chunk(last_index, &mut buf[..]).is_ok());
+        Ok(())
+    }
+
+    #[test]
+    fn invalid_signature() -> Result<()> {
+        let authenticator = FakeAuthenticator::always_fail();
+        let file_reader = ChunkedFileReader::new(File::open("testdata/input.4m")?)?;
+        let file_size = file_reader.len();
+        let merkle_tree = ChunkedFileReader::new(File::open("testdata/input.4m.merkle_dump")?)?;
+        let sig = include_bytes!("../../testdata/input.4m.fsv_sig").to_vec();
+        assert!(FsverityChunkedFileReader::new(
+            &authenticator,
+            file_reader,
+            file_size,
+            sig,
+            merkle_tree
+        )
+        .is_err());
+        Ok(())
+    }
+}
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 0dfd0af..f5dd6ec 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -29,14 +29,11 @@
 use fuse::filesystem::{Context, DirEntry, DirectoryIterator, Entry, FileSystem, ZeroCopyWriter};
 use fuse::mount::MountOption;
 
-use crate::common::{divide_roundup, COMMON_PAGE_SIZE};
+use crate::common::{divide_roundup, CHUNK_SIZE};
 use crate::fsverity::FsverityChunkedFileReader;
 use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
 use crate::remote_file::{RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader};
 
-// We're reading the backing file by chunk, so setting the block size to be the same.
-const BLOCK_SIZE: usize = COMMON_PAGE_SIZE as usize;
-
 const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
 
 pub type Inode = u64;
@@ -89,9 +86,9 @@
 
 cfg_if::cfg_if! {
     if #[cfg(all(target_arch = "aarch64", target_pointer_width = "64"))] {
-        fn blk_size() -> libc::c_int { BLOCK_SIZE as libc::c_int }
+        fn blk_size() -> libc::c_int { CHUNK_SIZE as libc::c_int }
     } else {
-        fn blk_size() -> libc::c_long { BLOCK_SIZE as libc::c_long }
+        fn blk_size() -> libc::c_long { CHUNK_SIZE as libc::c_long }
     }
 }
 
@@ -135,7 +132,7 @@
             return None;
         }
         let chunk_data_size =
-            std::cmp::min(self.remaining, BLOCK_SIZE - (self.offset % BLOCK_SIZE as u64) as usize);
+            std::cmp::min(self.remaining, (CHUNK_SIZE - self.offset % CHUNK_SIZE) as usize);
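+        // E.g. with offset 5000 and 10000 bytes remaining, this iteration covers bytes
+        // 5000..8192 (3192 bytes), up to the next chunk boundary, since 5000 % 4096 == 904.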
         let retval = (self.offset, chunk_data_size);
         self.offset += chunk_data_size as u64;
         self.remaining = self.remaining.saturating_sub(chunk_data_size);
@@ -144,7 +141,7 @@
 }
 
 fn offset_to_chunk_index(offset: u64) -> u64 {
-    offset / BLOCK_SIZE as u64
+    offset / CHUNK_SIZE
 }
 
 fn read_chunks<W: io::Write, T: ReadOnlyDataByChunk>(
@@ -163,13 +160,13 @@
             // instead of accepting a buffer, the writer could expose the final destination buffer
             // for the reader to write to. It might not be generally applicable though, e.g. with
             // virtio transport, the buffer may not be continuous.
-            let mut buf = [0u8; BLOCK_SIZE];
+            let mut buf = [0u8; CHUNK_SIZE as usize];
             let read_size = file.read_chunk(offset_to_chunk_index(current_offset), &mut buf)?;
             if read_size < planned_data_size {
                 return Err(io::Error::from_raw_os_error(libc::ENODATA));
             }
 
-            let begin = (current_offset % BLOCK_SIZE as u64) as usize;
+            let begin = (current_offset % CHUNK_SIZE) as usize;
             let end = begin + planned_data_size;
             let s = w.write(&buf[begin..end])?;
             if s != planned_data_size {
diff --git a/authfs/src/reader.rs b/authfs/src/reader.rs
index d365a41..0242afa 100644
--- a/authfs/src/reader.rs
+++ b/authfs/src/reader.rs
@@ -20,14 +20,11 @@
 use std::io::Result;
 use std::os::unix::fs::FileExt;
 
-use crate::common::COMMON_PAGE_SIZE;
+use crate::common::CHUNK_SIZE;
 
 /// A trait for reading data by chunks. The data is assumed readonly and has fixed length. Chunks
 /// can be read by specifying the chunk index. Only the last chunk may have incomplete chunk size.
 pub trait ReadOnlyDataByChunk {
-    /// Default chunk size.
-    const CHUNK_SIZE: u64 = COMMON_PAGE_SIZE;
-
     /// Read the `chunk_index`-th chunk to `buf`. Each slice/chunk has size `CHUNK_SIZE` except for
     /// the last one, which can be an incomplete chunk. `buf` is currently required to be large
     /// enough to hold a full chunk of data. Reading beyond the file size (including empty file)
@@ -35,10 +32,10 @@
     fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize>;
 }
 
-fn chunk_index_to_range(size: u64, chunk_size: u64, chunk_index: u64) -> Result<(u64, u64)> {
-    let start = chunk_index * chunk_size;
+fn chunk_index_to_range(size: u64, chunk_index: u64) -> Result<(u64, u64)> {
+    let start = chunk_index * CHUNK_SIZE;
     assert!(start < size);
-    let end = std::cmp::min(size, start + chunk_size);
+    let end = std::cmp::min(size, start + CHUNK_SIZE);
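+    // E.g. for size 5000 and chunk_index 1, the range is (4096, 5000): the final, short chunk.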
     Ok((start, end))
 }
 
@@ -62,8 +59,8 @@
 
 impl ReadOnlyDataByChunk for ChunkedFileReader {
     fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize> {
-        debug_assert!(buf.len() as u64 >= Self::CHUNK_SIZE);
-        let (start, end) = chunk_index_to_range(self.size, Self::CHUNK_SIZE, chunk_index)?;
+        debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+        let (start, end) = chunk_index_to_range(self.size, chunk_index)?;
         let size = (end - start) as usize;
         self.file.read_at(&mut buf[..size], start)
     }
diff --git a/authfs/src/remote_file.rs b/authfs/src/remote_file.rs
index 7c3d12e..01e803c 100644
--- a/authfs/src/remote_file.rs
+++ b/authfs/src/remote_file.rs
@@ -19,6 +19,7 @@
 use std::io::Write;
 use std::sync::{Arc, Mutex};
 
+use crate::common::CHUNK_SIZE;
 use crate::reader::ReadOnlyDataByChunk;
 
 use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
@@ -49,7 +50,7 @@
 
 impl ReadOnlyDataByChunk for RemoteChunkedFileReader {
     fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
-        let offset = i64::try_from(chunk_index * Self::CHUNK_SIZE)
+        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
             .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
 
         let service = Arc::clone(&self.service);
@@ -77,7 +78,7 @@
 
 impl ReadOnlyDataByChunk for RemoteFsverityMerkleTreeReader {
     fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
-        let offset = i64::try_from(chunk_index * Self::CHUNK_SIZE)
+        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
             .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
 
         let service = Arc::clone(&self.service);