authfs: Replace a trait-bound const with a simple const

fs-verity assumes the block/page/chunk size is 4K, and this assumption
is unlikely to change in the foreseeable future. Remove the associated
const from the ReadOnlyDataByChunk trait and use a plain module-level
const for simplicity.
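
For reference, the refactoring boils down to the pattern below. This is
a minimal sketch with hypothetical trait names standing in for
ReadOnlyDataByChunk, not the actual authfs code:

    // Before: each implementor carries the chunk size as an associated
    // const, even though it is the same 4K everywhere.
    trait ReadByChunkBefore {
        const CHUNK_SIZE: u64 = 4096;
        fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> usize;
    }

    // After: a single module-level const. Unlike an associated const on
    // a generic parameter, this can be used directly in array lengths
    // such as [0u8; CHUNK_SIZE as usize], which avoids the workaround
    // removed in fsverity.rs below.
    const CHUNK_SIZE: u64 = 4096;

    trait ReadByChunkAfter {
        fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> usize;
    }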

Bug: 171279640
Test: atest

Change-Id: I8e47acb1869b15dab676dfb10449d2800f3aca73
diff --git a/authfs/src/common.rs b/authfs/src/common.rs
index 2220ae7..522397f 100644
--- a/authfs/src/common.rs
+++ b/authfs/src/common.rs
@@ -14,7 +14,8 @@
  * limitations under the License.
  */
 
-pub const COMMON_PAGE_SIZE: u64 = 4096;
+/// Common block and page size in Linux.
+pub const CHUNK_SIZE: u64 = 4096;
 
 pub fn divide_roundup(dividend: u64, divisor: u64) -> u64 {
     (dividend + divisor - 1) / divisor
diff --git a/authfs/src/fsverity.rs b/authfs/src/fsverity.rs
index 306c9d9..8429caa 100644
--- a/authfs/src/fsverity.rs
+++ b/authfs/src/fsverity.rs
@@ -19,11 +19,11 @@
 use thiserror::Error;
 
 use crate::auth::Authenticator;
-use crate::common::divide_roundup;
+use crate::common::{divide_roundup, CHUNK_SIZE};
 use crate::crypto::{CryptoError, Sha256Hasher};
 use crate::reader::ReadOnlyDataByChunk;
 
-const ZEROS: [u8; 4096] = [0u8; 4096];
+const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
 
 // The size of `struct fsverity_formatted_digest` in Linux with SHA-256.
 const SIZE_OF_FSVERITY_FORMATTED_DIGEST_SHA256: usize = 12 + Sha256Hasher::HASH_SIZE;
@@ -60,7 +60,7 @@
     // beyond the file size, including empty file.
     assert_ne!(file_size, 0);
 
-    let chunk_hash = hash_with_padding(&chunk, T::CHUNK_SIZE as usize)?;
+    let chunk_hash = hash_with_padding(&chunk, CHUNK_SIZE as usize)?;
 
     fsverity_walk(chunk_index, file_size, merkle_tree)?.try_fold(
         chunk_hash,
@@ -71,7 +71,7 @@
             if actual_hash != expected_hash {
                 return Err(FsverityError::CannotVerify);
             }
-            Ok(hash_with_padding(&merkle_chunk, T::CHUNK_SIZE as usize)?)
+            Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
         },
     )
 }
@@ -93,18 +93,18 @@
     file_size: u64,
     merkle_tree: &T,
 ) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
-    let hashes_per_node = T::CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
-    let hash_pages = divide_roundup(file_size, hashes_per_node * T::CHUNK_SIZE);
+    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+    let hash_pages = divide_roundup(file_size, hashes_per_node * CHUNK_SIZE);
     debug_assert_eq!(hashes_per_node, 128u64);
     let max_level = log128_ceil(hash_pages).expect("file should not be empty") as u32;
     let root_to_leaf_steps = (0..=max_level)
         .rev()
         .map(|x| {
             let leaves_per_hash = hashes_per_node.pow(x);
-            let leaves_size_per_hash = T::CHUNK_SIZE * leaves_per_hash;
+            let leaves_size_per_hash = CHUNK_SIZE * leaves_per_hash;
             let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
             let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
-            let level_size = nodes_at_level * T::CHUNK_SIZE;
+            let level_size = nodes_at_level * CHUNK_SIZE;
             let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
             (level_size, offset_in_level)
         })
@@ -115,8 +115,8 @@
             Some(global_hash_offset)
         })
         .map(|global_hash_offset| {
-            let chunk_index = global_hash_offset / T::CHUNK_SIZE;
-            let hash_offset_in_chunk = (global_hash_offset % T::CHUNK_SIZE) as usize;
+            let chunk_index = global_hash_offset / CHUNK_SIZE;
+            let hash_offset_in_chunk = (global_hash_offset % CHUNK_SIZE) as usize;
             (chunk_index, hash_offset_in_chunk)
         })
         .collect::<Vec<_>>();
@@ -172,10 +172,7 @@
         sig: Vec<u8>,
         merkle_tree: M,
     ) -> Result<FsverityChunkedFileReader<F, M>, FsverityError> {
-        // TODO(victorhsieh): Use generic constant directly once supported. No need to assert
-        // afterward.
-        let mut buf = [0u8; 4096];
-        assert_eq!(buf.len() as u64, M::CHUNK_SIZE);
+        let mut buf = [0u8; CHUNK_SIZE as usize];
         let size = merkle_tree.read_chunk(0, &mut buf)?;
         if buf.len() != size {
             return Err(FsverityError::InsufficientData(size));
@@ -195,7 +192,7 @@
     for FsverityChunkedFileReader<F, M>
 {
     fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> io::Result<usize> {
-        debug_assert!(buf.len() as u64 >= Self::CHUNK_SIZE);
+        debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
         let size = self.chunked_file.read_chunk(chunk_index, buf)?;
         let root_hash = verity_check(&buf[..size], chunk_index, self.file_size, &self.merkle_tree)
             .map_err(|_| io::Error::from_raw_os_error(EIO))?;
diff --git a/authfs/src/fusefs.rs b/authfs/src/fusefs.rs
index 0dfd0af..f5dd6ec 100644
--- a/authfs/src/fusefs.rs
+++ b/authfs/src/fusefs.rs
@@ -29,14 +29,11 @@
 use fuse::filesystem::{Context, DirEntry, DirectoryIterator, Entry, FileSystem, ZeroCopyWriter};
 use fuse::mount::MountOption;
 
-use crate::common::{divide_roundup, COMMON_PAGE_SIZE};
+use crate::common::{divide_roundup, CHUNK_SIZE};
 use crate::fsverity::FsverityChunkedFileReader;
 use crate::reader::{ChunkedFileReader, ReadOnlyDataByChunk};
 use crate::remote_file::{RemoteChunkedFileReader, RemoteFsverityMerkleTreeReader};
 
-// We're reading the backing file by chunk, so setting the block size to be the same.
-const BLOCK_SIZE: usize = COMMON_PAGE_SIZE as usize;
-
 const DEFAULT_METADATA_TIMEOUT: std::time::Duration = Duration::from_secs(5);
 
 pub type Inode = u64;
@@ -89,9 +86,9 @@
 
 cfg_if::cfg_if! {
     if #[cfg(all(target_arch = "aarch64", target_pointer_width = "64"))] {
-        fn blk_size() -> libc::c_int { BLOCK_SIZE as libc::c_int }
+        fn blk_size() -> libc::c_int { CHUNK_SIZE as libc::c_int }
     } else {
-        fn blk_size() -> libc::c_long { BLOCK_SIZE as libc::c_long }
+        fn blk_size() -> libc::c_long { CHUNK_SIZE as libc::c_long }
     }
 }
 
@@ -135,7 +132,7 @@
             return None;
         }
         let chunk_data_size =
-            std::cmp::min(self.remaining, BLOCK_SIZE - (self.offset % BLOCK_SIZE as u64) as usize);
+            std::cmp::min(self.remaining, (CHUNK_SIZE - self.offset % CHUNK_SIZE) as usize);
         let retval = (self.offset, chunk_data_size);
         self.offset += chunk_data_size as u64;
         self.remaining = self.remaining.saturating_sub(chunk_data_size);
@@ -144,7 +141,7 @@
 }
 
 fn offset_to_chunk_index(offset: u64) -> u64 {
-    offset / BLOCK_SIZE as u64
+    offset / CHUNK_SIZE
 }
 
 fn read_chunks<W: io::Write, T: ReadOnlyDataByChunk>(
@@ -163,13 +160,13 @@
             // instead of accepting a buffer, the writer could expose the final destination buffer
             // for the reader to write to. It might not be generally applicable though, e.g. with
             // virtio transport, the buffer may not be continuous.
-            let mut buf = [0u8; BLOCK_SIZE];
+            let mut buf = [0u8; CHUNK_SIZE as usize];
             let read_size = file.read_chunk(offset_to_chunk_index(current_offset), &mut buf)?;
             if read_size < planned_data_size {
                 return Err(io::Error::from_raw_os_error(libc::ENODATA));
             }
 
-            let begin = (current_offset % BLOCK_SIZE as u64) as usize;
+            let begin = (current_offset % CHUNK_SIZE) as usize;
             let end = begin + planned_data_size;
             let s = w.write(&buf[begin..end])?;
             if s != planned_data_size {
diff --git a/authfs/src/reader.rs b/authfs/src/reader.rs
index d365a41..0242afa 100644
--- a/authfs/src/reader.rs
+++ b/authfs/src/reader.rs
@@ -20,14 +20,11 @@
 use std::io::Result;
 use std::os::unix::fs::FileExt;
 
-use crate::common::COMMON_PAGE_SIZE;
+use crate::common::CHUNK_SIZE;
 
 /// A trait for reading data by chunks. The data is assumed readonly and has fixed length. Chunks
 /// can be read by specifying the chunk index. Only the last chunk may have incomplete chunk size.
 pub trait ReadOnlyDataByChunk {
-    /// Default chunk size.
-    const CHUNK_SIZE: u64 = COMMON_PAGE_SIZE;
-
     /// Read the `chunk_index`-th chunk to `buf`. Each slice/chunk has size `CHUNK_SIZE` except for
     /// the last one, which can be an incomplete chunk. `buf` is currently required to be large
     /// enough to hold a full chunk of data. Reading beyond the file size (including empty file)
@@ -35,10 +32,10 @@
     fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize>;
 }
 
-fn chunk_index_to_range(size: u64, chunk_size: u64, chunk_index: u64) -> Result<(u64, u64)> {
-    let start = chunk_index * chunk_size;
+fn chunk_index_to_range(size: u64, chunk_index: u64) -> Result<(u64, u64)> {
+    let start = chunk_index * CHUNK_SIZE;
     assert!(start < size);
-    let end = std::cmp::min(size, start + chunk_size);
+    let end = std::cmp::min(size, start + CHUNK_SIZE);
     Ok((start, end))
 }
 
@@ -62,8 +59,8 @@
 
 impl ReadOnlyDataByChunk for ChunkedFileReader {
     fn read_chunk(&self, chunk_index: u64, buf: &mut [u8]) -> Result<usize> {
-        debug_assert!(buf.len() as u64 >= Self::CHUNK_SIZE);
-        let (start, end) = chunk_index_to_range(self.size, Self::CHUNK_SIZE, chunk_index)?;
+        debug_assert!(buf.len() as u64 >= CHUNK_SIZE);
+        let (start, end) = chunk_index_to_range(self.size, chunk_index)?;
         let size = (end - start) as usize;
         self.file.read_at(&mut buf[..size], start)
     }
diff --git a/authfs/src/remote_file.rs b/authfs/src/remote_file.rs
index 7c3d12e..01e803c 100644
--- a/authfs/src/remote_file.rs
+++ b/authfs/src/remote_file.rs
@@ -19,6 +19,7 @@
 use std::io::Write;
 use std::sync::{Arc, Mutex};
 
+use crate::common::CHUNK_SIZE;
 use crate::reader::ReadOnlyDataByChunk;
 
 use authfs_aidl_interface::aidl::com::android::virt::fs::IVirtFdService;
@@ -49,7 +50,7 @@
 
 impl ReadOnlyDataByChunk for RemoteChunkedFileReader {
     fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
-        let offset = i64::try_from(chunk_index * Self::CHUNK_SIZE)
+        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
             .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
 
         let service = Arc::clone(&self.service);
@@ -77,7 +78,7 @@
 
 impl ReadOnlyDataByChunk for RemoteFsverityMerkleTreeReader {
     fn read_chunk(&self, chunk_index: u64, mut buf: &mut [u8]) -> io::Result<usize> {
-        let offset = i64::try_from(chunk_index * Self::CHUNK_SIZE)
+        let offset = i64::try_from(chunk_index * CHUNK_SIZE)
             .map_err(|_| io::Error::from_raw_os_error(libc::EOVERFLOW))?;
 
         let service = Arc::clone(&self.service);