authfs: Migrate to the openssl crate from custom bindings
Now that there is a standard Rust binding to BoringSSL (the openssl
crate), switch over to using it instead of the custom bindgen wrapper.
Bug: 233582804
Test: atest AuthFsHostTest
Test: atest authfs_device_test_src_lib
Change-Id: I4ae4353e21801bd4f9b43c4284d941820d540d80
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 935ed5c..84eb0f4 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -13,7 +13,6 @@
"authfs_aidl_interface-rust",
"libandroid_logger",
"libanyhow",
- "libauthfs_crypto_bindgen",
"libauthfs_fsverity_metadata",
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
@@ -23,6 +22,7 @@
"liblibc",
"liblog_rust",
"libnix",
+ "libopenssl",
"libprotobuf",
"libstructopt",
"libthiserror",
@@ -34,26 +34,11 @@
},
},
shared_libs: [
- "libcrypto",
"libbinder_rpc_unstable",
],
defaults: ["crosvm_defaults"],
}
-// TODO(b/172687320): remove once there is a canonical bindgen.
-rust_bindgen {
- name: "libauthfs_crypto_bindgen",
- wrapper_src: "src/crypto.hpp",
- crate_name: "authfs_crypto_bindgen",
- source_stem: "bindings",
- shared_libs: [
- "libcrypto",
- ],
- bindgen_flags: ["--size_t-is-usize"],
- cflags: ["-D BORINGSSL_NO_CXX"],
- apex_available: ["com.android.virt"],
-}
-
rust_binary {
name: "authfs",
defaults: ["authfs_defaults"],
@@ -80,13 +65,3 @@
"testdata/input.4m.fsv_meta.bad_merkle",
],
}
-
-rust_test {
- name: "libauthfs_crypto_bindgen_test",
- srcs: [":libauthfs_crypto_bindgen"],
- crate_name: "authfs_crypto_bindgen_test",
- test_suites: ["general-tests"],
- auto_gen_config: true,
- clippy_lints: "none",
- lints: "none",
-}
diff --git a/authfs/src/crypto.hpp b/authfs/src/crypto.hpp
deleted file mode 100644
index 58b0bd3..0000000
--- a/authfs/src/crypto.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AUTHFS_OPENSSL_WRAPPER_H
-#define AUTHFS_OPENSSL_WRAPPER_H
-
-#include <openssl/sha.h>
-
-#endif // AUTHFS_OPENSSL_WRAPPER_H
diff --git a/authfs/src/crypto.rs b/authfs/src/crypto.rs
deleted file mode 100644
index 672dfb6..0000000
--- a/authfs/src/crypto.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::mem::MaybeUninit;
-
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum CryptoError {
- #[error("Unexpected error returned from {0}")]
- Unexpected(&'static str),
-}
-
-use authfs_crypto_bindgen::{SHA256_Final, SHA256_Init, SHA256_Update, SHA256_CTX};
-
-pub type Sha256Hash = [u8; Sha256Hasher::HASH_SIZE];
-
-pub struct Sha256Hasher {
- ctx: SHA256_CTX,
-}
-
-impl Sha256Hasher {
- pub const HASH_SIZE: usize = 32;
-
- pub const HASH_OF_4096_ZEROS: [u8; Self::HASH_SIZE] = [
- 0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b,
- 0x02, 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89,
- 0x2c, 0xa7,
- ];
-
- pub fn new() -> Result<Sha256Hasher, CryptoError> {
- // Safe assuming the crypto FFI should initialize the uninitialized `ctx`, which is
- // currently a pure data struct.
- unsafe {
- let mut ctx = MaybeUninit::uninit();
- if SHA256_Init(ctx.as_mut_ptr()) == 0 {
- Err(CryptoError::Unexpected("SHA256_Init"))
- } else {
- Ok(Sha256Hasher { ctx: ctx.assume_init() })
- }
- }
- }
-
- pub fn update(&mut self, data: &[u8]) -> Result<&mut Self, CryptoError> {
- // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
- let retval = unsafe {
- SHA256_Update(&mut self.ctx, data.as_ptr() as *mut std::ffi::c_void, data.len())
- };
- if retval == 0 {
- Err(CryptoError::Unexpected("SHA256_Update"))
- } else {
- Ok(self)
- }
- }
-
- pub fn update_from<I, T>(&mut self, iter: I) -> Result<&mut Self, CryptoError>
- where
- I: IntoIterator<Item = T>,
- T: AsRef<[u8]>,
- {
- for data in iter {
- self.update(data.as_ref())?;
- }
- Ok(self)
- }
-
- pub fn finalize(&mut self) -> Result<[u8; Self::HASH_SIZE], CryptoError> {
- let mut md = [0u8; Self::HASH_SIZE];
- // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
- let retval = unsafe { SHA256_Final(md.as_mut_ptr(), &mut self.ctx) };
- if retval == 0 {
- Err(CryptoError::Unexpected("SHA256_Final"))
- } else {
- Ok(md)
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- fn to_hex_string(data: &[u8]) -> String {
- data.iter().map(|&b| format!("{:02x}", b)).collect()
- }
-
- #[test]
- fn verify_hash_values() -> Result<(), CryptoError> {
- let hash = Sha256Hasher::new()?.update(&[0; 0])?.finalize()?;
- let s: String = to_hex_string(&hash);
- assert_eq!(s, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
-
- let hash = Sha256Hasher::new()?
- .update(&[1u8; 1])?
- .update(&[2u8; 1])?
- .update(&[3u8; 1])?
- .finalize()?;
- let s: String = to_hex_string(&hash);
- assert_eq!(s, "039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81");
- Ok(())
- }
-
- #[test]
- fn sha256_of_4096_zeros() -> Result<(), CryptoError> {
- let hash = Sha256Hasher::new()?.update(&[0u8; 4096])?.finalize()?;
- assert_eq!(hash, Sha256Hasher::HASH_OF_4096_ZEROS);
- Ok(())
- }
-}
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index fda47bc..8585fdf 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -14,13 +14,20 @@
* limitations under the License.
*/
-use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::common::{
+ build_fsverity_digest, merkle_tree_height, FsverityError, Sha256Hash, SHA256_HASH_SIZE,
+};
use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use openssl::sha::Sha256;
-const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
+const HASH_SIZE: usize = SHA256_HASH_SIZE;
const HASH_PER_PAGE: usize = CHUNK_SIZE as usize / HASH_SIZE;
+const HASH_OF_4096_ZEROS: Sha256Hash = [
+ 0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+ 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
+];
+
/// MerkleLeaves can be used by the class' customer for bookkeeping integrity data for their bytes.
/// It can also be used to generate the standard fs-verity digest for the source data.
///
@@ -34,12 +41,17 @@
file_size: u64,
}
-fn hash_all_pages(source: &[Sha256Hash]) -> Result<Vec<Sha256Hash>, CryptoError> {
+fn hash_all_pages(source: &[Sha256Hash]) -> Vec<Sha256Hash> {
source
.chunks(HASH_PER_PAGE)
.map(|chunk| {
let padding_bytes = (HASH_PER_PAGE - chunk.len()) * HASH_SIZE;
- Sha256Hasher::new()?.update_from(chunk)?.update(&vec![0u8; padding_bytes])?.finalize()
+ let mut ctx = Sha256::new();
+ for data in chunk {
+ ctx.update(data.as_ref());
+ }
+ ctx.update(&vec![0u8; padding_bytes]);
+ ctx.finish()
})
.collect()
}
@@ -64,7 +76,7 @@
pub fn resize(&mut self, new_file_size: usize) {
let new_file_size = new_file_size as u64;
let leaves_size = divide_roundup(new_file_size, CHUNK_SIZE);
- self.leaves.resize(leaves_size as usize, Sha256Hasher::HASH_OF_4096_ZEROS);
+ self.leaves.resize(leaves_size as usize, HASH_OF_4096_ZEROS);
self.file_size = new_file_size;
}
@@ -75,7 +87,7 @@
if self.leaves.len() < index + 1 {
// When resizing, fill in hash of zeros by default. This makes it easy to handle holes
// in a file.
- self.leaves.resize(index + 1, Sha256Hasher::HASH_OF_4096_ZEROS);
+ self.leaves.resize(index + 1, HASH_OF_4096_ZEROS);
}
self.leaves[index].clone_from_slice(hash);
@@ -116,9 +128,8 @@
// `leaves` is owned and can't be the initial state below. Here we manually hash it
// first to avoid a copy and to get the type right.
- let second_level = hash_all_pages(&self.leaves)?;
- let hashes =
- (1..=level).try_fold(second_level, |source, _| hash_all_pages(&source))?;
+ let second_level = hash_all_pages(&self.leaves);
+ let hashes = (1..=level).fold(second_level, |source, _| hash_all_pages(&source));
if hashes.len() != 1 {
Err(FsverityError::InvalidState)
} else {
@@ -131,7 +142,7 @@
/// Returns the fs-verity digest based on the current tree and file size.
pub fn calculate_fsverity_digest(&self) -> Result<Sha256Hash, FsverityError> {
let root_hash = self.calculate_root_hash()?;
- Ok(build_fsverity_digest(&root_hash, self.file_size)?)
+ Ok(build_fsverity_digest(&root_hash, self.file_size))
}
}
@@ -143,6 +154,7 @@
// $ fsverity digest foo
use super::*;
use anyhow::Result;
+ use openssl::sha::sha256;
#[test]
fn merkle_tree_empty_file() -> Result<()> {
@@ -194,7 +206,7 @@
#[test]
fn merkle_tree_non_sequential() -> Result<()> {
let mut tree = MerkleLeaves::new();
- let hash = Sha256Hasher::new()?.update(&vec![1u8; CHUNK_SIZE as usize])?.finalize()?;
+ let hash = sha256(&vec![1u8; CHUNK_SIZE as usize]);
// Update hashes of 4 1-blocks.
tree.update_hash(1, &hash, CHUNK_SIZE * 2);
@@ -221,8 +233,8 @@
assert!(tree.is_index_valid(1));
assert!(tree.is_index_valid(2));
assert!(!tree.is_index_valid(3));
- assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
- assert!(tree.is_consistent(2, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(1, &HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(2, &HASH_OF_4096_ZEROS));
Ok(())
}
@@ -240,17 +252,17 @@
assert!(!tree.is_index_valid(2));
// The second chunk is a hole and full of zero. When shrunk, with zero padding, the hash
// happens to be consistent to a full-zero chunk.
- assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(1, &HASH_OF_4096_ZEROS));
Ok(())
}
fn generate_fsverity_digest_sequentially(test_data: &[u8]) -> Result<Sha256Hash> {
let mut tree = MerkleLeaves::new();
for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
- let hash = Sha256Hasher::new()?
- .update(chunk)?
- .update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
- .finalize()?;
+ let mut ctx = Sha256::new();
+ ctx.update(chunk);
+ ctx.update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()]);
+ let hash = ctx.finish();
tree.update_hash(index, &hash, CHUNK_SIZE * index as u64 + chunk.len() as u64);
}
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
index eba379d..cb268ef 100644
--- a/authfs/src/fsverity/common.rs
+++ b/authfs/src/fsverity/common.rs
@@ -20,7 +20,13 @@
use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_LOG_BLOCKSIZE, FS_VERITY_VERSION};
use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use openssl::sha::Sha256;
+
+/// Output size of SHA-256 in bytes.
+pub const SHA256_HASH_SIZE: usize = 32;
+
+/// A SHA-256 hash.
+pub type Sha256Hash = [u8; SHA256_HASH_SIZE];
#[derive(Error, Debug)]
pub enum FsverityError {
@@ -32,8 +38,6 @@
CannotVerify,
#[error("I/O error")]
Io(#[from] io::Error),
- #[error("Crypto")]
- UnexpectedCryptoError(#[from] CryptoError),
#[error("Invalid state")]
InvalidState,
}
@@ -47,7 +51,7 @@
/// Return the Merkle tree height for our tree configuration, or None if the size is 0.
pub fn merkle_tree_height(data_size: u64) -> Option<u64> {
- let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+ let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
let hash_pages = divide_roundup(data_size, hashes_per_node * CHUNK_SIZE);
log128_ceil(hash_pages)
}
@@ -56,7 +60,7 @@
pub fn merkle_tree_size(mut data_size: u64) -> u64 {
let mut total = 0;
while data_size > CHUNK_SIZE {
- let hash_size = divide_roundup(data_size, CHUNK_SIZE) * Sha256Hasher::HASH_SIZE as u64;
+ let hash_size = divide_roundup(data_size, CHUNK_SIZE) * SHA256_HASH_SIZE as u64;
let hash_storage_size = divide_roundup(hash_size, CHUNK_SIZE) * CHUNK_SIZE;
total += hash_storage_size;
data_size = hash_storage_size;
@@ -64,28 +68,25 @@
total
}
-pub fn build_fsverity_digest(
- root_hash: &Sha256Hash,
- file_size: u64,
-) -> Result<Sha256Hash, CryptoError> {
+pub fn build_fsverity_digest(root_hash: &Sha256Hash, file_size: u64) -> Sha256Hash {
// Little-endian byte representation of fsverity_descriptor from linux/fsverity.h
// Not FFI-ed as it seems easier to deal with the raw bytes manually.
- Sha256Hasher::new()?
- .update(&FS_VERITY_VERSION.to_le_bytes())? // version
- .update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes())? // hash_algorithm
- .update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes())? // log_blocksize
- .update(&0u8.to_le_bytes())? // salt_size
- .update(&0u32.to_le_bytes())? // sig_size
- .update(&file_size.to_le_bytes())? // data_size
- .update(root_hash)? // root_hash, first 32 bytes
- .update(&[0u8; 32])? // root_hash, last 32 bytes, always 0 because we are using sha256.
- .update(&[0u8; 32])? // salt
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 16])? // reserved
- .finalize()
+ let mut hash = Sha256::new();
+ hash.update(&FS_VERITY_VERSION.to_le_bytes()); // version
+ hash.update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes()); // hash_algorithm
+ hash.update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes()); // log_blocksize
+ hash.update(&0u8.to_le_bytes()); // salt_size
+ hash.update(&0u32.to_le_bytes()); // sig_size
+ hash.update(&file_size.to_le_bytes()); // data_size
+ hash.update(root_hash); // root_hash, first 32 bytes
+ hash.update(&[0u8; 32]); // root_hash, last 32 bytes, always 0 because we are using sha256.
+ hash.update(&[0u8; 32]); // salt
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 16]); // reserved
+ hash.finish()
}
#[cfg(test)]
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 857c6d9..1e298be 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -56,17 +56,10 @@
use std::sync::{Arc, RwLock};
use super::builder::MerkleLeaves;
+use super::common::{Sha256Hash, SHA256_HASH_SIZE};
use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
-
-// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
-// mapping below.
-impl From<CryptoError> for io::Error {
- fn from(error: CryptoError) -> Self {
- io::Error::new(io::ErrorKind::Other, error)
- }
-}
+use openssl::sha::{sha256, Sha256};
fn debug_assert_usize_is_u64() {
// Since we don't need to support 32-bit CPU, make an assert to make conversion between
@@ -90,7 +83,7 @@
/// Returns the fs-verity digest size in bytes.
pub fn get_fsverity_digest_size(&self) -> usize {
- Sha256Hasher::HASH_SIZE
+ SHA256_HASH_SIZE
}
/// Calculates the fs-verity digest of the current file.
@@ -119,7 +112,7 @@
let size = self.read_backing_chunk_unverified(chunk_index, buf)?;
// Ensure the returned buffer matches the known hash.
- let hash = Sha256Hasher::new()?.update(buf)?.finalize()?;
+ let hash = sha256(buf);
if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
}
@@ -147,17 +140,17 @@
self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;
// Verify original content
- let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
+ let hash = sha256(&orig_data);
if !merkle_tree.is_consistent(output_chunk_index, &hash) {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
}
}
- Ok(Sha256Hasher::new()?
- .update(&orig_data[..offset_from_alignment])?
- .update(source)?
- .update(&orig_data[offset_from_alignment + source.len()..])?
- .finalize()?)
+ let mut ctx = Sha256::new();
+ ctx.update(&orig_data[..offset_from_alignment]);
+ ctx.update(source);
+ ctx.update(&orig_data[offset_from_alignment + source.len()..]);
+ Ok(ctx.finish())
}
fn new_chunk_hash(
@@ -171,7 +164,7 @@
if current_size as u64 == CHUNK_SIZE {
// Case 1: If the chunk is a complete one, just calculate the hash, regardless of
// write location.
- Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
+ Ok(sha256(source))
} else {
// Case 2: For an incomplete write, calculate the hash based on previous data (if
// any).
@@ -273,10 +266,10 @@
debug_assert!(new_tail_size <= s);
let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
- let new_hash = Sha256Hasher::new()?
- .update(&buf[..new_tail_size])?
- .update(&zeros)?
- .finalize()?;
+ let mut ctx = Sha256::new();
+ ctx.update(&buf[..new_tail_size]);
+ ctx.update(&zeros);
+ let new_hash = ctx.finish();
merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
}
}
@@ -519,7 +512,7 @@
// detects the inconsistent read.
{
let mut merkle_tree = file.merkle_tree.write().unwrap();
- let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+ let overriding_hash = [42; SHA256_HASH_SIZE];
merkle_tree.update_hash(0, &overriding_hash, 8192);
}
assert!(file.write_at(&[1; 1], 2048).is_err());
@@ -532,7 +525,7 @@
// resumed write will fail since no bytes can be written due to the same inconsistency.
{
let mut merkle_tree = file.merkle_tree.write().unwrap();
- let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+ let overriding_hash = [42; SHA256_HASH_SIZE];
merkle_tree.update_hash(1, &overriding_hash, 8192);
}
assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index aaf4bf7..1434b7e 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -17,18 +17,21 @@
use libc::EIO;
use std::io;
-use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError, SHA256_HASH_SIZE};
use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hasher};
use crate::file::{ChunkBuffer, ReadByChunk};
+use openssl::sha::{sha256, Sha256};
const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
-type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
+type HashBuffer = [u8; SHA256_HASH_SIZE];
-fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
+fn hash_with_padding(chunk: &[u8], pad_to: usize) -> HashBuffer {
let padding_size = pad_to - chunk.len();
- Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
+ let mut ctx = Sha256::new();
+ ctx.update(chunk);
+ ctx.update(&ZEROS[..padding_size]);
+ ctx.finish()
}
fn verity_check<T: ReadByChunk>(
@@ -42,7 +45,7 @@
// beyond the file size, including empty file.
assert_ne!(file_size, 0);
- let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
+ let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize);
// When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
// hash of the file content, plus padding.
@@ -55,11 +58,11 @@
|actual_hash, result| {
let (merkle_chunk, hash_offset_in_chunk) = result?;
let expected_hash =
- &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
+ &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + SHA256_HASH_SIZE];
if actual_hash != expected_hash {
return Err(FsverityError::CannotVerify);
}
- Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
+ Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize))
},
)
}
@@ -74,7 +77,7 @@
file_size: u64,
merkle_tree: &T,
) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
- let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+ let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
debug_assert_eq!(hashes_per_node, 128u64);
let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
let root_to_leaf_steps = (0..=max_level)
@@ -85,7 +88,7 @@
let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
let level_size = nodes_at_level * CHUNK_SIZE;
- let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
+ let offset_in_level = (chunk_index / leaves_per_hash) * SHA256_HASH_SIZE as u64;
(level_size, offset_in_level)
})
.scan(0, |level_offset, (level_size, offset_in_level)| {
@@ -135,8 +138,8 @@
return Err(FsverityError::InsufficientData(size));
}
}
- let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
- if expected_digest == build_fsverity_digest(&root_hash, file_size)? {
+ let root_hash = sha256(&buf[..]);
+ if expected_digest == build_fsverity_digest(&root_hash, file_size) {
// Once verified, use the root_hash for verification going forward.
Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
} else {
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 60318e8..c09ed71 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -42,7 +42,6 @@
use structopt::StructOpt;
mod common;
-mod crypto;
mod file;
mod fsstat;
mod fsverity;