Merge "Move helper function out of struct impl."
diff --git a/apex/Android.bp b/apex/Android.bp
index 0f30c67..4698088 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -4,9 +4,9 @@
microdroid_filesystem_images = [
"microdroid_super",
- "microdroid_boot-5.10",
+ "microdroid_boot",
"microdroid_init_boot",
- "microdroid_vendor_boot-5.10",
+ "microdroid_vendor_boot",
"microdroid_vbmeta",
"microdroid_vbmeta_bootconfig",
]
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index 1c0714e..e314b71 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -361,8 +361,8 @@
virt_apex_files = {
'bootloader.pubkey': 'etc/microdroid_bootloader.avbpubkey',
'bootloader': 'etc/microdroid_bootloader',
- 'boot.img': 'etc/fs/microdroid_boot-5.10.img',
- 'vendor_boot.img': 'etc/fs/microdroid_vendor_boot-5.10.img',
+ 'boot.img': 'etc/fs/microdroid_boot.img',
+ 'vendor_boot.img': 'etc/fs/microdroid_vendor_boot.img',
'init_boot.img': 'etc/fs/microdroid_init_boot.img',
'super.img': 'etc/fs/microdroid_super.img',
'vbmeta.img': 'etc/fs/microdroid_vbmeta.img',
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 935ed5c..84eb0f4 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -13,7 +13,6 @@
"authfs_aidl_interface-rust",
"libandroid_logger",
"libanyhow",
- "libauthfs_crypto_bindgen",
"libauthfs_fsverity_metadata",
"libbinder_rpc_unstable_bindgen",
"libbinder_rs",
@@ -23,6 +22,7 @@
"liblibc",
"liblog_rust",
"libnix",
+ "libopenssl",
"libprotobuf",
"libstructopt",
"libthiserror",
@@ -34,26 +34,11 @@
},
},
shared_libs: [
- "libcrypto",
"libbinder_rpc_unstable",
],
defaults: ["crosvm_defaults"],
}
-// TODO(b/172687320): remove once there is a canonical bindgen.
-rust_bindgen {
- name: "libauthfs_crypto_bindgen",
- wrapper_src: "src/crypto.hpp",
- crate_name: "authfs_crypto_bindgen",
- source_stem: "bindings",
- shared_libs: [
- "libcrypto",
- ],
- bindgen_flags: ["--size_t-is-usize"],
- cflags: ["-D BORINGSSL_NO_CXX"],
- apex_available: ["com.android.virt"],
-}
-
rust_binary {
name: "authfs",
defaults: ["authfs_defaults"],
@@ -80,13 +65,3 @@
"testdata/input.4m.fsv_meta.bad_merkle",
],
}
-
-rust_test {
- name: "libauthfs_crypto_bindgen_test",
- srcs: [":libauthfs_crypto_bindgen"],
- crate_name: "authfs_crypto_bindgen_test",
- test_suites: ["general-tests"],
- auto_gen_config: true,
- clippy_lints: "none",
- lints: "none",
-}
diff --git a/authfs/src/crypto.hpp b/authfs/src/crypto.hpp
deleted file mode 100644
index 58b0bd3..0000000
--- a/authfs/src/crypto.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AUTHFS_OPENSSL_WRAPPER_H
-#define AUTHFS_OPENSSL_WRAPPER_H
-
-#include <openssl/sha.h>
-
-#endif // AUTHFS_OPENSSL_WRAPPER_H
diff --git a/authfs/src/crypto.rs b/authfs/src/crypto.rs
deleted file mode 100644
index 672dfb6..0000000
--- a/authfs/src/crypto.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::mem::MaybeUninit;
-
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum CryptoError {
- #[error("Unexpected error returned from {0}")]
- Unexpected(&'static str),
-}
-
-use authfs_crypto_bindgen::{SHA256_Final, SHA256_Init, SHA256_Update, SHA256_CTX};
-
-pub type Sha256Hash = [u8; Sha256Hasher::HASH_SIZE];
-
-pub struct Sha256Hasher {
- ctx: SHA256_CTX,
-}
-
-impl Sha256Hasher {
- pub const HASH_SIZE: usize = 32;
-
- pub const HASH_OF_4096_ZEROS: [u8; Self::HASH_SIZE] = [
- 0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b,
- 0x02, 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89,
- 0x2c, 0xa7,
- ];
-
- pub fn new() -> Result<Sha256Hasher, CryptoError> {
- // Safe assuming the crypto FFI should initialize the uninitialized `ctx`, which is
- // currently a pure data struct.
- unsafe {
- let mut ctx = MaybeUninit::uninit();
- if SHA256_Init(ctx.as_mut_ptr()) == 0 {
- Err(CryptoError::Unexpected("SHA256_Init"))
- } else {
- Ok(Sha256Hasher { ctx: ctx.assume_init() })
- }
- }
- }
-
- pub fn update(&mut self, data: &[u8]) -> Result<&mut Self, CryptoError> {
- // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
- let retval = unsafe {
- SHA256_Update(&mut self.ctx, data.as_ptr() as *mut std::ffi::c_void, data.len())
- };
- if retval == 0 {
- Err(CryptoError::Unexpected("SHA256_Update"))
- } else {
- Ok(self)
- }
- }
-
- pub fn update_from<I, T>(&mut self, iter: I) -> Result<&mut Self, CryptoError>
- where
- I: IntoIterator<Item = T>,
- T: AsRef<[u8]>,
- {
- for data in iter {
- self.update(data.as_ref())?;
- }
- Ok(self)
- }
-
- pub fn finalize(&mut self) -> Result<[u8; Self::HASH_SIZE], CryptoError> {
- let mut md = [0u8; Self::HASH_SIZE];
- // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
- let retval = unsafe { SHA256_Final(md.as_mut_ptr(), &mut self.ctx) };
- if retval == 0 {
- Err(CryptoError::Unexpected("SHA256_Final"))
- } else {
- Ok(md)
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- fn to_hex_string(data: &[u8]) -> String {
- data.iter().map(|&b| format!("{:02x}", b)).collect()
- }
-
- #[test]
- fn verify_hash_values() -> Result<(), CryptoError> {
- let hash = Sha256Hasher::new()?.update(&[0; 0])?.finalize()?;
- let s: String = to_hex_string(&hash);
- assert_eq!(s, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
-
- let hash = Sha256Hasher::new()?
- .update(&[1u8; 1])?
- .update(&[2u8; 1])?
- .update(&[3u8; 1])?
- .finalize()?;
- let s: String = to_hex_string(&hash);
- assert_eq!(s, "039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81");
- Ok(())
- }
-
- #[test]
- fn sha256_of_4096_zeros() -> Result<(), CryptoError> {
- let hash = Sha256Hasher::new()?.update(&[0u8; 4096])?.finalize()?;
- assert_eq!(hash, Sha256Hasher::HASH_OF_4096_ZEROS);
- Ok(())
- }
-}
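For reference, every use of the deleted wrapper above is replaced by the safe `openssl::sha` API from the `openssl` crate. A minimal sketch of the equivalent one-shot and incremental usage (illustration only, not part of the change itself):

```rust
use openssl::sha::{sha256, Sha256};

fn main() {
    // One-shot hashing replaces Sha256Hasher::new()?.update(..)?.finalize()?.
    let one_shot: [u8; 32] = sha256(b"hello");

    // Incremental hashing replaces the chained update() calls. None of these
    // calls can fail, which is why the CryptoError plumbing can be dropped.
    let mut ctx = Sha256::new();
    ctx.update(b"hel");
    ctx.update(b"lo");
    assert_eq!(ctx.finish(), one_shot);
}
```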
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index fda47bc..8585fdf 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -14,13 +14,20 @@
* limitations under the License.
*/
-use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::common::{
+ build_fsverity_digest, merkle_tree_height, FsverityError, Sha256Hash, SHA256_HASH_SIZE,
+};
use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use openssl::sha::Sha256;
-const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
+const HASH_SIZE: usize = SHA256_HASH_SIZE;
const HASH_PER_PAGE: usize = CHUNK_SIZE as usize / HASH_SIZE;
+const HASH_OF_4096_ZEROS: Sha256Hash = [
+ 0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+ 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
+];
+
/// MerkleLeaves can be used by its callers to keep track of integrity data for their bytes.
/// It can also be used to generate the standard fs-verity digest for the source data.
///
@@ -34,12 +41,17 @@
file_size: u64,
}
-fn hash_all_pages(source: &[Sha256Hash]) -> Result<Vec<Sha256Hash>, CryptoError> {
+fn hash_all_pages(source: &[Sha256Hash]) -> Vec<Sha256Hash> {
source
.chunks(HASH_PER_PAGE)
.map(|chunk| {
let padding_bytes = (HASH_PER_PAGE - chunk.len()) * HASH_SIZE;
- Sha256Hasher::new()?.update_from(chunk)?.update(&vec![0u8; padding_bytes])?.finalize()
+ let mut ctx = Sha256::new();
+ for data in chunk {
+ ctx.update(data.as_ref());
+ }
+ ctx.update(&vec![0u8; padding_bytes]);
+ ctx.finish()
})
.collect()
}
@@ -64,7 +76,7 @@
pub fn resize(&mut self, new_file_size: usize) {
let new_file_size = new_file_size as u64;
let leaves_size = divide_roundup(new_file_size, CHUNK_SIZE);
- self.leaves.resize(leaves_size as usize, Sha256Hasher::HASH_OF_4096_ZEROS);
+ self.leaves.resize(leaves_size as usize, HASH_OF_4096_ZEROS);
self.file_size = new_file_size;
}
@@ -75,7 +87,7 @@
if self.leaves.len() < index + 1 {
// When resizing, fill in hash of zeros by default. This makes it easy to handle holes
// in a file.
- self.leaves.resize(index + 1, Sha256Hasher::HASH_OF_4096_ZEROS);
+ self.leaves.resize(index + 1, HASH_OF_4096_ZEROS);
}
self.leaves[index].clone_from_slice(hash);
@@ -116,9 +128,8 @@
// `leaves` is owned and can't be the initial state below. Here we manually hash it
// first to avoid a copy and to get the type right.
- let second_level = hash_all_pages(&self.leaves)?;
- let hashes =
- (1..=level).try_fold(second_level, |source, _| hash_all_pages(&source))?;
+ let second_level = hash_all_pages(&self.leaves);
+ let hashes = (1..=level).fold(second_level, |source, _| hash_all_pages(&source));
if hashes.len() != 1 {
Err(FsverityError::InvalidState)
} else {
@@ -131,7 +142,7 @@
/// Returns the fs-verity digest based on the current tree and file size.
pub fn calculate_fsverity_digest(&self) -> Result<Sha256Hash, FsverityError> {
let root_hash = self.calculate_root_hash()?;
- Ok(build_fsverity_digest(&root_hash, self.file_size)?)
+ Ok(build_fsverity_digest(&root_hash, self.file_size))
}
}
@@ -143,6 +154,7 @@
// $ fsverity digest foo
use super::*;
use anyhow::Result;
+ use openssl::sha::sha256;
#[test]
fn merkle_tree_empty_file() -> Result<()> {
@@ -194,7 +206,7 @@
#[test]
fn merkle_tree_non_sequential() -> Result<()> {
let mut tree = MerkleLeaves::new();
- let hash = Sha256Hasher::new()?.update(&vec![1u8; CHUNK_SIZE as usize])?.finalize()?;
+ let hash = sha256(&vec![1u8; CHUNK_SIZE as usize]);
// Update hashes of 4 1-blocks.
tree.update_hash(1, &hash, CHUNK_SIZE * 2);
@@ -221,8 +233,8 @@
assert!(tree.is_index_valid(1));
assert!(tree.is_index_valid(2));
assert!(!tree.is_index_valid(3));
- assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
- assert!(tree.is_consistent(2, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(1, &HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(2, &HASH_OF_4096_ZEROS));
Ok(())
}
@@ -240,17 +252,17 @@
assert!(!tree.is_index_valid(2));
        // The second chunk is a hole and full of zero. When shrunk, with zero padding, the hash
        // happens to be consistent with a full-zero chunk.
- assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
+ assert!(tree.is_consistent(1, &HASH_OF_4096_ZEROS));
Ok(())
}
fn generate_fsverity_digest_sequentially(test_data: &[u8]) -> Result<Sha256Hash> {
let mut tree = MerkleLeaves::new();
for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
- let hash = Sha256Hasher::new()?
- .update(chunk)?
- .update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
- .finalize()?;
+ let mut ctx = Sha256::new();
+ ctx.update(chunk);
+ ctx.update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()]);
+ let hash = ctx.finish();
tree.update_hash(index, &hash, CHUNK_SIZE * index as u64 + chunk.len() as u64);
}
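The `HASH_OF_4096_ZEROS` constant inlined above preserves the invariant that the deleted `sha256_of_4096_zeros` test verified. A standalone sanity check (a sketch, assuming the `openssl` crate):

```rust
use openssl::sha::sha256;

fn main() {
    // SHA-256 of one zero-filled 4096-byte chunk, the default leaf hash.
    let hash = sha256(&[0u8; 4096]);
    // Leading bytes of HASH_OF_4096_ZEROS from the hunk above.
    assert_eq!(hash[..4], [0xad, 0x7f, 0xac, 0xb2]);
}
```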
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
index eba379d..cb268ef 100644
--- a/authfs/src/fsverity/common.rs
+++ b/authfs/src/fsverity/common.rs
@@ -20,7 +20,13 @@
use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_LOG_BLOCKSIZE, FS_VERITY_VERSION};
use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use openssl::sha::Sha256;
+
+/// Output size of SHA-256 in bytes.
+pub const SHA256_HASH_SIZE: usize = 32;
+
+/// A SHA-256 hash.
+pub type Sha256Hash = [u8; SHA256_HASH_SIZE];
#[derive(Error, Debug)]
pub enum FsverityError {
@@ -32,8 +38,6 @@
CannotVerify,
#[error("I/O error")]
Io(#[from] io::Error),
- #[error("Crypto")]
- UnexpectedCryptoError(#[from] CryptoError),
#[error("Invalid state")]
InvalidState,
}
@@ -47,7 +51,7 @@
/// Return the Merkle tree height for our tree configuration, or None if the size is 0.
pub fn merkle_tree_height(data_size: u64) -> Option<u64> {
- let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+ let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
let hash_pages = divide_roundup(data_size, hashes_per_node * CHUNK_SIZE);
log128_ceil(hash_pages)
}
@@ -56,7 +60,7 @@
pub fn merkle_tree_size(mut data_size: u64) -> u64 {
let mut total = 0;
while data_size > CHUNK_SIZE {
- let hash_size = divide_roundup(data_size, CHUNK_SIZE) * Sha256Hasher::HASH_SIZE as u64;
+ let hash_size = divide_roundup(data_size, CHUNK_SIZE) * SHA256_HASH_SIZE as u64;
let hash_storage_size = divide_roundup(hash_size, CHUNK_SIZE) * CHUNK_SIZE;
total += hash_storage_size;
data_size = hash_storage_size;
@@ -64,28 +68,25 @@
total
}
-pub fn build_fsverity_digest(
- root_hash: &Sha256Hash,
- file_size: u64,
-) -> Result<Sha256Hash, CryptoError> {
+pub fn build_fsverity_digest(root_hash: &Sha256Hash, file_size: u64) -> Sha256Hash {
// Little-endian byte representation of fsverity_descriptor from linux/fsverity.h
// Not FFI-ed as it seems easier to deal with the raw bytes manually.
- Sha256Hasher::new()?
- .update(&FS_VERITY_VERSION.to_le_bytes())? // version
- .update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes())? // hash_algorithm
- .update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes())? // log_blocksize
- .update(&0u8.to_le_bytes())? // salt_size
- .update(&0u32.to_le_bytes())? // sig_size
- .update(&file_size.to_le_bytes())? // data_size
- .update(root_hash)? // root_hash, first 32 bytes
- .update(&[0u8; 32])? // root_hash, last 32 bytes, always 0 because we are using sha256.
- .update(&[0u8; 32])? // salt
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 32])? // reserved
- .update(&[0u8; 16])? // reserved
- .finalize()
+ let mut hash = Sha256::new();
+ hash.update(&FS_VERITY_VERSION.to_le_bytes()); // version
+ hash.update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes()); // hash_algorithm
+ hash.update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes()); // log_blocksize
+ hash.update(&0u8.to_le_bytes()); // salt_size
+ hash.update(&0u32.to_le_bytes()); // sig_size
+ hash.update(&file_size.to_le_bytes()); // data_size
+ hash.update(root_hash); // root_hash, first 32 bytes
+ hash.update(&[0u8; 32]); // root_hash, last 32 bytes, always 0 because we are using sha256.
+ hash.update(&[0u8; 32]); // salt
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 32]); // reserved
+ hash.update(&[0u8; 16]); // reserved
+ hash.finish()
}
#[cfg(test)]
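As a cross-check of the `build_fsverity_digest` layout above: the hashed field widths should add up to the 256 bytes of `struct fsverity_descriptor`. A sketch, with widths read off the hunk (and assuming the u8/u32 constant types implied by the one-byte fields in linux/fsverity.h):

```rust
fn main() {
    // update() sizes in build_fsverity_digest, in hashing order:
    // version, hash_algorithm, log_blocksize, salt_size, sig_size,
    // data_size, root_hash (2 x 32), salt, reserved (4 x 32 + 16).
    let widths: [u32; 14] = [1, 1, 1, 1, 4, 8, 32, 32, 32, 32, 32, 32, 32, 16];
    assert_eq!(widths.iter().sum::<u32>(), 256);
}
```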
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 857c6d9..1e298be 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -56,17 +56,10 @@
use std::sync::{Arc, RwLock};
use super::builder::MerkleLeaves;
+use super::common::{Sha256Hash, SHA256_HASH_SIZE};
use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
-
-// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
-// mapping below.
-impl From<CryptoError> for io::Error {
- fn from(error: CryptoError) -> Self {
- io::Error::new(io::ErrorKind::Other, error)
- }
-}
+use openssl::sha::{sha256, Sha256};
fn debug_assert_usize_is_u64() {
// Since we don't need to support 32-bit CPU, make an assert to make conversion between
@@ -90,7 +83,7 @@
/// Returns the fs-verity digest size in bytes.
pub fn get_fsverity_digest_size(&self) -> usize {
- Sha256Hasher::HASH_SIZE
+ SHA256_HASH_SIZE
}
/// Calculates the fs-verity digest of the current file.
@@ -119,7 +112,7 @@
let size = self.read_backing_chunk_unverified(chunk_index, buf)?;
// Ensure the returned buffer matches the known hash.
- let hash = Sha256Hasher::new()?.update(buf)?.finalize()?;
+ let hash = sha256(buf);
if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
}
@@ -147,17 +140,17 @@
self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;
// Verify original content
- let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
+ let hash = sha256(&orig_data);
if !merkle_tree.is_consistent(output_chunk_index, &hash) {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
}
}
- Ok(Sha256Hasher::new()?
- .update(&orig_data[..offset_from_alignment])?
- .update(source)?
- .update(&orig_data[offset_from_alignment + source.len()..])?
- .finalize()?)
+ let mut ctx = Sha256::new();
+ ctx.update(&orig_data[..offset_from_alignment]);
+ ctx.update(source);
+ ctx.update(&orig_data[offset_from_alignment + source.len()..]);
+ Ok(ctx.finish())
}
fn new_chunk_hash(
@@ -171,7 +164,7 @@
if current_size as u64 == CHUNK_SIZE {
// Case 1: If the chunk is a complete one, just calculate the hash, regardless of
// write location.
- Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
+ Ok(sha256(source))
} else {
// Case 2: For an incomplete write, calculate the hash based on previous data (if
// any).
@@ -273,10 +266,10 @@
debug_assert!(new_tail_size <= s);
let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
- let new_hash = Sha256Hasher::new()?
- .update(&buf[..new_tail_size])?
- .update(&zeros)?
- .finalize()?;
+ let mut ctx = Sha256::new();
+ ctx.update(&buf[..new_tail_size]);
+ ctx.update(&zeros);
+ let new_hash = ctx.finish();
merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
}
}
@@ -519,7 +512,7 @@
// detects the inconsistent read.
{
let mut merkle_tree = file.merkle_tree.write().unwrap();
- let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+ let overriding_hash = [42; SHA256_HASH_SIZE];
merkle_tree.update_hash(0, &overriding_hash, 8192);
}
assert!(file.write_at(&[1; 1], 2048).is_err());
@@ -532,7 +525,7 @@
// resumed write will fail since no bytes can be written due to the same inconsistency.
{
let mut merkle_tree = file.merkle_tree.write().unwrap();
- let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+ let overriding_hash = [42; SHA256_HASH_SIZE];
merkle_tree.update_hash(1, &overriding_hash, 8192);
}
assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
diff --git a/authfs/src/fsverity/metadata/Android.bp b/authfs/src/fsverity/metadata/Android.bp
index af3729f..c988884 100644
--- a/authfs/src/fsverity/metadata/Android.bp
+++ b/authfs/src/fsverity/metadata/Android.bp
@@ -18,7 +18,7 @@
],
rustlibs: [
"libauthfs_fsverity_metadata_bindgen",
- "libring",
+ "libopenssl",
],
edition: "2018",
apex_available: ["com.android.virt"],
diff --git a/authfs/src/fsverity/metadata/metadata.rs b/authfs/src/fsverity/metadata/metadata.rs
index 8bc0617..54d0145 100644
--- a/authfs/src/fsverity/metadata/metadata.rs
+++ b/authfs/src/fsverity/metadata/metadata.rs
@@ -20,7 +20,7 @@
FSVERITY_SIGNATURE_TYPE_NONE, FSVERITY_SIGNATURE_TYPE_PKCS7, FSVERITY_SIGNATURE_TYPE_RAW,
};
-use ring::digest::{Context, SHA256};
+use openssl::sha::sha256;
use std::cmp::min;
use std::ffi::OsString;
use std::fs::File;
@@ -96,14 +96,11 @@
// Digest needs to be calculated with the raw value (without changing the endianness).
let digest = match header.descriptor.hash_algorithm {
- FSVERITY_HASH_ALG_SHA256 => {
- let mut context = Context::new(&SHA256);
- context.update(
- &back_buffer
- [DESCRIPTOR_OFFSET..DESCRIPTOR_OFFSET + size_of::<fsverity_descriptor>()],
- );
- Ok(context.finish().as_ref().to_owned())
- }
+ FSVERITY_HASH_ALG_SHA256 => Ok(sha256(
+ &back_buffer
+ [DESCRIPTOR_OFFSET..DESCRIPTOR_OFFSET + size_of::<fsverity_descriptor>()],
+ )
+ .to_vec()),
alg => Err(io::Error::new(
io::ErrorKind::Other,
format!("Unsupported hash algorithm {}, continue (likely failing soon)", alg),
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index aaf4bf7..1434b7e 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -17,18 +17,21 @@
use libc::EIO;
use std::io;
-use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError, SHA256_HASH_SIZE};
use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hasher};
use crate::file::{ChunkBuffer, ReadByChunk};
+use openssl::sha::{sha256, Sha256};
const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
-type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
+type HashBuffer = [u8; SHA256_HASH_SIZE];
-fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
+fn hash_with_padding(chunk: &[u8], pad_to: usize) -> HashBuffer {
let padding_size = pad_to - chunk.len();
- Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
+ let mut ctx = Sha256::new();
+ ctx.update(chunk);
+ ctx.update(&ZEROS[..padding_size]);
+ ctx.finish()
}
fn verity_check<T: ReadByChunk>(
@@ -42,7 +45,7 @@
// beyond the file size, including empty file.
assert_ne!(file_size, 0);
- let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
+ let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize);
// When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
// hash of the file content, plus padding.
@@ -55,11 +58,11 @@
|actual_hash, result| {
let (merkle_chunk, hash_offset_in_chunk) = result?;
let expected_hash =
- &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
+ &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + SHA256_HASH_SIZE];
if actual_hash != expected_hash {
return Err(FsverityError::CannotVerify);
}
- Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
+ Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize))
},
)
}
@@ -74,7 +77,7 @@
file_size: u64,
merkle_tree: &T,
) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
- let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+ let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
debug_assert_eq!(hashes_per_node, 128u64);
let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
let root_to_leaf_steps = (0..=max_level)
@@ -85,7 +88,7 @@
let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
let level_size = nodes_at_level * CHUNK_SIZE;
- let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
+ let offset_in_level = (chunk_index / leaves_per_hash) * SHA256_HASH_SIZE as u64;
(level_size, offset_in_level)
})
.scan(0, |level_offset, (level_size, offset_in_level)| {
@@ -135,8 +138,8 @@
return Err(FsverityError::InsufficientData(size));
}
}
- let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
- if expected_digest == build_fsverity_digest(&root_hash, file_size)? {
+ let root_hash = sha256(&buf[..]);
+ if expected_digest == build_fsverity_digest(&root_hash, file_size) {
// Once verified, use the root_hash for verification going forward.
Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
} else {
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 60318e8..c09ed71 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -42,7 +42,6 @@
use structopt::StructOpt;
mod common;
-mod crypto;
mod file;
mod fsstat;
mod fsverity;
diff --git a/compos/tests/Android.bp b/compos/tests/Android.bp
index c178ddd..b77a7e4 100644
--- a/compos/tests/Android.bp
+++ b/compos/tests/Android.bp
@@ -10,6 +10,7 @@
"compatibility-tradefed",
"compatibility-host-util",
],
+ data_native_bins: ["bcc_validator"],
static_libs: [
"VirtualizationTestHelper",
],
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index eec9e39..51f0a1f 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -16,17 +16,24 @@
package android.compos.test;
+import static android.virt.test.CommandResultSubject.assertThat;
+import static android.virt.test.CommandResultSubject.command_results;
+
import static com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestLogData;
import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
import android.platform.test.annotations.RootPermissionTest;
import android.virt.test.CommandRunner;
import android.virt.test.VirtualizationTestCaseBase;
import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.result.FileInputStreamSource;
+import com.android.tradefed.result.LogDataType;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.RunUtil;
import org.junit.After;
import org.junit.Before;
@@ -35,6 +42,8 @@
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
+import java.io.File;
+
@RootPermissionTest
@RunWith(DeviceJUnit4ClassRunner.class)
public final class ComposTestCase extends VirtualizationTestCaseBase {
@@ -127,7 +136,7 @@
long start = System.currentTimeMillis();
CommandResult result = runOdrefresh(android, "--force-compile");
long elapsed = System.currentTimeMillis() - start;
- assertThat(result.getExitCode()).isEqualTo(COMPILATION_SUCCESS);
+ assertThat(result).exitCode().isEqualTo(COMPILATION_SUCCESS);
CLog.i("Local compilation took " + elapsed + "ms");
}
@@ -137,12 +146,7 @@
// --check may delete the output.
CommandResult result = runOdrefresh(android, "--check");
- assertThat(result.getExitCode()).isEqualTo(OKAY);
-
- // Make sure we generate a fresh instance.
- android.tryRun("rm", "-rf", COMPOS_TEST_ROOT);
- // TODO: remove once composd starts to clean up the directory.
- android.tryRun("rm", "-rf", ODREFRESH_OUTPUT_DIR);
+ assertThat(result).exitCode().isEqualTo(OKAY);
// Expect the compilation in Compilation OS to finish successfully.
{
@@ -151,11 +155,14 @@
android.runForResultWithTimeout(
ODREFRESH_TIMEOUT_MS, COMPOSD_CMD_BIN, "test-compile");
long elapsed = System.currentTimeMillis() - start;
- assertThat(result.getExitCode()).isEqualTo(0);
+ assertThat(result).exitCode().isEqualTo(0);
CLog.i("Comp OS compilation took " + elapsed + "ms");
}
killVmAndReconnectAdb();
+        // Expect the BCC extracted from the VM to be well-formed.
+ assertVmBccIsValid();
+
// Save the actual checksum for the output directory.
String actualChecksumSnapshot = checksumDirectoryContentPartial(android,
ODREFRESH_OUTPUT_DIR);
@@ -171,6 +178,24 @@
android.run(COMPOS_VERIFY_BIN + " --debug --instance test");
}
+ private void assertVmBccIsValid() throws Exception {
+ File bcc_file = getDevice().pullFile(COMPOS_APEXDATA_DIR + "/test/bcc");
+ assertThat(bcc_file).isNotNull();
+
+ // Add the BCC to test artifacts, in case it is ill-formed or otherwise interesting.
+ mTestLogs.addTestLog(bcc_file.getPath(), LogDataType.UNKNOWN,
+ new FileInputStreamSource(bcc_file));
+
+ // Find the validator binary - note that it's specified as a dependency in our Android.bp.
+ File validator = getTestInformation().getDependencyFile("bcc_validator", /*targetFirst=*/
+ false);
+
+ CommandResult result = new RunUtil().runTimedCmd(10000,
+ validator.getAbsolutePath(), "verify-chain", bcc_file.getAbsolutePath());
+ assertWithMessage("bcc_validator failed").about(command_results())
+ .that(result).isSuccess();
+ }
+
private CommandResult runOdrefresh(CommandRunner android, String command) throws Exception {
return android.runForResultWithTimeout(
ODREFRESH_TIMEOUT_MS,
diff --git a/docs/getting_started/index.md b/docs/getting_started/index.md
index be97ad5..7eae02d 100644
--- a/docs/getting_started/index.md
+++ b/docs/getting_started/index.md
@@ -102,7 +102,7 @@
```shell
mkdir android-kernel && cd android-kernel
-repo init -u https://android.googlesource.com/kernel/manifest -b common-android12-5.10
+repo init -u https://android.googlesource.com/kernel/manifest -b common-android13-5.15
repo sync
FAST_BUILD=1 DIST_DIR=out/dist BUILD_CONFIG=common/build.config.gki.aarch64 build/build.sh -j80
```
@@ -113,7 +113,7 @@
Then copy the built kernel to the Android source tree.
```
-cp out/dist/Image <android_root>/kernel/prebuilts/5.10/arm64/kernel-5.10
+cp out/dist/Image <android_root>/kernel/prebuilts/5.15/arm64/kernel-5.15
```
Finally rebuild the `com.android.virt` APEX and install it by following the
diff --git a/libs/idsig/Android.bp b/libs/idsig/Android.bp
index 3f70a64..2e9c663 100644
--- a/libs/idsig/Android.bp
+++ b/libs/idsig/Android.bp
@@ -11,8 +11,8 @@
rustlibs: [
"libanyhow",
"libbyteorder",
- "libring",
"libnum_traits",
+ "libopenssl",
],
proc_macros: ["libnum_derive"],
}
diff --git a/libs/idsig/src/apksigv4.rs b/libs/idsig/src/apksigv4.rs
index a5578d8..3004ed1 100644
--- a/libs/idsig/src/apksigv4.rs
+++ b/libs/idsig/src/apksigv4.rs
@@ -175,7 +175,7 @@
// Create hash tree (and root hash)
let algorithm = match algorithm {
- HashAlgorithm::SHA256 => &ring::digest::SHA256,
+ HashAlgorithm::SHA256 => openssl::hash::MessageDigest::sha256(),
};
let hash_tree = HashTree::from(&mut apk, size, salt, block_size, algorithm)?;
diff --git a/libs/idsig/src/hashtree.rs b/libs/idsig/src/hashtree.rs
index 63f83ea..038f839 100644
--- a/libs/idsig/src/hashtree.rs
+++ b/libs/idsig/src/hashtree.rs
@@ -14,9 +14,7 @@
* limitations under the License.
*/
-pub use ring::digest::{Algorithm, Digest};
-
-use ring::digest;
+use openssl::hash::{DigestBytes, Hasher, MessageDigest};
use std::io::{Cursor, Read, Result, Write};
/// `HashTree` is a merkle tree (and its root hash) that is compatible with fs-verity.
@@ -35,7 +33,7 @@
input_size: usize,
salt: &[u8],
block_size: usize,
- algorithm: &'static Algorithm,
+ algorithm: MessageDigest,
) -> Result<Self> {
let salt = zero_pad_salt(salt, algorithm);
let tree = generate_hash_tree(input, input_size, &salt, block_size, algorithm)?;
@@ -45,10 +43,10 @@
let root_hash = if tree.is_empty() {
let mut data = Vec::new();
input.read_to_end(&mut data)?;
- hash_one_block(&data, &salt, block_size, algorithm).as_ref().to_vec()
+ hash_one_block(&data, &salt, block_size, algorithm)?.as_ref().to_vec()
} else {
let first_block = &tree[0..block_size];
- hash_one_block(first_block, &salt, block_size, algorithm).as_ref().to_vec()
+ hash_one_block(first_block, &salt, block_size, algorithm)?.as_ref().to_vec()
};
Ok(HashTree { tree, root_hash })
}
@@ -69,9 +67,9 @@
input_size: usize,
salt: &[u8],
block_size: usize,
- algorithm: &'static Algorithm,
+ algorithm: MessageDigest,
) -> Result<Vec<u8>> {
- let digest_size = algorithm.output_len;
+ let digest_size = algorithm.size();
let levels = calc_hash_levels(input_size, block_size, digest_size);
let tree_size = levels.iter().map(|r| r.len()).sum();
@@ -89,7 +87,7 @@
let mut num_blocks = (input_size + block_size - 1) / block_size;
while num_blocks > 0 {
input.read_exact(&mut a_block)?;
- let h = hash_one_block(&a_block, salt, block_size, algorithm);
+ let h = hash_one_block(&a_block, salt, block_size, algorithm)?;
level0.write_all(h.as_ref()).unwrap();
num_blocks -= 1;
}
@@ -102,10 +100,10 @@
let cur_and_prev = &mut hash_tree[cur.start..prev.end];
let (cur, prev) = cur_and_prev.split_at_mut(prev.start - cur.start);
let mut cur = Cursor::new(cur);
- prev.chunks(block_size).for_each(|data| {
- let h = hash_one_block(data, salt, block_size, algorithm);
+ for data in prev.chunks(block_size) {
+ let h = hash_one_block(data, salt, block_size, algorithm)?;
cur.write_all(h.as_ref()).unwrap();
- });
+ }
}
}
Ok(hash_tree)
@@ -117,14 +115,14 @@
input: &[u8],
salt: &[u8],
block_size: usize,
- algorithm: &'static Algorithm,
-) -> Digest {
- let mut ctx = digest::Context::new(algorithm);
- ctx.update(salt);
- ctx.update(input);
+ algorithm: MessageDigest,
+) -> Result<DigestBytes> {
+ let mut ctx = Hasher::new(algorithm)?;
+ ctx.update(salt)?;
+ ctx.update(input)?;
let pad_size = block_size - input.len();
- ctx.update(&vec![0; pad_size]);
- ctx.finish()
+ ctx.update(&vec![0; pad_size])?;
+ Ok(ctx.finish()?)
}
type Range = std::ops::Range<usize>;
@@ -180,11 +178,11 @@
/// If a salt was specified, then it’s zero-padded to the closest multiple of the input size of the
/// hash algorithm’s compression function, e.g. 64 bytes for SHA-256 or 128 bytes for SHA-512. The
/// padded salt is prepended to every data or Merkle tree block that is hashed.
-fn zero_pad_salt(salt: &[u8], algorithm: &Algorithm) -> Vec<u8> {
+fn zero_pad_salt(salt: &[u8], algorithm: MessageDigest) -> Vec<u8> {
if salt.is_empty() {
salt.to_vec()
} else {
- let padded_len = round_to_multiple(salt.len(), algorithm.block_len);
+ let padded_len = round_to_multiple(salt.len(), algorithm.block_size());
let mut salt = salt.to_vec();
salt.resize(padded_len, 0);
salt
@@ -194,7 +192,7 @@
#[cfg(test)]
mod tests {
use super::*;
- use ring::digest;
+ use openssl::hash::MessageDigest;
use std::fs::{self, File};
#[test]
@@ -210,7 +208,7 @@
let size = std::fs::metadata(&input_name)?.len() as usize;
let salt = vec![1, 2, 3, 4, 5, 6];
- let ht = HashTree::from(&mut input, size, &salt, 4096, &digest::SHA256)?;
+ let ht = HashTree::from(&mut input, size, &salt, 4096, MessageDigest::sha256())?;
assert_eq!(golden_hash_tree.as_slice(), ht.tree.as_slice());
assert_eq!(golden_root_hash, ht.root_hash.as_slice());
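The switch from `ring::digest::Context` (infallible) to `openssl::hash::Hasher` is what threads `Result` through `hash_one_block` and the tree-building loops above. A minimal sketch of the new API (assuming the `openssl` crate):

```rust
use openssl::hash::{Hasher, MessageDigest};

fn main() -> Result<(), openssl::error::ErrorStack> {
    // Unlike ring's Context, every Hasher operation can fail with an
    // ErrorStack, hence the `?` now sprinkled through hash_one_block().
    let mut ctx = Hasher::new(MessageDigest::sha256())?;
    ctx.update(b"salt")?;
    ctx.update(b"block data")?;
    let digest = ctx.finish()?; // DigestBytes, derefs to &[u8]
    assert_eq!(digest.len(), MessageDigest::sha256().size());
    Ok(())
}
```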
diff --git a/microdroid/Android.bp b/microdroid/Android.bp
index 8702568..4b804b1 100644
--- a/microdroid/Android.bp
+++ b/microdroid/Android.bp
@@ -240,17 +240,17 @@
]
bootimg {
- name: "microdroid_boot-5.10",
+ name: "microdroid_boot",
    // We don't have a kernel for arm and x86. But Soong demands one when it builds for
    // an arm or x86 target. Satisfy that by providing an empty file as the kernel.
kernel_prebuilt: "empty_kernel",
arch: {
arm64: {
- kernel_prebuilt: ":kernel_prebuilts-5.10-arm64",
+ kernel_prebuilt: ":kernel_prebuilts-5.15-arm64",
cmdline: microdroid_boot_cmdline,
},
x86_64: {
- kernel_prebuilt: ":kernel_prebuilts-5.10-x86_64",
+ kernel_prebuilt: ":kernel_prebuilts-5.15-x86_64",
cmdline: microdroid_boot_cmdline + [
// console=none is to work around the x86 specific u-boot behavior which when
// console= option is not found in the kernel commandline console=ttyS0 is
@@ -272,7 +272,7 @@
bootimg {
name: "microdroid_init_boot",
- ramdisk_module: "microdroid_ramdisk-5.10",
+ ramdisk_module: "microdroid_ramdisk",
kernel_prebuilt: "empty_kernel",
header_version: "4",
partition_name: "init_boot",
@@ -281,7 +281,7 @@
}
android_filesystem {
- name: "microdroid_ramdisk-5.10",
+ name: "microdroid_ramdisk",
deps: [
"init_first_stage",
],
@@ -299,8 +299,8 @@
}
bootimg {
- name: "microdroid_vendor_boot-5.10",
- ramdisk_module: "microdroid_vendor_ramdisk-5.10",
+ name: "microdroid_vendor_boot",
+ ramdisk_module: "microdroid_vendor_ramdisk",
dtb_prebuilt: "dummy_dtb.img",
header_version: "4",
vendor_boot: true,
@@ -321,17 +321,17 @@
name: "microdroid_kernel_modules",
arch: {
arm64: {
- srcs: [":virt_device_prebuilts_kernel_modules_microdroid-5.10-arm64"],
+ srcs: [":virt_device_prebuilts_kernel_modules_microdroid-5.15-arm64"],
},
x86_64: {
- srcs: [":virt_device_prebuilts_kernel_modules_microdroid-5.10-x86_64"],
+ srcs: [":virt_device_prebuilts_kernel_modules_microdroid-5.15-x86_64"],
},
},
- kernel_version: "5.10",
+ kernel_version: "5.15",
}
android_filesystem {
- name: "microdroid_vendor_ramdisk-5.10",
+ name: "microdroid_vendor_ramdisk",
deps: [
"microdroid_fstab",
"microdroid_kernel_modules",
@@ -600,9 +600,9 @@
private_key: ":microdroid_sign_key",
partitions: [
"microdroid_vendor",
- "microdroid_vendor_boot-5.10",
+ "microdroid_vendor_boot",
"microdroid",
- "microdroid_boot-5.10",
+ "microdroid_boot",
"microdroid_init_boot",
],
}
diff --git a/microdroid/microdroid.json b/microdroid/microdroid.json
index aff0b7b..bf8d93e 100644
--- a/microdroid/microdroid.json
+++ b/microdroid/microdroid.json
@@ -5,7 +5,7 @@
"partitions": [
{
"label": "boot_a",
- "path": "/apex/com.android.virt/etc/fs/microdroid_boot-5.10.img"
+ "path": "/apex/com.android.virt/etc/fs/microdroid_boot.img"
},
{
"label": "init_boot_a",
@@ -13,7 +13,7 @@
},
{
"label": "vendor_boot_a",
- "path": "/apex/com.android.virt/etc/fs/microdroid_vendor_boot-5.10.img"
+ "path": "/apex/com.android.virt/etc/fs/microdroid_vendor_boot.img"
},
{
"label": "vbmeta_a",
diff --git a/tests/Android.bp b/tests/Android.bp
index 2c36a62..a06a33a 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -16,7 +16,7 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-kernel_version = "5.10"
+kernel_version = "5.15"
kernel_stem = "kernel_prebuilts-" + kernel_version