Merge "Enable SELinux in ComposTestCase"
diff --git a/TEST_MAPPING b/TEST_MAPPING
index d2a4821..5218abb 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -20,6 +20,9 @@
"path": "packages/modules/Virtualization/apkdmverity"
},
{
+ "path": "packages/modules/Virtualization/apkverify"
+ },
+ {
"path": "packages/modules/Virtualization/authfs"
},
{
diff --git a/apkverify/Android.bp b/apkverify/Android.bp
index 8a98320..d2dbf41 100644
--- a/apkverify/Android.bp
+++ b/apkverify/Android.bp
@@ -2,8 +2,8 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-rust_library {
- name: "libapkverify",
+rust_defaults {
+ name: "libapkverify.defaults",
host_supported: true,
crate_name: "apkverify",
srcs: ["src/lib.rs"],
@@ -19,3 +19,26 @@
"libzip",
],
}
+
+rust_library {
+ name: "libapkverify",
+ defaults: ["libapkverify.defaults"],
+}
+
+rust_test {
+ name: "libapkverify.test",
+ defaults: ["libapkverify.defaults"],
+ test_suites: ["general-tests"],
+}
+
+rust_test {
+ name: "libapkverify.integration_test",
+ host_supported: true,
+ crate_name: "apkverify_test",
+ srcs: ["tests/*_test.rs"],
+ prefer_rlib: true,
+ edition: "2018",
+ test_suites: ["general-tests"],
+ rustlibs: ["libapkverify"],
+ data: ["tests/data/*"],
+}
diff --git a/apkverify/TEST_MAPPING b/apkverify/TEST_MAPPING
new file mode 100644
index 0000000..9248716
--- /dev/null
+++ b/apkverify/TEST_MAPPING
@@ -0,0 +1,10 @@
+{
+ "presubmit" : [
+ {
+ "name" : "libapkverify.test"
+ },
+ {
+ "name" : "libapkverify.integration_test"
+ }
+ ]
+}
diff --git a/apkverify/src/bytes_ext.rs b/apkverify/src/bytes_ext.rs
index 5efb33c..1b8d6b6 100644
--- a/apkverify/src/bytes_ext.rs
+++ b/apkverify/src/bytes_ext.rs
@@ -95,3 +95,19 @@
}
Ok(buf.split_to(len))
}
+
+#[cfg(test)]
+mod tests {
+ use bytes::{BufMut, BytesMut};
+ #[test]
+ fn test_read_length_prefixed_slice() {
+ let data = b"hello world";
+ let mut b = BytesMut::new();
+ b.put_u32_le(data.len() as u32);
+ b.put_slice(data);
+ let mut slice = b.freeze();
+ let res = super::read_length_prefixed_slice(&mut slice);
+ assert!(res.is_ok());
+ assert_eq!(data, res.ok().unwrap().as_ref());
+ }
+}
diff --git a/apkverify/src/sigutil.rs b/apkverify/src/sigutil.rs
index 9de794a..06645fe 100644
--- a/apkverify/src/sigutil.rs
+++ b/apkverify/src/sigutil.rs
@@ -18,10 +18,12 @@
use anyhow::{anyhow, bail, Result};
use byteorder::{LittleEndian, ReadBytesExt};
-use bytes::{Buf, Bytes};
-use std::io::{Read, Seek, SeekFrom};
+use bytes::{Buf, BufMut, Bytes};
+use ring::digest;
+use std::cmp::min;
+use std::io::{Cursor, Read, Seek, SeekFrom, Take};
-use crate::ziputil::zip_sections;
+use crate::ziputil::{set_central_directory_offset, zip_sections};
const APK_SIG_BLOCK_MIN_SIZE: u32 = 32;
const APK_SIG_BLOCK_MAGIC: u128 = 0x3234206b636f6c4220676953204b5041;
@@ -45,27 +47,146 @@
#[allow(unused)]
const CONTENT_DIGEST_SHA256: u32 = 4;
-pub struct SignatureInfo {
- pub signature_block: Bytes,
+const CHUNK_SIZE_BYTES: u64 = 1024 * 1024;
+
+pub struct ApkSections<R> {
+ inner: R,
+ signing_block_offset: u32,
+ signing_block_size: u32,
+ central_directory_offset: u32,
+ central_directory_size: u32,
+ eocd_offset: u32,
+ eocd_size: u32,
}
-/// Returns the APK Signature Scheme block contained in the provided file for the given ID
-/// and the additional information relevant for verifying the block against the file.
-pub fn find_signature<F: Read + Seek>(f: F, block_id: u32) -> Result<SignatureInfo> {
- let (mut f, sections) = zip_sections(f)?;
+impl<R: Read + Seek> ApkSections<R> {
+ pub fn new(reader: R) -> Result<ApkSections<R>> {
+ let (mut f, zip_sections) = zip_sections(reader)?;
+ let (signing_block_offset, signing_block_size) =
+ find_signing_block(&mut f, zip_sections.central_directory_offset)?;
+ Ok(ApkSections {
+ inner: f,
+ signing_block_offset,
+ signing_block_size,
+ central_directory_offset: zip_sections.central_directory_offset,
+ central_directory_size: zip_sections.central_directory_size,
+ eocd_offset: zip_sections.eocd_offset,
+ eocd_size: zip_sections.eocd_size,
+ })
+ }
- let (signing_block, _signing_block_offset) =
- find_signing_block(&mut f, sections.central_directory_offset)?;
+ /// Returns the APK Signature Scheme block contained in the APK for the given block ID.
+ pub fn find_signature(&mut self, block_id: u32) -> Result<Bytes> {
+ let signing_block = self.bytes(self.signing_block_offset, self.signing_block_size)?;
+ // TODO(jooyung): propagate NotFound error so that verification can fallback to V2
+ find_signature_scheme_block(Bytes::from(signing_block), block_id)
+ }
- // TODO(jooyung): propagate NotFound error so that verification can fallback to V2
- let signature_scheme_block = find_signature_scheme_block(signing_block, block_id)?;
- Ok(SignatureInfo { signature_block: signature_scheme_block })
+ /// Computes the digest over the APK contents, central directory, and EOCD, using the content
+ /// digest algorithm that corresponds to the given signature algorithm.
+ /// 1. The digest of each chunk is computed over the concatenation of byte 0xa5, the chunk’s
+ /// length in bytes (little-endian uint32), and the chunk’s contents.
+ /// 2. The top-level digest is computed over the concatenation of byte 0x5a, the number of
+ /// chunks (little-endian uint32), and the concatenation of digests of the chunks in the
+ /// order the chunks appear in the APK.
+ /// (see https://source.android.com/security/apksigning/v2#integrity-protected-contents)
+ pub fn compute_digest(&mut self, signature_algorithm_id: u32) -> Result<Vec<u8>> {
+ let digester = Digester::new(signature_algorithm_id)?;
+
+ let mut digests_of_chunks = bytes::BytesMut::new();
+ let mut chunk_count = 0u32;
+ let mut chunk = vec![0u8; CHUNK_SIZE_BYTES as usize];
+ for data in &[
+ ApkSections::zip_entries,
+ ApkSections::central_directory,
+ ApkSections::eocd_for_verification,
+ ] {
+ let mut data = data(self)?;
+ while data.limit() > 0 {
+ let chunk_size = min(CHUNK_SIZE_BYTES, data.limit());
+ let mut slice = &mut chunk[..(chunk_size as usize)];
+ data.read_exact(&mut slice)?;
+ digests_of_chunks.put_slice(
+ digester.digest(slice, CHUNK_HEADER_MID, chunk_size as u32).as_ref(),
+ );
+ chunk_count += 1;
+ }
+ }
+ Ok(digester.digest(&digests_of_chunks, CHUNK_HEADER_TOP, chunk_count).as_ref().into())
+ }
+
+ fn zip_entries(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+ scoped_read(&mut self.inner, 0, self.signing_block_offset as u64)
+ }
+ fn central_directory(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+ scoped_read(
+ &mut self.inner,
+ self.central_directory_offset as u64,
+ self.central_directory_size as u64,
+ )
+ }
+ fn eocd_for_verification(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
+ let mut eocd = self.bytes(self.eocd_offset, self.eocd_size)?;
+ // Protection of section 4 (ZIP End of Central Directory) is complicated by the section
+ // containing the offset of ZIP Central Directory. The offset changes when the size of the
+ // APK Signing Block changes, for instance, when a new signature is added. Thus, when
+ // computing digest over the ZIP End of Central Directory, the field containing the offset
+ // of ZIP Central Directory must be treated as containing the offset of the APK Signing
+ // Block.
+ set_central_directory_offset(&mut eocd, self.signing_block_offset)?;
+ Ok(Read::take(Box::new(Cursor::new(eocd)), self.eocd_size as u64))
+ }
+ fn bytes(&mut self, offset: u32, size: u32) -> Result<Vec<u8>> {
+ self.inner.seek(SeekFrom::Start(offset as u64))?;
+ let mut buf = vec![0u8; size as usize];
+ self.inner.read_exact(&mut buf)?;
+ Ok(buf)
+ }
+}
+
+fn scoped_read<'a, R: Read + Seek>(
+ src: &'a mut R,
+ offset: u64,
+ size: u64,
+) -> Result<Take<Box<dyn Read + 'a>>> {
+ src.seek(SeekFrom::Start(offset))?;
+ Ok(Read::take(Box::new(src), size))
+}
+
+struct Digester {
+ algorithm: &'static digest::Algorithm,
+}
+
+const CHUNK_HEADER_TOP: &[u8] = &[0x5a];
+const CHUNK_HEADER_MID: &[u8] = &[0xa5];
+impl Digester {
+ fn new(signature_algorithm_id: u32) -> Result<Digester> {
+ let digest_algorithm_id = to_content_digest_algorithm(signature_algorithm_id)?;
+ let algorithm = match digest_algorithm_id {
+ CONTENT_DIGEST_CHUNKED_SHA256 => &digest::SHA256,
+ CONTENT_DIGEST_CHUNKED_SHA512 => &digest::SHA512,
+ // TODO(jooyung): implement
+ CONTENT_DIGEST_VERITY_CHUNKED_SHA256 => {
+ bail!("TODO(b/190343842): CONTENT_DIGEST_VERITY_CHUNKED_SHA256: not implemented")
+ }
+ _ => bail!("Unknown digest algorithm: {}", digest_algorithm_id),
+ };
+ Ok(Digester { algorithm })
+ }
+ // v2/v3 digests are computed after prepending a "header" byte and "size" info.
+ fn digest(&self, data: &[u8], header: &[u8], size: u32) -> digest::Digest {
+ let mut ctx = digest::Context::new(self.algorithm);
+ ctx.update(header);
+ ctx.update(&size.to_le_bytes());
+ ctx.update(data);
+ ctx.finish()
+ }
}
fn find_signing_block<T: Read + Seek>(
reader: &mut T,
central_directory_offset: u32,
-) -> Result<(Bytes, u32)> {
+) -> Result<(u32, u32)> {
// FORMAT:
// OFFSET DATA TYPE DESCRIPTION
// * @+0 bytes uint64: size in bytes (excluding this field)
@@ -96,10 +217,7 @@
size_in_footer
);
}
- reader.seek(SeekFrom::Start(signing_block_offset as u64))?;
- let mut buf = vec![0u8; total_size as usize];
- reader.read_exact(&mut buf)?;
- Ok((Bytes::from(buf), signing_block_offset))
+ Ok((signing_block_offset, total_size))
}
fn find_signature_scheme_block(buf: Bytes, block_id: u32) -> Result<Bytes> {
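The digest scheme implemented by `compute_digest` can be illustrated with a minimal standalone sketch (SHA-256 case only; `chunked_sha256` is a name invented here, not part of the crate): each 1 MiB chunk is hashed over the prefix byte 0xa5, its little-endian length, and its contents, and the top-level digest covers the prefix byte 0x5a, the chunk count, and the concatenated chunk digests.

```
use ring::digest;

const CHUNK_SIZE: usize = 1024 * 1024;

// Sketch of the v2/v3 chunked digest for a single contiguous section.
// The real implementation streams three sections (zip entries, central
// directory, EOCD) through one chunk sequence; this assumes everything
// fits in memory.
fn chunked_sha256(data: &[u8]) -> Vec<u8> {
    let mut chunk_digests = Vec::new();
    let mut chunk_count = 0u32;
    for chunk in data.chunks(CHUNK_SIZE) {
        let mut ctx = digest::Context::new(&digest::SHA256);
        ctx.update(&[0xa5]); // per-chunk header byte
        ctx.update(&(chunk.len() as u32).to_le_bytes());
        ctx.update(chunk);
        chunk_digests.extend_from_slice(ctx.finish().as_ref());
        chunk_count += 1;
    }
    let mut top = digest::Context::new(&digest::SHA256);
    top.update(&[0x5a]); // top-level header byte
    top.update(&chunk_count.to_le_bytes());
    top.update(&chunk_digests);
    top.finish().as_ref().to_vec()
}
```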
diff --git a/apkverify/src/v3.rs b/apkverify/src/v3.rs
index 91043ab..5ec3d07 100644
--- a/apkverify/src/v3.rs
+++ b/apkverify/src/v3.rs
@@ -19,11 +19,13 @@
// TODO(jooyung) remove this
#![allow(dead_code)]
-use anyhow::{anyhow, bail, Result};
+use anyhow::{anyhow, bail, Context, Result};
use bytes::Bytes;
use std::fs::File;
+use std::io::{Read, Seek};
use std::ops::Range;
use std::path::Path;
+use x509_parser::x509;
use crate::bytes_ext::{BytesExt, LengthPrefixed, ReadFromBytes};
use crate::sigutil::*;
@@ -85,16 +87,18 @@
/// associated with each signer.
pub fn verify<P: AsRef<Path>>(path: P) -> Result<()> {
let f = File::open(path.as_ref())?;
- let signature = find_signature(f, APK_SIGNATURE_SCHEME_V3_BLOCK_ID)?;
- verify_signature(&signature.signature_block)?;
+ let mut sections = ApkSections::new(f)?;
+ verify_signature(&mut sections)?;
Ok(())
}
/// Verifies the contents of the provided APK file against the provided APK Signature Scheme v3
/// Block.
-fn verify_signature(block: &Bytes) -> Result<()> {
+fn verify_signature<R: Read + Seek>(sections: &mut ApkSections<R>) -> Result<()> {
+ let mut block = sections.find_signature(APK_SIGNATURE_SCHEME_V3_BLOCK_ID)?;
+
// parse v3 scheme block
- let signers = block.slice(..).read::<Signers>()?;
+ let signers = block.read::<Signers>()?;
// find supported by platform
let mut supported =
@@ -106,13 +110,13 @@
}
// and it should be verified
- supported.pop().unwrap().verify()?;
+ supported.pop().unwrap().verify(sections)?;
Ok(())
}
impl Signer {
- fn verify(&self) -> Result<()> {
+ fn verify<R: Read + Seek>(&self, sections: &mut ApkSections<R>) -> Result<()> {
// 1. Choose the strongest supported signature algorithm ID from signatures. The strength
// ordering is up to each implementation/platform version.
let strongest: &Signature = self
@@ -124,7 +128,8 @@
// 2. Verify the corresponding signature from signatures against signed data using public key.
// (It is now safe to parse signed data.)
- verify_signed_data(&self.signed_data, strongest, &self.public_key)?;
+ let (_, key_info) = x509::SubjectPublicKeyInfo::from_der(self.public_key.as_ref())?;
+ verify_signed_data(&self.signed_data, strongest, &key_info)?;
// It is now safe to parse signed data.
let signed_data: SignedData = self.signed_data.slice(..).read()?;
@@ -134,10 +139,44 @@
if self.sdk_range() != signed_data.sdk_range() {
bail!("SDK versions mismatch between signed and unsigned in v3 signer block.");
}
- // TODO(jooyung) 4. Verify that the ordered list of signature algorithm IDs in digests and signatures is identical. (This is to prevent signature stripping/addition.)
- // TODO(jooyung) 5. Compute the digest of APK contents using the same digest algorithm as the digest algorithm used by the signature algorithm.
- // TODO(jooyung) 6. Verify that the computed digest is identical to the corresponding digest from digests.
- // TODO(jooyung) 7. Verify that SubjectPublicKeyInfo of the first certificate of certificates is identical to public key.
+
+ // 4. Verify that the ordered list of signature algorithm IDs in digests and signatures is
+ // identical. (This is to prevent signature stripping/addition.)
+ if !self
+ .signatures
+ .iter()
+ .map(|sig| sig.signature_algorithm_id)
+ .eq(signed_data.digests.iter().map(|dig| dig.signature_algorithm_id))
+ {
+ bail!("Signature algorithms don't match between digests and signatures records");
+ }
+
+ // 5. Compute the digest of APK contents using the same digest algorithm as the digest
+ // algorithm used by the signature algorithm.
+ let digest = signed_data
+ .digests
+ .iter()
+ .find(|&dig| dig.signature_algorithm_id == strongest.signature_algorithm_id)
+ .unwrap(); // ok to unwrap: we checked above that the two lists are identical
+ let computed = sections.compute_digest(digest.signature_algorithm_id)?;
+
+ // 6. Verify that the computed digest is identical to the corresponding digest from digests.
+ if computed != digest.digest.as_ref() {
+ bail!(
+ "Digest mismatch: computed={:?} vs expected={:?}",
+ to_hex_string(&computed),
+ to_hex_string(&digest.digest),
+ );
+ }
+
+ // 7. Verify that SubjectPublicKeyInfo of the first certificate of certificates is identical
+ // to public key.
+ let cert = signed_data.certificates.first().context("No certificates listed")?;
+ let (_, cert) = x509_parser::parse_x509_certificate(cert.as_ref())?;
+ if cert.tbs_certificate.subject_pki != key_info {
+ bail!("Public key mismatch between certificate and signature record");
+ }
+
// TODO(jooyung) 8. If the proof-of-rotation attribute exists for the signer verify that the struct is valid and this signer is the last certificate in the list.
Ok(())
}
@@ -146,10 +185,9 @@
fn verify_signed_data(
data: &Bytes,
signature: &Signature,
- public_key: &SubjectPublicKeyInfo,
+ key_info: &x509::SubjectPublicKeyInfo,
) -> Result<()> {
use ring::signature;
- let (_, key_info) = x509_parser::x509::SubjectPublicKeyInfo::from_der(public_key.as_ref())?;
let verification_alg: &dyn signature::VerificationAlgorithm =
match signature.signature_algorithm_id {
SIGNATURE_RSA_PSS_WITH_SHA256 => &signature::RSA_PSS_2048_8192_SHA256,
@@ -172,7 +210,7 @@
}
_ => bail!("Unsupported signature algorithm: {:#x}", signature.signature_algorithm_id),
};
- let key = signature::UnparsedPublicKey::new(verification_alg, key_info.subject_public_key.data);
+ let key = signature::UnparsedPublicKey::new(verification_alg, &key_info.subject_public_key);
key.verify(data.as_ref(), signature.signature.as_ref())?;
Ok(())
}
@@ -215,3 +253,8 @@
Ok(Self { signature_algorithm_id: buf.read()?, digest: buf.read()? })
}
}
+
+#[inline]
+fn to_hex_string(buf: &[u8]) -> String {
+ buf.iter().map(|b| format!("{:02X}", b)).collect()
+}
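With steps 4-7 filled in, `verify` now rejects both tampered contents and mismatched certificates. A hypothetical caller (the two new tests below exercise exactly these failure paths):

```
// Hypothetical usage sketch of the public API exercised by the new tests.
fn check_apk(path: &str) -> anyhow::Result<()> {
    // Fails with "Digest mismatch" or "Public key mismatch" on tampered APKs.
    apkverify::verify(path)?;
    Ok(())
}
```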
diff --git a/apkverify/src/ziputil.rs b/apkverify/src/ziputil.rs
index 28ecf87..dbf5131 100644
--- a/apkverify/src/ziputil.rs
+++ b/apkverify/src/ziputil.rs
@@ -17,7 +17,7 @@
//! Utilities for zip handling
use anyhow::{bail, Result};
-use bytes::Buf;
+use bytes::{Buf, BufMut};
use std::io::{Read, Seek, SeekFrom};
use zip::ZipArchive;
@@ -69,3 +69,12 @@
}
Ok((&buf[EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET..]).get_u32_le())
}
+
+/// Update EOCD's central_directory_offset field.
+pub fn set_central_directory_offset(buf: &mut [u8], value: u32) -> Result<()> {
+ if buf.len() < EOCD_MIN_SIZE {
+ bail!("Invalid EOCD size: {}", buf.len());
+ }
+ (&mut buf[EOCD_CENTRAL_DIRECTORY_OFFSET_FIELD_OFFSET..]).put_u32_le(value);
+ Ok(())
+}
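`set_central_directory_offset` is the write-side counterpart of the existing getter; both assume the ZIP-spec EOCD layout, in which the minimal record is 22 bytes and the central directory offset is a little-endian u32 at byte 16. A round-trip sketch using the same `bytes` traits (constants restated here for illustration):

```
use bytes::{Buf, BufMut};

const EOCD_MIN_SIZE: usize = 22; // minimal EOCD record per the ZIP spec
const OFFSET_FIELD: usize = 16;  // central directory offset field

fn main() {
    let mut eocd = vec![0u8; EOCD_MIN_SIZE];
    (&mut eocd[OFFSET_FIELD..]).put_u32_le(0x1234); // write the offset
    assert_eq!((&eocd[OFFSET_FIELD..]).get_u32_le(), 0x1234); // read it back
}
```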
diff --git a/apkverify/tests/apkverify_test.rs b/apkverify/tests/apkverify_test.rs
index 03db61a..cad5ef2 100644
--- a/apkverify/tests/apkverify_test.rs
+++ b/apkverify/tests/apkverify_test.rs
@@ -16,7 +16,36 @@
use apkverify::verify;
+macro_rules! assert_contains {
+ ($haystack:expr,$needle:expr $(,)?) => {
+ match (&$haystack, &$needle) {
+ (haystack_value, needle_value) => {
+ assert!(
+ haystack_value.contains(needle_value),
+ "{} is not found in {}",
+ needle_value,
+ haystack_value
+ );
+ }
+ }
+ };
+}
+
#[test]
fn test_verify_v3() {
assert!(verify("tests/data/test.apex").is_ok());
}
+
+#[test]
+fn test_verify_v3_digest_mismatch() {
+ let res = verify("tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk");
+ assert!(res.is_err());
+ assert_contains!(res.err().unwrap().to_string(), "Digest mismatch");
+}
+
+#[test]
+fn test_verify_v3_cert_and_public_key_mismatch() {
+ let res = verify("tests/data/v3-only-cert-and-public-key-mismatch.apk");
+ assert!(res.is_err());
+ assert_contains!(res.err().unwrap().to_string(), "Public key mismatch");
+}
diff --git a/apkverify/tests/data/README.md b/apkverify/tests/data/README.md
index df40af6..953ecdb 100644
--- a/apkverify/tests/data/README.md
+++ b/apkverify/tests/data/README.md
@@ -9,4 +9,8 @@
Verified using v4 scheme (APK Signature Scheme v4): false
Verified for SourceStamp: false
Number of signers: 1
-```
\ No newline at end of file
+```
+
+Some test APKs are copied from tools/apksig/src/test/resources/com/android/apksig/.
+- v3-only-cert-and-public-key-mismatch.apk
+- v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
diff --git a/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk b/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk
new file mode 100644
index 0000000..2291e7e
--- /dev/null
+++ b/apkverify/tests/data/v3-only-cert-and-public-key-mismatch.apk
Binary files differ
diff --git a/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk b/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
new file mode 100644
index 0000000..2800929
--- /dev/null
+++ b/apkverify/tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk
Binary files differ
diff --git a/idsig/Android.bp b/idsig/Android.bp
new file mode 100644
index 0000000..647915b
--- /dev/null
+++ b/idsig/Android.bp
@@ -0,0 +1,33 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+ name: "libidsig.defaults",
+ crate_name: "idsig",
+ srcs: ["src/lib.rs"],
+ edition: "2018",
+ prefer_rlib: true,
+ rustlibs: [
+ "libanyhow",
+ "libring",
+ ],
+ proc_macros: ["libnum_derive"],
+ multilib: {
+ lib32: {
+ enabled: false,
+ },
+ },
+}
+
+rust_library {
+ name: "libidsig",
+ defaults: ["libidsig.defaults"],
+}
+
+rust_test {
+ name: "libidsig.test",
+ defaults: ["libidsig.defaults"],
+ test_suites: ["general-tests"],
+ compile_multilib: "first",
+}
diff --git a/idsig/Cargo.toml b/idsig/Cargo.toml
new file mode 100644
index 0000000..10790a7
--- /dev/null
+++ b/idsig/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "idsig"
+version = "0.1.0"
+authors = ["Jiyong Park <jiyong@google.com>"]
+edition = "2018"
+
+[dependencies]
+ring = "0.16"
diff --git a/idsig/src/lib.rs b/idsig/src/lib.rs
new file mode 100644
index 0000000..97c6024
--- /dev/null
+++ b/idsig/src/lib.rs
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! `idsig` provides routines for creating and parsing the idsig file defined by the APK
+//! Signature Scheme v4.
+
+use ring::digest::{self, Algorithm};
+use std::io::{Cursor, Read, Result, Seek, SeekFrom, Write};
+
+/// `HashTree` is a Merkle tree (and its root hash) that is compatible with fs-verity.
+pub struct HashTree {
+ /// Binary representation of the Merkle tree
+ pub tree: Vec<u8>,
+ /// Root hash
+ pub root_hash: Vec<u8>,
+}
+
+impl HashTree {
+ /// Creates a Merkle tree from `input`, using the given `salt` and hashing `algorithm`. `input`
+ /// is divided into `block_size`-byte chunks.
+ pub fn from<R: Read>(
+ input: &mut R,
+ input_size: usize,
+ salt: &[u8],
+ block_size: usize,
+ algorithm: &'static Algorithm,
+ ) -> Result<Self> {
+ let salt = zero_pad_salt(salt, algorithm);
+ let tree = generate_hash_tree(input, input_size, &salt, block_size, algorithm)?;
+
+ // The root hash comes from the first block of the tree, or from the input data itself when
+ // no hash tree was generated, which happens when the input is smaller than the block size.
+ let root_hash = if tree.is_empty() {
+ hash_one_level(input, input_size, &salt, block_size, algorithm)?
+ } else {
+ let mut ctx = digest::Context::new(algorithm);
+ ctx.update(&salt);
+ ctx.update(&tree[0..block_size]);
+ ctx.finish().as_ref().to_vec()
+ };
+ Ok(HashTree { tree, root_hash })
+ }
+}
+
+/// Calculate hash tree for the blocks in `input`.
+///
+/// This function implements: https://www.kernel.org/doc/html/latest/filesystems/fsverity.html#merkle-tree
+///
+/// The file contents are divided into blocks, where the block size is configurable but is usually
+/// 4096 bytes. The end of the last block is zero-padded if needed. Each block is then hashed,
+/// producing the first level of hashes. Then, the hashes in this first level are grouped into
+/// blocksize-byte blocks (zero-padding the ends as needed) and these blocks are hashed,
+/// producing the second level of hashes. This proceeds up the tree until only a single block
+/// remains.
+fn generate_hash_tree<R: Read>(
+ input: &mut R,
+ input_size: usize,
+ salt: &[u8],
+ block_size: usize,
+ algorithm: &'static Algorithm,
+) -> Result<Vec<u8>> {
+ let digest_size = algorithm.output_len;
+ let (hash_level_offsets, tree_size) =
+ calc_hash_level_offsets(input_size, block_size, digest_size);
+
+ let mut hash_tree = Cursor::new(vec![0; tree_size]);
+ let mut input_size = input_size;
+ for (level, offset) in hash_level_offsets.iter().enumerate() {
+ let hashes = if level == 0 {
+ hash_one_level(input, input_size, salt, block_size, algorithm)?
+ } else {
+ // For the intermediate levels, input is the output from the previous level
+ hash_tree.seek(SeekFrom::Start(hash_level_offsets[level - 1] as u64)).unwrap();
+ hash_one_level(&mut hash_tree, input_size, salt, block_size, algorithm)?
+ };
+ hash_tree.seek(SeekFrom::Start(*offset as u64)).unwrap();
+ hash_tree.write_all(hashes.as_ref()).unwrap();
+ // Output from this level becomes input for the next level
+ input_size = hashes.len();
+ }
+ Ok(hash_tree.into_inner())
+}
+
+/// Calculate hashes for the blocks in `input`. The end of the last block is zero-padded if needed.
+/// Each block is then hashed, producing a stream of hashes for a level.
+fn hash_one_level<R: Read>(
+ input: &mut R,
+ input_size: usize,
+ salt: &[u8],
+ block_size: usize,
+ algorithm: &'static Algorithm,
+) -> Result<Vec<u8>> {
+ // Input is zero-padded when its size is not a multiple of the block size. Note that `take()`
+ // is also needed to avoid reading more than `input_size` from the `input` reader. This is
+ // required because `input` can be the in-memory hash tree, and we must read only the part of
+ // the tree that belongs to the current level.
+ let pad_size = round_to_multiple(input_size, block_size) - input_size;
+ let mut input = input.take(input_size as u64).chain(Cursor::new(vec![0; pad_size]));
+
+ // Read one block from input, write the hash of it to the output. Repeat that for all input
+ // blocks.
+ let mut hashes = Cursor::new(Vec::new());
+ let mut buf = vec![0; block_size];
+ let mut num_blocks = (input_size + block_size - 1) / block_size;
+ while num_blocks > 0 {
+ input.read_exact(&mut buf)?;
+ let mut ctx = digest::Context::new(algorithm);
+ ctx.update(salt);
+ ctx.update(&buf);
+ let hash = ctx.finish();
+ hashes.write_all(hash.as_ref())?;
+ num_blocks -= 1;
+ }
+ Ok(hashes.into_inner())
+}
+
+/// Calculate the offset of each hash level within the tree, and also return the total size of
+/// the hash tree. This function is needed because the hash tree is stored upside down: hashes
+/// for level N are stored "after" hashes for level N + 1.
+fn calc_hash_level_offsets(
+ input_size: usize,
+ block_size: usize,
+ digest_size: usize,
+) -> (Vec<usize>, usize) {
+ // The input is split into multiple blocks and each block is hashed, which becomes the input
+ // for the next level. Size of a single hash is `digest_size`.
+ let mut level_sizes = Vec::new();
+ loop {
+ // Input for this level comes from either the previous level (if one exists) or the input parameter.
+ let input_size = *level_sizes.last().unwrap_or(&input_size);
+ if input_size <= block_size {
+ break;
+ }
+ let num_blocks = (input_size + block_size - 1) / block_size;
+ let hashes_size = round_to_multiple(num_blocks * digest_size, block_size);
+ level_sizes.push(hashes_size);
+ }
+ if level_sizes.is_empty() {
+ return ([].to_vec(), 0);
+ }
+
+ // The hash tree is stored upside down. The top level is at offset 0. The second level comes
+ // next, and so on. Level 0 is located at the end.
+ //
+ // Given level_sizes [10, 3, 1], the offsets for each level are ...
+ //
+ // Level 2 is at offset 0
+ // Level 1 is at offset 1 (because Level 2 is of size 1)
+ // Level 0 is at offset 4 (because Level 1 is of size 3)
+ //
+ // This is done by accumulating the sizes in reverse order (i.e. from the highest level down
+ // to level 1, not level 0).
+ let mut offsets = level_sizes.iter().rev().take(level_sizes.len() - 1).fold(
+ vec![0; 1], // offset for the top level
+ |mut offsets, size| {
+ offsets.push(offsets.last().unwrap() + size);
+ offsets
+ },
+ );
+ offsets.reverse(); // reverse the offsets again so that index N is for level N
+ let tree_size = level_sizes.iter().sum();
+ (offsets, tree_size)
+}
+
+/// Round `n` up to the nearest multiple of `unit`. `unit` must be a power of two.
+fn round_to_multiple(n: usize, unit: usize) -> usize {
+ (n + unit - 1) & !(unit - 1)
+}
+
+/// Zero-pad the salt if necessary.
+///
+/// According to https://www.kernel.org/doc/html/latest/filesystems/fsverity.html:
+///
+/// If a salt was specified, then it’s zero-padded to the closest multiple of the input size of the
+/// hash algorithm’s compression function, e.g. 64 bytes for SHA-256 or 128 bytes for SHA-512. The
+/// padded salt is prepended to every data or Merkle tree block that is hashed.
+fn zero_pad_salt(salt: &[u8], algorithm: &Algorithm) -> Vec<u8> {
+ if salt.is_empty() {
+ salt.to_vec()
+ } else {
+ let padded_len = round_to_multiple(salt.len(), algorithm.block_len);
+ let mut salt = salt.to_vec();
+ salt.resize(padded_len, 0);
+ salt
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::*;
+ use ring::digest;
+ use std::fs::{self, File};
+
+ #[test]
+ fn compare_with_golden_output() -> Result<()> {
+ // The golden outputs are generated by using the `fsverity` utility.
+ let sizes = ["512", "4K", "1M", "10000000"];
+ for size in sizes.iter() {
+ let input_name = format!("testdata/input.{}", size);
+ let mut input = File::open(&input_name)?;
+ let golden_hash_tree = fs::read(format!("testdata/input.{}.hash", size))?;
+ let golden_descriptor = fs::read(format!("testdata/input.{}.descriptor", size))?;
+ let golden_root_hash = &golden_descriptor[16..16 + 32];
+
+ let size = std::fs::metadata(&input_name)?.len() as usize;
+ let salt = vec![1, 2, 3, 4, 5, 6];
+ let ht = HashTree::from(&mut input, size, &salt, 4096, &digest::SHA256)?;
+
+ assert_eq!(golden_hash_tree.as_slice(), ht.tree.as_slice());
+ assert_eq!(golden_root_hash, ht.root_hash.as_slice());
+ }
+ Ok(())
+ }
+}
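To make the upside-down layout concrete, here is a worked example (a standalone sketch, not part of the library) for the 10000000-byte golden input with 4096-byte blocks and 32-byte SHA-256 digests:

```
fn round_to_multiple(n: usize, unit: usize) -> usize {
    (n + unit - 1) & !(unit - 1)
}

fn main() {
    let (block, digest) = (4096usize, 32usize);
    // Level 0: 10000000 bytes -> 2442 blocks -> 78144 bytes of hashes,
    // padded up to a block multiple: 81920.
    let l0 = round_to_multiple((10_000_000 + block - 1) / block * digest, block);
    assert_eq!(l0, 81920);
    // Level 1: 81920 bytes -> 20 blocks -> 640 bytes, padded up to 4096.
    let l1 = round_to_multiple((l0 + block - 1) / block * digest, block);
    assert_eq!(l1, 4096);
    // 4096 <= block size, so level 1 is the top level. Stored upside down:
    // level 1 at offset 0, level 0 at offset 4096, 86016 bytes in total.
    assert_eq!(l0 + l1, 86016);
}
```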
diff --git a/idsig/testdata/create.sh b/idsig/testdata/create.sh
new file mode 100755
index 0000000..eadfdb2
--- /dev/null
+++ b/idsig/testdata/create.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sizes="512 4K 1M 10000000"
+for size in $sizes; do
+ echo $size
+ dd if=/dev/random of=input.$size bs=$size count=1
+ fsverity digest input.$size \
+ --hash-alg=sha256 \
+ --salt=010203040506 \
+ --block-size=4096 \
+ --out-merkle-tree input.$size.hash \
+ --out-descriptor input.$size.descriptor
+done
diff --git a/idsig/testdata/input.10000000 b/idsig/testdata/input.10000000
new file mode 100644
index 0000000..6bc5a4b
--- /dev/null
+++ b/idsig/testdata/input.10000000
Binary files differ
diff --git a/idsig/testdata/input.10000000.descriptor b/idsig/testdata/input.10000000.descriptor
new file mode 100644
index 0000000..dc0d096
--- /dev/null
+++ b/idsig/testdata/input.10000000.descriptor
Binary files differ
diff --git a/idsig/testdata/input.10000000.hash b/idsig/testdata/input.10000000.hash
new file mode 100644
index 0000000..354c5c2
--- /dev/null
+++ b/idsig/testdata/input.10000000.hash
Binary files differ
diff --git a/idsig/testdata/input.1M b/idsig/testdata/input.1M
new file mode 100644
index 0000000..7040ec3
--- /dev/null
+++ b/idsig/testdata/input.1M
Binary files differ
diff --git a/idsig/testdata/input.1M.descriptor b/idsig/testdata/input.1M.descriptor
new file mode 100644
index 0000000..f11753d
--- /dev/null
+++ b/idsig/testdata/input.1M.descriptor
Binary files differ
diff --git a/idsig/testdata/input.1M.hash b/idsig/testdata/input.1M.hash
new file mode 100644
index 0000000..689790c
--- /dev/null
+++ b/idsig/testdata/input.1M.hash
Binary files differ
diff --git a/idsig/testdata/input.4K b/idsig/testdata/input.4K
new file mode 100644
index 0000000..99db32a
--- /dev/null
+++ b/idsig/testdata/input.4K
Binary files differ
diff --git a/idsig/testdata/input.4K.descriptor b/idsig/testdata/input.4K.descriptor
new file mode 100644
index 0000000..b120e2f
--- /dev/null
+++ b/idsig/testdata/input.4K.descriptor
Binary files differ
diff --git a/idsig/testdata/input.4K.hash b/idsig/testdata/input.4K.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/idsig/testdata/input.4K.hash
diff --git a/idsig/testdata/input.512 b/idsig/testdata/input.512
new file mode 100644
index 0000000..a57797f
--- /dev/null
+++ b/idsig/testdata/input.512
Binary files differ
diff --git a/idsig/testdata/input.512.descriptor b/idsig/testdata/input.512.descriptor
new file mode 100644
index 0000000..805019b
--- /dev/null
+++ b/idsig/testdata/input.512.descriptor
Binary files differ
diff --git a/idsig/testdata/input.512.hash b/idsig/testdata/input.512.hash
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/idsig/testdata/input.512.hash
diff --git a/microdroid/keymint/MicrodroidKeymasterContext.cpp b/microdroid/keymint/MicrodroidKeymasterContext.cpp
index b5440f3..1d1346b 100644
--- a/microdroid/keymint/MicrodroidKeymasterContext.cpp
+++ b/microdroid/keymint/MicrodroidKeymasterContext.cpp
@@ -55,11 +55,14 @@
// doesn't pose a problem for the current applications but may be a
// candidate for hardening.
auto encrypted_key = EncryptKey(key_material, AES_GCM_WITH_SW_ENFORCED, *hw_enforced,
- *sw_enforced, hidden, root_key_, random_, &error);
- if (error != KM_ERROR_OK) return error;
+ *sw_enforced, hidden, SecureDeletionData{}, root_key_, random_);
+ if (!encrypted_key) return encrypted_key.error();
- *blob = SerializeAuthEncryptedBlob(encrypted_key, *hw_enforced, *sw_enforced, &error);
- return error;
+ auto serialized = SerializeAuthEncryptedBlob(*encrypted_key, *hw_enforced, *sw_enforced,
+ 0 /* key_slot */);
+ if (!serialized) return serialized.error();
+ *blob = *serialized;
+ return KM_ERROR_OK;
}
keymaster_error_t MicrodroidKeymasterContext::ParseKeyBlob(
@@ -71,21 +74,21 @@
error = BuildHiddenAuthorizations(additional_params, &hidden, microdroidSoftwareRootOfTrust);
if (error != KM_ERROR_OK) return error;
- auto deserialized_key = DeserializeAuthEncryptedBlob(blob, &error);
- if (error != KM_ERROR_OK) return error;
+ auto deserialized_key = DeserializeAuthEncryptedBlob(blob);
+ if (!deserialized_key) return deserialized_key.error();
keymaster_algorithm_t algorithm;
- if (!deserialized_key.sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
+ if (!deserialized_key->sw_enforced.GetTagValue(TAG_ALGORITHM, &algorithm)) {
return KM_ERROR_INVALID_ARGUMENT;
}
- auto key_material = DecryptKey(deserialized_key, hidden, root_key_, &error);
- if (error != KM_ERROR_OK) return error;
+ auto key_material = DecryptKey(*deserialized_key, hidden, SecureDeletionData{}, root_key_);
+ if (!key_material) return key_material.error();
auto factory = GetKeyFactory(algorithm);
- return factory->LoadKey(move(key_material), additional_params,
- move(deserialized_key.hw_enforced), move(deserialized_key.sw_enforced),
- key);
+ return factory->LoadKey(move(*key_material), additional_params,
+ move(deserialized_key->hw_enforced),
+ move(deserialized_key->sw_enforced), key);
}
static bool UpgradeIntegerTag(keymaster_tag_t tag, uint32_t value, AuthorizationSet* set) {
@@ -137,10 +140,13 @@
auto encrypted_key =
EncryptKey(key->key_material(), AES_GCM_WITH_SW_ENFORCED, key->hw_enforced(),
- key->sw_enforced(), hidden, root_key_, random_, &error);
- if (error != KM_ERROR_OK) return error;
+ key->sw_enforced(), hidden, SecureDeletionData{}, root_key_, random_);
+ if (!encrypted_key) return encrypted_key.error();
- *upgraded_key = SerializeAuthEncryptedBlob(encrypted_key, key->hw_enforced(),
- key->sw_enforced(), &error);
+ auto serialized = SerializeAuthEncryptedBlob(*encrypted_key, key->hw_enforced(),
+ key->sw_enforced(), 0 /* key_slot */);
+ if (!serialized) return serialized.error();
+
+ *upgraded_key = std::move(*serialized);
return error;
}
diff --git a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
index aa7c9ab..a7b855a 100644
--- a/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
+++ b/tests/hostside/java/android/virt/test/MicrodroidTestCase.java
@@ -44,6 +44,11 @@
/* debug */ false);
adbConnectToMicrodroid(getDevice(), cid);
+ // Wait until logd-reinit starts. The service is one of the last services started in the
+ // microdroid boot procedure; therefore, waiting for it effectively waits for the boot to
+ // complete. TODO: we need a better marker eventually.
+ tryRunOnMicrodroid("watch -e \"getprop init.svc.logd-reinit | grep '^$'\"");
+
// Test writing to /data partition
runOnMicrodroid("echo MicrodroidTest > /data/local/tmp/test.txt");
assertThat(runOnMicrodroid("cat /data/local/tmp/test.txt"), is("MicrodroidTest"));