Merge "Remove ndk_platform backend. Use the ndk backend."
diff --git a/keystore2/legacykeystore/lib.rs b/keystore2/legacykeystore/lib.rs
index efa0870..e57f159 100644
--- a/keystore2/legacykeystore/lib.rs
+++ b/keystore2/legacykeystore/lib.rs
@@ -526,7 +526,7 @@
use std::time::Duration;
use std::time::Instant;
- static TEST_ALIAS: &str = &"test_alias";
+ static TEST_ALIAS: &str = "test_alias";
static TEST_BLOB1: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
static TEST_BLOB2: &[u8] = &[2, 2, 3, 4, 5, 6, 7, 8, 9, 0];
static TEST_BLOB3: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
@@ -694,9 +694,9 @@
}
let mut db = DB::new(&db_path3).expect("Failed to open database.");
- db.put(3, &TEST_ALIAS, TEST_BLOB3).expect("Failed to add entry (3).");
+ db.put(3, TEST_ALIAS, TEST_BLOB3).expect("Failed to add entry (3).");
- db.remove(3, &TEST_ALIAS).expect("Remove failed (3).");
+ db.remove(3, TEST_ALIAS).expect("Remove failed (3).");
}
});
@@ -710,7 +710,7 @@
let mut db = DB::new(&db_path).expect("Failed to open database.");
// This may return Some or None but it must not fail.
- db.get(3, &TEST_ALIAS).expect("Failed to get entry (4).");
+ db.get(3, TEST_ALIAS).expect("Failed to get entry (4).");
}
});
diff --git a/keystore2/selinux/src/lib.rs b/keystore2/selinux/src/lib.rs
index 5197cf6..cf6dfd3 100644
--- a/keystore2/selinux/src/lib.rs
+++ b/keystore2/selinux/src/lib.rs
@@ -130,7 +130,7 @@
fn deref(&self) -> &Self::Target {
match self {
Self::Raw(p) => unsafe { CStr::from_ptr(*p) },
- Self::CString(cstr) => &cstr,
+ Self::CString(cstr) => cstr,
}
}
}
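
A minimal standalone sketch of the pattern behind the `&cstr` -> `cstr` change above (not part of this commit): matching on `&self` already binds `cstr` as a reference, and `&CString` deref-coerces to the `&CStr` target, so the extra borrow flagged by clippy::needless_borrow is redundant. The `Context` enum below is a simplified, assumed stand-in for the keystore2 selinux type.

```rust
use std::ffi::{CStr, CString};
use std::ops::Deref;

// Simplified stand-in: the real type also has a raw-pointer variant.
enum Context {
    CString(CString),
}

impl Deref for Context {
    type Target = CStr;

    fn deref(&self) -> &Self::Target {
        match self {
            // `cstr` is already `&CString`; returning it directly relies on
            // deref coercion from `&CString` to `&CStr`, so `&cstr` is redundant.
            Context::CString(cstr) => cstr,
        }
    }
}

fn main() {
    let ctx = Context::CString(CString::new("u:object_r:keystore_data_file:s0").unwrap());
    // CStr methods are reachable through Deref.
    assert_eq!(ctx.to_bytes(), b"u:object_r:keystore_data_file:s0");
}
```
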
diff --git a/keystore2/src/attestation_key_utils.rs b/keystore2/src/attestation_key_utils.rs
index ca00539..b6a8e31 100644
--- a/keystore2/src/attestation_key_utils.rs
+++ b/keystore2/src/attestation_key_utils.rs
@@ -60,7 +60,7 @@
let challenge_present = params.iter().any(|kp| kp.tag == Tag::ATTESTATION_CHALLENGE);
match attest_key_descriptor {
None if challenge_present => rem_prov_state
- .get_remotely_provisioned_attestation_key_and_certs(&key, caller_uid, params, db)
+ .get_remotely_provisioned_attestation_key_and_certs(key, caller_uid, params, db)
.context(concat!(
"In get_attest_key_and_cert_chain: ",
"Trying to get remotely provisioned attestation key."
@@ -71,7 +71,7 @@
})
}),
None => Ok(None),
- Some(attest_key) => get_user_generated_attestation_key(&attest_key, caller_uid, db)
+ Some(attest_key) => get_user_generated_attestation_key(attest_key, caller_uid, db)
.context("In get_attest_key_and_cert_chain: Trying to load attest key")
.map(Some),
}
@@ -83,7 +83,7 @@
db: &mut KeystoreDB,
) -> Result<AttestationKeyInfo> {
let (key_id_guard, blob, cert, blob_metadata) =
- load_attest_key_blob_and_cert(&key, caller_uid, db)
+ load_attest_key_blob_and_cert(key, caller_uid, db)
.context("In get_user_generated_attestation_key: Failed to load blob and cert")?;
let issuer_subject: Vec<u8> = parse_subject_from_certificate(&cert).context(
@@ -105,7 +105,7 @@
_ => {
let (key_id_guard, mut key_entry) = db
.load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::BOTH,
caller_uid,
diff --git a/keystore2/src/boot_level_keys.rs b/keystore2/src/boot_level_keys.rs
index 1110caf..08c52af 100644
--- a/keystore2/src/boot_level_keys.rs
+++ b/keystore2/src/boot_level_keys.rs
@@ -243,40 +243,40 @@
fn test_output_is_consistent() -> Result<()> {
let initial_key = b"initial key";
let mut blkc = BootLevelKeyCache::new(ZVec::try_from(initial_key as &[u8])?);
- assert_eq!(true, blkc.level_accessible(0));
- assert_eq!(true, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(blkc.level_accessible(0));
+ assert!(blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
let v0 = blkc.aes_key(0).unwrap().unwrap();
let v10 = blkc.aes_key(10).unwrap().unwrap();
assert_eq!(Some(&v0), blkc.aes_key(0)?.as_ref());
assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
blkc.advance_boot_level(5)?;
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(true, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
blkc.advance_boot_level(10)?;
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(false, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(!blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
blkc.advance_boot_level(0)?;
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(false, blkc.level_accessible(9));
- assert_eq!(true, blkc.level_accessible(10));
- assert_eq!(true, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(!blkc.level_accessible(9));
+ assert!(blkc.level_accessible(10));
+ assert!(blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(Some(v10), blkc.aes_key(10)?);
blkc.finish();
- assert_eq!(false, blkc.level_accessible(0));
- assert_eq!(false, blkc.level_accessible(9));
- assert_eq!(false, blkc.level_accessible(10));
- assert_eq!(false, blkc.level_accessible(100));
+ assert!(!blkc.level_accessible(0));
+ assert!(!blkc.level_accessible(9));
+ assert!(!blkc.level_accessible(10));
+ assert!(!blkc.level_accessible(100));
assert_eq!(None, blkc.aes_key(0)?);
assert_eq!(None, blkc.aes_key(10)?);
Ok(())
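
A tiny runnable sketch of the clippy::bool_assert_comparison cleanup applied above (not part of this commit). `level_accessible` here is a hypothetical stand-in with the same shape as the cache method: a level stays accessible while the current boot level has not advanced past it.

```rust
// Hypothetical stand-in for BootLevelKeyCache::level_accessible.
fn level_accessible(current_boot_level: usize, requested_level: usize) -> bool {
    requested_level >= current_boot_level
}

fn main() {
    // Before the cleanup: assert_eq!(true, level_accessible(5, 9));
    assert!(level_accessible(5, 9));
    // Before the cleanup: assert_eq!(false, level_accessible(5, 0));
    assert!(!level_accessible(5, 0));
    println!("bool_assert_comparison examples passed");
}
```
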
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 0081bb7..ae2875c 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -142,7 +142,7 @@
let db_tag: i64 = row.get(0).context("Failed to read tag.")?;
metadata.insert(
db_tag,
- KeyMetaEntry::new_from_sql(db_tag, &SqlField::new(1, &row))
+ KeyMetaEntry::new_from_sql(db_tag, &SqlField::new(1, row))
.context("Failed to read KeyMetaEntry.")?,
);
Ok(())
@@ -217,7 +217,7 @@
let db_tag: i64 = row.get(0).context("Failed to read tag.")?;
metadata.insert(
db_tag,
- BlobMetaEntry::new_from_sql(db_tag, &SqlField::new(1, &row))
+ BlobMetaEntry::new_from_sql(db_tag, &SqlField::new(1, row))
.context("Failed to read BlobMetaEntry.")?,
);
Ok(())
@@ -388,12 +388,12 @@
}
/// Returns unix epoch time in milliseconds.
- pub fn to_millis_epoch(&self) -> i64 {
+ pub fn to_millis_epoch(self) -> i64 {
self.0
}
/// Returns unix epoch time in seconds.
- pub fn to_secs_epoch(&self) -> i64 {
+ pub fn to_secs_epoch(self) -> i64 {
self.0 / 1000
}
}
@@ -832,7 +832,7 @@
const UPGRADERS: &'static [fn(&Transaction) -> Result<u32>] = &[Self::from_0_to_1];
/// Name of the file that holds the cross-boot persistent database.
- pub const PERSISTENT_DB_FILENAME: &'static str = &"persistent.sqlite";
+ pub const PERSISTENT_DB_FILENAME: &'static str = "persistent.sqlite";
/// This will create a new database connection connecting the two
/// files persistent.sqlite and perboot.sqlite in the given directory.
@@ -842,7 +842,7 @@
pub fn new(db_root: &Path, gc: Option<Arc<Gc>>) -> Result<Self> {
let _wp = wd::watch_millis("KeystoreDB::new", 500);
- let persistent_path = Self::make_persistent_path(&db_root)?;
+ let persistent_path = Self::make_persistent_path(db_root)?;
let conn = Self::make_connection(&persistent_path)?;
let mut db = Self { conn, gc, perboot: perboot::PERBOOT_DB.clone() };
@@ -1244,7 +1244,7 @@
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_descriptor =
KeyDescriptor { domain, nspace, alias: Some(alias.to_string()), blob: None };
- let result = Self::load_key_entry_id(&tx, &key_descriptor, key_type);
+ let result = Self::load_key_entry_id(tx, &key_descriptor, key_type);
match result {
Ok(_) => Ok(true),
Err(error) => match error.root_cause().downcast_ref::<KsError>() {
@@ -1290,7 +1290,7 @@
key_metadata.store_in_db(key_id, tx).context("KeyMetaData::store_in_db failed")?;
Self::set_blob_internal(
- &tx,
+ tx,
key_id,
SubComponentType::KEY_BLOB,
Some(blob),
@@ -1320,10 +1320,10 @@
alias: Some(key_type.alias.into()),
blob: None,
};
- let id = Self::load_key_entry_id(&tx, &key_descriptor, KeyType::Super);
+ let id = Self::load_key_entry_id(tx, &key_descriptor, KeyType::Super);
match id {
Ok(id) => {
- let key_entry = Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ let key_entry = Self::load_key_components(tx, KeyEntryLoadBits::KM, id)
.context("In load_super_key. Failed to load key entry.")?;
Ok(Some((KEY_ID_LOCK.get(id), key_entry)))
}
@@ -1383,7 +1383,7 @@
let (id, entry) = match id {
Some(id) => (
id,
- Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ Self::load_key_components(tx, KeyEntryLoadBits::KM, id)
.context("In get_or_create_key_with.")?,
),
@@ -1409,7 +1409,7 @@
let (blob, metadata) =
create_new_key().context("In get_or_create_key_with.")?;
Self::set_blob_internal(
- &tx,
+ tx,
id,
SubComponentType::KEY_BLOB,
Some(&blob),
@@ -1560,7 +1560,7 @@
.context("In create_key_entry")?,
);
Self::set_blob_internal(
- &tx,
+ tx,
key_id.0,
SubComponentType::KEY_BLOB,
Some(private_key),
@@ -1569,7 +1569,7 @@
let mut metadata = KeyMetaData::new();
metadata.add(KeyMetaEntry::AttestationMacedPublicKey(maced_public_key.to_vec()));
metadata.add(KeyMetaEntry::AttestationRawPubKey(raw_public_key.to_vec()));
- metadata.store_in_db(key_id.0, &tx)?;
+ metadata.store_in_db(key_id.0, tx)?;
Ok(()).no_gc()
})
.context("In create_attestation_key_entry")
@@ -1592,7 +1592,7 @@
let _wp = wd::watch_millis("KeystoreDB::set_blob", 500);
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::set_blob_internal(&tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
+ Self::set_blob_internal(tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
})
.context("In set_blob.")
}
@@ -1606,7 +1606,7 @@
self.with_transaction(TransactionBehavior::Immediate, |tx| {
Self::set_blob_internal(
- &tx,
+ tx,
Self::UNASSIGNED_KEY_ID,
SubComponentType::KEY_BLOB,
Some(blob),
@@ -1699,7 +1699,7 @@
#[cfg(test)]
fn insert_key_metadata(&mut self, key_id: &KeyIdGuard, metadata: &KeyMetaData) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- metadata.store_in_db(key_id.0, &tx).no_gc()
+ metadata.store_in_db(key_id.0, tx).no_gc()
})
.context("In insert_key_metadata.")
}
@@ -1761,16 +1761,16 @@
metadata.add(KeyMetaEntry::AttestationExpirationDate(DateTime::from_millis_epoch(
expiration_date,
)));
- metadata.store_in_db(key_id, &tx).context("Failed to insert key metadata.")?;
+ metadata.store_in_db(key_id, tx).context("Failed to insert key metadata.")?;
Self::set_blob_internal(
- &tx,
+ tx,
key_id,
SubComponentType::CERT_CHAIN,
Some(cert_chain),
None,
)
.context("Failed to insert cert chain")?;
- Self::set_blob_internal(&tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
+ Self::set_blob_internal(tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
.context("Failed to insert cert")?;
Ok(()).no_gc()
})
@@ -1914,7 +1914,7 @@
);
let mut num_deleted = 0;
for id in key_ids_to_check.iter().filter(|kt| kt.1 < curr_time).map(|kt| kt.0) {
- if Self::mark_unreferenced(&tx, id)? {
+ if Self::mark_unreferenced(tx, id)? {
num_deleted += 1;
}
}
@@ -1941,7 +1941,7 @@
.context("Failed to execute statement")?;
let num_deleted = keys_to_delete
.iter()
- .map(|id| Self::mark_unreferenced(&tx, *id))
+ .map(|id| Self::mark_unreferenced(tx, *id))
.collect::<Result<Vec<bool>>>()
.context("Failed to execute mark_unreferenced on a keyid")?
.into_iter()
@@ -2227,7 +2227,7 @@
/// fields, and rebinds the given alias to the new key.
/// The boolean returned is a hint for the garbage collector. If true, a key was replaced,
/// is now unreferenced and needs to be collected.
- #[allow(clippy::clippy::too_many_arguments)]
+ #[allow(clippy::too_many_arguments)]
pub fn store_new_key(
&mut self,
key: &KeyDescriptor,
@@ -2259,11 +2259,11 @@
key_id.id(),
SubComponentType::KEY_BLOB,
Some(blob),
- Some(&blob_metadata),
+ Some(blob_metadata),
)
.context("Trying to insert the key blob.")?;
if let Some(cert) = &cert_info.cert {
- Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(&cert), None)
+ Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(cert), None)
.context("Trying to insert the certificate.")?;
}
if let Some(cert_chain) = &cert_info.cert_chain {
@@ -2271,7 +2271,7 @@
tx,
key_id.id(),
SubComponentType::CERT_CHAIN,
- Some(&cert_chain),
+ Some(cert_chain),
None,
)
.context("Trying to insert the certificate chain.")?;
@@ -2279,7 +2279,7 @@
Self::insert_keyparameter_internal(tx, &key_id, params)
.context("Trying to insert key parameters.")?;
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
+ let need_gc = Self::rebind_alias(tx, &key_id, alias, &domain, namespace, key_type)
.context("Trying to rebind alias.")?;
Ok(key_id).do_gc(need_gc)
})
@@ -2329,7 +2329,7 @@
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
+ let need_gc = Self::rebind_alias(tx, &key_id, alias, &domain, namespace, key_type)
.context("Trying to rebind alias.")?;
Ok(key_id).do_gc(need_gc)
})
@@ -2398,7 +2398,7 @@
if access_key.domain == Domain::APP {
access_key.nspace = caller_uid as i64;
}
- let key_id = Self::load_key_entry_id(&tx, &access_key, key_type)
+ let key_id = Self::load_key_entry_id(tx, &access_key, key_type)
.with_context(|| format!("With key.domain = {:?}.", access_key.domain))?;
Ok((key_id, access_key, None))
@@ -2563,7 +2563,7 @@
let tag = Tag(row.get(0).context("Failed to read tag.")?);
let sec_level = SecurityLevel(row.get(2).context("Failed to read sec_level.")?);
parameters.push(
- KeyParameter::new_from_sql(tag, &SqlField::new(1, &row), sec_level)
+ KeyParameter::new_from_sql(tag, &SqlField::new(1, row), sec_level)
.context("Failed to read KeyParameter.")?,
);
Ok(())
@@ -2941,7 +2941,7 @@
}
}
}
- notify_gc = Self::mark_unreferenced(&tx, key_id)
+ notify_gc = Self::mark_unreferenced(tx, key_id)
.context("In unbind_keys_for_user.")?
|| notify_gc;
}
@@ -2955,16 +2955,15 @@
load_bits: KeyEntryLoadBits,
key_id: i64,
) -> Result<KeyEntry> {
- let metadata = KeyMetaData::load_from_db(key_id, &tx).context("In load_key_components.")?;
+ let metadata = KeyMetaData::load_from_db(key_id, tx).context("In load_key_components.")?;
let (has_km_blob, key_blob_info, cert_blob, cert_chain_blob) =
- Self::load_blob_components(key_id, load_bits, &tx)
- .context("In load_key_components.")?;
+ Self::load_blob_components(key_id, load_bits, tx).context("In load_key_components.")?;
- let parameters = Self::load_key_parameters(key_id, &tx)
+ let parameters = Self::load_key_parameters(key_id, tx)
.context("In load_key_components: Trying to load key parameters.")?;
- let km_uuid = Self::get_key_km_uuid(&tx, key_id)
+ let km_uuid = Self::get_key_km_uuid(tx, key_id)
.context("In load_key_components: Trying to get KM uuid.")?;
Ok(KeyEntry {
@@ -3048,7 +3047,7 @@
// But even if we load the access tuple by grant here, the permission
// check denies the attempt to create a grant by grant descriptor.
let (key_id, access_key_descriptor, _) =
- Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
+ Self::load_access_tuple(tx, key, KeyType::Client, caller_uid)
.context("In grant")?;
// Perform access control. It is vital that we return here if the permission
@@ -3108,7 +3107,7 @@
// Load the key_id and complete the access control tuple.
// We ignore the access vector here because grants cannot be granted.
let (key_id, access_key_descriptor, _) =
- Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
+ Self::load_access_tuple(tx, key, KeyType::Client, caller_uid)
.context("In ungrant.")?;
// Perform access control. We must return here if the permission
@@ -3466,7 +3465,7 @@
load_attestation_key_pool(&mut db, expiration_date, namespace, base_byte)?;
let chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
- assert_eq!(true, chain.is_some());
+ assert!(chain.is_some());
let cert_chain = chain.unwrap();
assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
assert_eq!(cert_chain.batch_cert, loaded_values.batch_cert);
@@ -4309,8 +4308,8 @@
let mut db = new_test_db()?;
const SOURCE_UID: u32 = 1u32;
const DESTINATION_UID: u32 = 2u32;
- static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
- static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ static SOURCE_ALIAS: &str = "SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = "DESTINATION_ALIAS";
let key_id_guard =
make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
.context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
@@ -4378,8 +4377,8 @@
const SOURCE_UID: u32 = 1u32;
const DESTINATION_UID: u32 = 2u32;
const DESTINATION_NAMESPACE: i64 = 1000i64;
- static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
- static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ static SOURCE_ALIAS: &str = "SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = "DESTINATION_ALIAS";
let key_id_guard =
make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
.context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
@@ -4446,8 +4445,8 @@
let mut db = new_test_db()?;
const SOURCE_UID: u32 = 1u32;
const DESTINATION_UID: u32 = 2u32;
- static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
- static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ static SOURCE_ALIAS: &str = "SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = "DESTINATION_ALIAS";
let key_id_guard =
make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
.context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
@@ -4479,9 +4478,9 @@
#[test]
fn test_upgrade_0_to_1() {
- const ALIAS1: &str = &"test_upgrade_0_to_1_1";
- const ALIAS2: &str = &"test_upgrade_0_to_1_2";
- const ALIAS3: &str = &"test_upgrade_0_to_1_3";
+ const ALIAS1: &str = "test_upgrade_0_to_1_1";
+ const ALIAS2: &str = "test_upgrade_0_to_1_2";
+ const ALIAS3: &str = "test_upgrade_0_to_1_3";
const UID: u32 = 33;
let temp_dir = Arc::new(TempDir::new("test_upgrade_0_to_1").unwrap());
let mut db = KeystoreDB::new(temp_dir.path(), None).unwrap();
@@ -5476,7 +5475,7 @@
)?;
//check if super key exists
- assert!(db.key_exists(Domain::APP, 1, &USER_SUPER_KEY.alias, KeyType::Super)?);
+ assert!(db.key_exists(Domain::APP, 1, USER_SUPER_KEY.alias, KeyType::Super)?);
let (_, key_entry) = db.load_super_key(&USER_SUPER_KEY, 1)?.unwrap();
let loaded_super_key = SuperKeyManager::extract_super_key_from_key_entry(
@@ -5582,7 +5581,7 @@
&& updated_stats[&k].unused_size == baseline[&k].unused_size,
"updated_stats:\n{}\nbaseline:\n{}",
stringify(&updated_stats),
- stringify(&baseline)
+ stringify(baseline)
);
}
}
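
A minimal sketch of the `to_millis_epoch(&self)` -> `to_millis_epoch(self)` change above (not part of this commit). The `DateTime` below is an assumed, simplified shape — a Copy newtype over epoch milliseconds — used only to show that a by-value receiver on a trivially copyable type (the clippy::trivially_copy_pass_by_ref pattern) leaves call sites unchanged.

```rust
// Assumed simplified shape of the database DateTime wrapper.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct DateTime(i64);

impl DateTime {
    fn from_millis_epoch(millis: i64) -> Self {
        DateTime(millis)
    }

    /// Returns unix epoch time in milliseconds.
    fn to_millis_epoch(self) -> i64 {
        self.0
    }

    /// Returns unix epoch time in seconds.
    fn to_secs_epoch(self) -> i64 {
        self.0 / 1000
    }
}

fn main() {
    let dt = DateTime::from_millis_epoch(1_625_000_123);
    assert_eq!(dt.to_millis_epoch(), 1_625_000_123);
    assert_eq!(dt.to_secs_epoch(), 1_625_000);
    // `dt` is Copy, so it remains usable after the by-value calls.
    println!("{:?}", dt);
}
```
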
diff --git a/keystore2/src/database/utils.rs b/keystore2/src/database/utils.rs
index 90f5616..b4590da 100644
--- a/keystore2/src/database/utils.rs
+++ b/keystore2/src/database/utils.rs
@@ -44,7 +44,7 @@
loop {
match rows.next().context("In with_rows_extract_all: Failed to unpack row")? {
Some(row) => {
- row_extractor(&row).context("In with_rows_extract_all.")?;
+ row_extractor(row).context("In with_rows_extract_all.")?;
}
None => break Ok(()),
}
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index 2010c79..25f08c8 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -123,7 +123,7 @@
.super_key
.unwrap_key_if_required(&blob_metadata, &blob)
.context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
- (self.invalidate_key)(&uuid, &*blob)
+ (self.invalidate_key)(uuid, &*blob)
.context("In process_one_key: Trying to invalidate key.")?;
}
}
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index b0af771..a03a61c 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -279,7 +279,7 @@
security_level: &SecurityLevel,
) -> Result<(Strong<dyn IKeyMintDevice>, KeyMintHardwareInfo, Uuid)> {
let mut devices_map = KEY_MINT_DEVICES.lock().unwrap();
- if let Some((dev, hw_info, uuid)) = devices_map.dev_by_sec_level(&security_level) {
+ if let Some((dev, hw_info, uuid)) = devices_map.dev_by_sec_level(security_level) {
Ok((dev, hw_info, uuid))
} else {
let (dev, hw_info) = connect_keymint(security_level).context("In get_keymint_device.")?;
@@ -406,7 +406,7 @@
security_level: &SecurityLevel,
) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
let mut devices_map = REMOTELY_PROVISIONED_COMPONENT_DEVICES.lock().unwrap();
- if let Some(dev) = devices_map.dev_by_sec_level(&security_level) {
+ if let Some(dev) = devices_map.dev_by_sec_level(security_level) {
Ok(dev)
} else {
let dev = connect_remotely_provisioned_component(security_level)
diff --git a/keystore2/src/id_rotation.rs b/keystore2/src/id_rotation.rs
index dbf0fc9..e3992d8 100644
--- a/keystore2/src/id_rotation.rs
+++ b/keystore2/src/id_rotation.rs
@@ -27,7 +27,7 @@
use std::time::Duration;
const ID_ROTATION_PERIOD: Duration = Duration::from_secs(30 * 24 * 60 * 60); // Thirty days.
-static TIMESTAMP_FILE_NAME: &str = &"timestamp";
+static TIMESTAMP_FILE_NAME: &str = "timestamp";
/// The IdRotationState stores the path to the timestamp file for deferred usage. The data
/// partition is usually not available when Keystore 2.0 starts up. So this object is created
@@ -83,7 +83,7 @@
fn test_had_factory_reset_since_id_rotation() -> Result<()> {
let temp_dir = TempDir::new("test_had_factory_reset_since_id_rotation_")
.expect("Failed to create temp dir.");
- let id_rotation_state = IdRotationState::new(&temp_dir.path());
+ let id_rotation_state = IdRotationState::new(temp_dir.path());
let mut temp_file_path = temp_dir.path().to_owned();
temp_file_path.push(TIMESTAMP_FILE_NAME);
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index cf2ba04..f1f01c6 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -63,7 +63,7 @@
let db_path = Path::new(&dir);
*keystore2::globals::DB_PATH.write().expect("Could not lock DB_PATH.") =
db_path.to_path_buf();
- IdRotationState::new(&db_path)
+ IdRotationState::new(db_path)
} else {
panic!("Must specify a database directory.");
};
diff --git a/keystore2/src/km_compat/lib.rs b/keystore2/src/km_compat/lib.rs
index 56c35bf..8d7310b 100644
--- a/keystore2/src/km_compat/lib.rs
+++ b/keystore2/src/km_compat/lib.rs
@@ -260,7 +260,7 @@
if let Some(mut extras) = extra_params {
kps.append(&mut extras);
}
- let result = legacy.begin(purpose, &blob, &kps, None);
+ let result = legacy.begin(purpose, blob, &kps, None);
assert!(result.is_ok(), "{:?}", result);
result.unwrap()
}
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 6b16d2e..7454cca 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -416,14 +416,14 @@
BlobValue::Encrypted { iv, tag, data } => Ok(Blob {
flags: blob.flags,
value: BlobValue::Decrypted(
- decrypt(&data, &iv, &tag, None, None)
+ decrypt(data, iv, tag, None, None)
.context("In new_from_stream_decrypt_with.")?,
),
}),
BlobValue::PwEncrypted { iv, tag, data, salt, key_size } => Ok(Blob {
flags: blob.flags,
value: BlobValue::Decrypted(
- decrypt(&data, &iv, &tag, Some(salt), Some(*key_size))
+ decrypt(data, iv, tag, Some(salt), Some(*key_size))
.context("In new_from_stream_decrypt_with.")?,
),
}),
@@ -836,7 +836,7 @@
// in are all in the printable range that don't get mangled.
for prefix in Self::KNOWN_KEYSTORE_PREFIXES {
if let Some(alias) = encoded_alias.strip_prefix(prefix) {
- return Self::decode_alias(&alias).ok();
+ return Self::decode_alias(alias).ok();
}
}
None
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
index f92fd45..65f4b0b 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_migrator.rs
@@ -567,7 +567,7 @@
if let Some(super_key) = self
.legacy_loader
- .load_super_key(user_id, &pw)
+ .load_super_key(user_id, pw)
.context("In check_and_migrate_super_key: Trying to load legacy super key.")?
{
let (blob, blob_metadata) =
@@ -724,8 +724,8 @@
fn deref(&self) -> &Self::Target {
match self {
- Self::Vec(v) => &v,
- Self::ZVec(v) => &v,
+ Self::Vec(v) => v,
+ Self::ZVec(v) => v,
}
}
}
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index 9abc5aa..08fa8d2 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -206,9 +206,9 @@
let key_id_guard = match source.domain {
Domain::APP | Domain::SELINUX | Domain::KEY_ID => {
let (key_id_guard, _) = LEGACY_MIGRATOR
- .with_try_migrate(&source, caller_uid, || {
+ .with_try_migrate(source, caller_uid, || {
db.borrow_mut().load_key_entry(
- &source,
+ source,
KeyType::Client,
KeyEntryLoadBits::NONE,
caller_uid,
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index 8343a29..11811d9 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -158,7 +158,7 @@
impl $name {
/// Returns a string representation of the permission as required by
/// `selinux::check_access`.
- pub fn to_selinux(&self) -> &'static str {
+ pub fn to_selinux(self) -> &'static str {
match self {
Self($aidl_name::$def_name) => stringify!($def_selinux_name),
$(Self($aidl_name::$element_name) => stringify!($selinux_name),)*
@@ -266,7 +266,7 @@
impl $name {
/// Returns a string representation of the permission as required by
/// `selinux::check_access`.
- pub fn to_selinux(&self) -> &'static str {
+ pub fn to_selinux(self) -> &'static str {
match self {
Self::$def_name => stringify!($def_selinux_name),
$(Self::$element_name => stringify!($selinux_name),)*
@@ -852,23 +852,19 @@
blob: None,
};
+ assert!(check_key_permission(0, &sctx, KeyPerm::use_(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::delete(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::get_info(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::rebind(), &key, &None).is_ok());
+ assert!(check_key_permission(0, &sctx, KeyPerm::update(), &key, &None).is_ok());
+
if is_su {
- assert!(check_key_permission(0, &sctx, KeyPerm::use_(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::delete(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::get_info(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::rebind(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::update(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::grant(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::manage_blob(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::use_dev_id(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::gen_unique_id(), &key, &None).is_ok());
assert!(check_key_permission(0, &sctx, KeyPerm::req_forced_op(), &key, &None).is_ok());
} else {
- assert!(check_key_permission(0, &sctx, KeyPerm::use_(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::delete(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::get_info(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::rebind(), &key, &None).is_ok());
- assert!(check_key_permission(0, &sctx, KeyPerm::update(), &key, &None).is_ok());
assert_perm_failed!(check_key_permission(0, &sctx, KeyPerm::grant(), &key, &None));
assert_perm_failed!(check_key_permission(
0,
diff --git a/keystore2/src/raw_device.rs b/keystore2/src/raw_device.rs
index 8cef84d..991535f 100644
--- a/keystore2/src/raw_device.rs
+++ b/keystore2/src/raw_device.rs
@@ -120,7 +120,7 @@
blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
db.store_new_key(
- &key_desc,
+ key_desc,
key_type,
&key_parameters,
&(&creation_result.keyBlob, &blob_metadata),
@@ -148,7 +148,7 @@
key_desc: &KeyDescriptor,
key_type: KeyType,
) -> Result<(KeyIdGuard, KeyEntry)> {
- db.load_key_entry(&key_desc, key_type, KeyEntryLoadBits::KM, AID_KEYSTORE, |_, _| Ok(()))
+ db.load_key_entry(key_desc, key_type, KeyEntryLoadBits::KM, AID_KEYSTORE, |_, _| Ok(()))
.context("In lookup_from_desc: load_key_entry failed.")
}
@@ -228,8 +228,8 @@
};
}
- self.create_and_store_key(db, &key_desc, key_type, |km_dev| {
- km_dev.generateKey(&params, None)
+ self.create_and_store_key(db, key_desc, key_type, |km_dev| {
+ km_dev.generateKey(params, None)
})
.context("In lookup_or_generate_key: generate_and_store_key failed")?;
Self::lookup_from_desc(db, key_desc, key_type)
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index ead24da..40c06e5 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -180,7 +180,7 @@
// and therefore will not be attested.
Ok(None)
} else {
- match self.get_rem_prov_attest_key(&key, caller_uid, db) {
+ match self.get_rem_prov_attest_key(key, caller_uid, db) {
Err(e) => {
log::error!(
concat!(
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index e0eabe1..74aba3c 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -241,9 +241,9 @@
_ => {
let (key_id_guard, mut key_entry) = DB
.with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::KM,
caller_uid,
@@ -310,7 +310,7 @@
key_id_guard,
&km_blob,
&blob_metadata,
- &operation_parameters,
+ operation_parameters,
|blob| loop {
match map_km_error({
let _wp = self.watch_millis(
@@ -320,7 +320,7 @@
self.keymint.begin(
purpose,
blob,
- &operation_parameters,
+ operation_parameters,
immediate_hat.as_ref(),
)
}) {
@@ -691,7 +691,7 @@
.with(|db| {
LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &wrapping_key,
+ wrapping_key,
KeyType::Client,
KeyEntryLoadBits::KM,
caller_uid,
@@ -749,7 +749,7 @@
wrapped_data,
wrapping_blob,
masking_key,
- &params,
+ params,
pw_sid,
fp_sid,
))?;
@@ -769,7 +769,7 @@
upgraded_blob: &[u8],
) -> Result<()> {
let (upgraded_blob_to_be_stored, new_blob_metadata) =
- SuperKeyManager::reencrypt_if_required(key_blob, &upgraded_blob)
+ SuperKeyManager::reencrypt_if_required(key_blob, upgraded_blob)
.context("In store_upgraded_keyblob: Failed to handle super encryption.")?;
let mut new_blob_metadata = new_blob_metadata.unwrap_or_default();
@@ -942,7 +942,7 @@
{
let _wp =
self.watch_millis("In KeystoreSecuritylevel::delete_key: calling deleteKey", 500);
- map_km_error(km_dev.deleteKey(&key_blob)).context("In keymint device deleteKey")
+ map_km_error(km_dev.deleteKey(key_blob)).context("In keymint device deleteKey")
}
}
}
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 50374fe..b35fe36 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -132,9 +132,9 @@
let caller_uid = ThreadState::get_calling_uid();
let (key_id_guard, mut key_entry) = DB
.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::PUBLIC,
caller_uid,
@@ -183,9 +183,9 @@
) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
DB.with::<_, Result<()>>(|db| {
- let entry = match LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ let entry = match LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().load_key_entry(
- &key,
+ key,
KeyType::Client,
KeyEntryLoadBits::NONE,
caller_uid,
@@ -307,8 +307,8 @@
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
- db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ db.borrow_mut().unbind_key(key, KeyType::Client, caller_uid, |k, av| {
check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
})
})
@@ -325,9 +325,9 @@
) -> Result<KeyDescriptor> {
let caller_uid = ThreadState::get_calling_uid();
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
db.borrow_mut().grant(
- &key,
+ key,
caller_uid,
grantee_uid as u32,
access_vector,
@@ -340,7 +340,7 @@
fn ungrant(&self, key: &KeyDescriptor, grantee_uid: i32) -> Result<()> {
DB.with(|db| {
- db.borrow_mut().ungrant(&key, ThreadState::get_calling_uid(), grantee_uid as u32, |k| {
+ db.borrow_mut().ungrant(key, ThreadState::get_calling_uid(), grantee_uid as u32, |k| {
check_key_permission(KeyPerm::grant(), k, &None)
})
})
diff --git a/keystore2/src/shared_secret_negotiation.rs b/keystore2/src/shared_secret_negotiation.rs
index 64bc2c3..e32b675 100644
--- a/keystore2/src/shared_secret_negotiation.rs
+++ b/keystore2/src/shared_secret_negotiation.rs
@@ -149,14 +149,15 @@
.collect::<Result<Vec<_>>>()
.map(|v| v.into_iter().flatten())
.and_then(|i| {
- let participants_aidl: Vec<SharedSecretParticipant> =
+ Ok(i.chain(
get_aidl_instances(SHARED_SECRET_PACKAGE_NAME, 1, SHARED_SECRET_INTERFACE_NAME)
.as_vec()
.context("In list_participants: Trying to convert KM1.0 names to vector.")?
.into_iter()
.map(|name| SharedSecretParticipant::Aidl(name.to_string()))
- .collect();
- Ok(i.chain(participants_aidl.into_iter()))
+ .collect::<Vec<_>>()
+ .into_iter(),
+ ))
})
.context("In list_participants.")?
.collect())
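
A self-contained sketch, with hypothetical names, of the iterator shape introduced above (not part of this commit): the mapped AIDL instance names are chained directly inside `Ok(..)` instead of being bound to a `participants_aidl` temporary. The eager `collect::<Vec<_>>().into_iter()` mirrors the change, which materializes the owned names before chaining.

```rust
#[derive(Debug, PartialEq)]
enum SharedSecretParticipant {
    Hidl(String),
    Aidl(String),
}

// Hypothetical stand-in for list_participants: HIDL participants are already an
// iterator; AIDL instance names are looked up, owned, and chained on directly.
fn list_participants(
    hidl: Vec<SharedSecretParticipant>,
    aidl_instance_names: Vec<&str>,
) -> Result<Vec<SharedSecretParticipant>, String> {
    Ok(hidl
        .into_iter()
        .chain(
            aidl_instance_names
                .into_iter()
                .map(|name| SharedSecretParticipant::Aidl(name.to_string()))
                .collect::<Vec<_>>()
                .into_iter(),
        )
        .collect())
}

fn main() {
    let all = list_participants(
        vec![SharedSecretParticipant::Hidl("4.0/default".to_string())],
        vec!["default", "strongbox"],
    )
    .unwrap();
    assert_eq!(all.len(), 3);
    println!("{:?}", all);
}
```
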
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 17718da..4b71bb5 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -396,7 +396,7 @@
.get_or_create_key_with(
Domain::APP,
user as u64 as i64,
- &USER_SUPER_KEY.alias,
+ USER_SUPER_KEY.alias,
crate::database::KEYSTORE_UUID,
|| {
// For backward compatibility we need to check if there is a super key present.
@@ -499,7 +499,7 @@
user_id: UserId,
) -> Result<bool> {
let key_in_db = db
- .key_exists(Domain::APP, user_id as u64 as i64, &USER_SUPER_KEY.alias, KeyType::Super)
+ .key_exists(Domain::APP, user_id as u64 as i64, USER_SUPER_KEY.alias, KeyType::Super)
.context("In super_key_exists_in_db_for_user.")?;
if key_in_db {
@@ -735,7 +735,7 @@
match Enforcements::super_encryption_required(domain, key_parameters, flags) {
SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
SuperEncryptionType::LskfBound => self
- .super_encrypt_on_key_init(db, legacy_migrator, user_id, &key_blob)
+ .super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob)
.context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to super encrypt with LskfBound key."
@@ -744,7 +744,7 @@
let mut data = self.data.lock().unwrap();
let entry = data.user_keys.entry(user_id).or_default();
if let Some(super_key) = entry.screen_lock_bound.as_ref() {
- Self::encrypt_with_aes_super_key(key_blob, &super_key).context(concat!(
+ Self::encrypt_with_aes_super_key(key_blob, super_key).context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to encrypt with ScreenLockBound key."
))
@@ -1213,8 +1213,8 @@
fn deref(&self) -> &Self::Target {
match self {
- Self::Sensitive { key, .. } => &key,
- Self::NonSensitive(key) => &key,
+ Self::Sensitive { key, .. } => key,
+ Self::NonSensitive(key) => key,
Self::Ref(key) => key,
}
}
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 83b6853..d71a4fc 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -43,7 +43,7 @@
pub fn check_keystore_permission(perm: KeystorePerm) -> anyhow::Result<()> {
ThreadState::with_calling_sid(|calling_sid| {
permission::check_keystore_permission(
- &calling_sid.ok_or_else(Error::sys).context(
+ calling_sid.ok_or_else(Error::sys).context(
"In check_keystore_permission: Cannot check permission without calling_sid.",
)?,
perm,
@@ -57,7 +57,7 @@
pub fn check_grant_permission(access_vec: KeyPermSet, key: &KeyDescriptor) -> anyhow::Result<()> {
ThreadState::with_calling_sid(|calling_sid| {
permission::check_grant_permission(
- &calling_sid.ok_or_else(Error::sys).context(
+ calling_sid.ok_or_else(Error::sys).context(
"In check_grant_permission: Cannot check permission without calling_sid.",
)?,
access_vec,
@@ -77,7 +77,7 @@
ThreadState::with_calling_sid(|calling_sid| {
permission::check_key_permission(
ThreadState::get_calling_uid(),
- &calling_sid
+ calling_sid
.ok_or_else(Error::sys)
.context("In check_key_permission: Cannot check permission without calling_sid.")?,
perm,
diff --git a/ondevice-signing/CertUtils.cpp b/ondevice-signing/CertUtils.cpp
index 8d4f273..d67bea6 100644
--- a/ondevice-signing/CertUtils.cpp
+++ b/ondevice-signing/CertUtils.cpp
@@ -369,20 +369,6 @@
return result;
}
-Result<std::vector<uint8_t>> extractRsaPublicKeyFromX509(const std::vector<uint8_t>& derCert) {
- auto derCertBytes = derCert.data();
- bssl::UniquePtr<X509> decoded_cert(d2i_X509(nullptr, &derCertBytes, derCert.size()));
- if (decoded_cert.get() == nullptr) {
- return Error() << "Failed to decode X509 certificate.";
- }
- bssl::UniquePtr<EVP_PKEY> decoded_pkey(X509_get_pubkey(decoded_cert.get()));
- if (decoded_pkey == nullptr) {
- return Error() << "Failed to extract public key from x509 cert";
- }
-
- return extractRsaPublicKey(decoded_pkey.get());
-}
-
Result<CertInfo> verifyAndExtractCertInfoFromX509(const std::string& path,
const std::vector<uint8_t>& publicKey) {
auto public_key = modulusToRsaPkey(publicKey);
diff --git a/ondevice-signing/CertUtils.h b/ondevice-signing/CertUtils.h
index 1ed4c06..fe703fa 100644
--- a/ondevice-signing/CertUtils.h
+++ b/ondevice-signing/CertUtils.h
@@ -60,9 +60,6 @@
extractPublicKeyFromSubjectPublicKeyInfo(const std::vector<uint8_t>& subjectKeyInfo);
android::base::Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path);
-android::base::Result<std::vector<uint8_t>>
-extractRsaPublicKeyFromX509(const std::vector<uint8_t>& x509);
-
android::base::Result<CertInfo>
verifyAndExtractCertInfoFromX509(const std::string& path, const std::vector<uint8_t>& publicKey);
diff --git a/ondevice-signing/FakeCompOs.cpp b/ondevice-signing/FakeCompOs.cpp
index cd54e28..596d6e2 100644
--- a/ondevice-signing/FakeCompOs.cpp
+++ b/ondevice-signing/FakeCompOs.cpp
@@ -53,7 +53,8 @@
// TODO: Allocate a namespace for CompOS
const int64_t kCompOsNamespace = 101;
-Result<std::unique_ptr<FakeCompOs>> FakeCompOs::newInstance() {
+Result<std::unique_ptr<FakeCompOs>>
+FakeCompOs::startInstance(const std::string& /*instanceImagePath*/) {
std::unique_ptr<FakeCompOs> compOs(new FakeCompOs);
auto init = compOs->initialize();
if (init.ok()) {
@@ -88,69 +89,6 @@
return {};
}
-Result<FakeCompOs::KeyData> FakeCompOs::generateKey() const {
- std::vector<KeyParameter> params;
-
- KeyParameter algo;
- algo.tag = Tag::ALGORITHM;
- algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::RSA);
- params.push_back(algo);
-
- KeyParameter key_size;
- key_size.tag = Tag::KEY_SIZE;
- key_size.value = KeyParameterValue::make<KeyParameterValue::integer>(kRsaKeySize);
- params.push_back(key_size);
-
- KeyParameter digest;
- digest.tag = Tag::DIGEST;
- digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
- params.push_back(digest);
-
- KeyParameter padding;
- padding.tag = Tag::PADDING;
- padding.value =
- KeyParameterValue::make<KeyParameterValue::paddingMode>(PaddingMode::RSA_PKCS1_1_5_SIGN);
- params.push_back(padding);
-
- KeyParameter exponent;
- exponent.tag = Tag::RSA_PUBLIC_EXPONENT;
- exponent.value = KeyParameterValue::make<KeyParameterValue::longInteger>(kRsaKeyExponent);
- params.push_back(exponent);
-
- KeyParameter purpose;
- purpose.tag = Tag::PURPOSE;
- purpose.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
- params.push_back(purpose);
-
- KeyParameter auth;
- auth.tag = Tag::NO_AUTH_REQUIRED;
- auth.value = KeyParameterValue::make<KeyParameterValue::boolValue>(true);
- params.push_back(auth);
-
- KeyDescriptor descriptor;
- descriptor.domain = Domain::BLOB;
- descriptor.nspace = kCompOsNamespace;
-
- KeyMetadata metadata;
- auto status = mSecurityLevel->generateKey(descriptor, {}, params, 0, {}, &metadata);
- if (!status.isOk()) {
- return Error() << "Failed to generate key";
- }
-
- auto& cert = metadata.certificate;
- if (!cert) {
- return Error() << "No certificate.";
- }
-
- auto& blob = metadata.key.blob;
- if (!blob) {
- return Error() << "No blob.";
- }
-
- KeyData key_data{std::move(metadata.certificate.value()), std::move(metadata.key.blob.value())};
- return key_data;
-}
-
Result<FakeCompOs::ByteVector> FakeCompOs::signData(const ByteVector& keyBlob,
const ByteVector& data) const {
KeyDescriptor descriptor;
diff --git a/ondevice-signing/FakeCompOs.h b/ondevice-signing/FakeCompOs.h
index eb1a8dd..6c7a445 100644
--- a/ondevice-signing/FakeCompOs.h
+++ b/ondevice-signing/FakeCompOs.h
@@ -34,14 +34,9 @@
public:
using ByteVector = std::vector<uint8_t>;
- struct KeyData {
- ByteVector cert;
- ByteVector blob;
- };
- static android::base::Result<std::unique_ptr<FakeCompOs>> newInstance();
-
- android::base::Result<KeyData> generateKey() const;
+ static android::base::Result<std::unique_ptr<FakeCompOs>>
+ startInstance(const std::string& instanceImagePath);
android::base::Result<void> loadAndVerifyKey(const ByteVector& keyBlob,
const ByteVector& publicKey) const;
diff --git a/ondevice-signing/odsign.rc b/ondevice-signing/odsign.rc
index 044bae7..de09fc0 100644
--- a/ondevice-signing/odsign.rc
+++ b/ondevice-signing/odsign.rc
@@ -2,5 +2,8 @@
class core
user root
group system
- oneshot
disabled # does not start with the core class
+
+# Note that odsign is not oneshot, but stopped manually when it exits. This
+# ensures that if odsign crashes during a module update, apexd will detect
+# those crashes and roll back the update.
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index ff7a105..bba39b8 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -51,34 +51,35 @@
const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/dalvik-cache";
-static const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
+constexpr const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
-static const char* kFsVerityProcPath = "/proc/sys/fs/verity";
+constexpr const char* kFsVerityProcPath = "/proc/sys/fs/verity";
-static const bool kForceCompilation = false;
-static const bool kUseCompOs = false; // STOPSHIP if true
+constexpr bool kForceCompilation = false;
+constexpr bool kUseCompOs = false; // STOPSHIP if true
-static const char* kVirtApexPath = "/apex/com.android.virt";
+constexpr const char* kCompOsApexPath = "/apex/com.android.compos";
const std::string kCompOsCert = "/data/misc/odsign/compos_key.cert";
-const std::string kCompOsPublicKey = "/data/misc/odsign/compos_key.pubkey";
-const std::string kCompOsKeyBlob = "/data/misc/odsign/compos_key.blob";
+const std::string kCompOsPublicKey = "/data/misc/apexdata/com.android.compos/compos_key.pubkey";
+const std::string kCompOsKeyBlob = "/data/misc/apexdata/com.android.compos/compos_key.blob";
+const std::string kCompOsInstance = "/data/misc/apexdata/com.android.compos/compos_instance.img";
+
const std::string kCompOsPendingPublicKey =
"/data/misc/apexdata/com.android.compos/compos_pending_key.pubkey";
const std::string kCompOsPendingKeyBlob =
"/data/misc/apexdata/com.android.compos/compos_pending_key.blob";
+const std::string kCompOsPendingInstance =
+ "/data/misc/apexdata/com.android.compos/compos_pending_instance.img";
const std::string kCompOsPendingArtifactsDir = "/data/misc/apexdata/com.android.art/compos-pending";
-static const char* kOdsignVerificationDoneProp = "odsign.verification.done";
-static const char* kOdsignKeyDoneProp = "odsign.key.done";
+constexpr const char* kOdsignVerificationDoneProp = "odsign.verification.done";
+constexpr const char* kOdsignKeyDoneProp = "odsign.key.done";
-static const char* kOdsignVerificationStatusProp = "odsign.verification.success";
-static const char* kOdsignVerificationStatusValid = "1";
-static const char* kOdsignVerificationStatusError = "0";
+constexpr const char* kOdsignVerificationStatusProp = "odsign.verification.success";
+constexpr const char* kOdsignVerificationStatusValid = "1";
+constexpr const char* kOdsignVerificationStatusError = "0";
-static void writeBytesToFile(const std::vector<uint8_t>& bytes, const std::string& path) {
- std::string str(bytes.begin(), bytes.end());
- android::base::WriteStringToFile(str, path);
-}
+constexpr const char* kStopServiceProp = "ctl.stop";
static std::vector<uint8_t> readBytesFromFile(const std::string& path) {
std::string str;
@@ -86,6 +87,16 @@
return std::vector<uint8_t>(str.begin(), str.end());
}
+static bool rename(const std::string& from, const std::string& to) {
+ std::error_code ec;
+ std::filesystem::rename(from, to, ec);
+ if (ec) {
+ LOG(ERROR) << "Can't rename " << from << " to " << to << ": " << ec.message();
+ return false;
+ }
+ return true;
+}
+
static int removeDirectory(const std::string& directory) {
std::error_code ec;
auto num_removed = std::filesystem::remove_all(directory, ec);
@@ -129,7 +140,7 @@
}
bool compOsPresent() {
- return access(kVirtApexPath, F_OK) == 0;
+ return access(kCompOsApexPath, F_OK) == 0;
}
Result<void> verifyExistingRootCert(const SigningKey& key) {
@@ -192,38 +203,60 @@
return existingCertInfo.value().subjectRsaPublicKey;
}
-Result<std::vector<uint8_t>> verifyOrGenerateCompOsKey(const SigningKey& signingKey) {
- std::unique_ptr<FakeCompOs> compOs;
- std::vector<uint8_t> keyBlob;
+// Attempt to start a CompOS VM from the given instance image and then get it to
+// verify the public key & key blob. Returns the RsaPublicKey bytes if
+// successful, an empty vector if any of the files are not present, or an error
+// otherwise.
+Result<std::vector<uint8_t>> loadAndVerifyCompOsKey(const std::string& instanceFile,
+ const std::string& publicKeyFile,
+ const std::string& keyBlobFile) {
+ if (access(instanceFile.c_str(), F_OK) != 0 || access(publicKeyFile.c_str(), F_OK) != 0 ||
+ access(keyBlobFile.c_str(), F_OK) != 0) {
+ return {};
+ }
+
+ auto compOsStatus = FakeCompOs::startInstance(instanceFile);
+ if (!compOsStatus.ok()) {
+ return Error() << "Failed to start CompOs instance " << instanceFile << ": "
+ << compOsStatus.error();
+ }
+ auto& compOs = compOsStatus.value();
+
+ auto publicKey = readBytesFromFile(publicKeyFile);
+ auto keyBlob = readBytesFromFile(keyBlobFile);
+ auto response = compOs->loadAndVerifyKey(keyBlob, publicKey);
+ if (!response.ok()) {
+ return response.error();
+ }
+
+ return publicKey;
+}
+
+Result<std::vector<uint8_t>> verifyCompOsKey(const SigningKey& signingKey) {
std::vector<uint8_t> publicKey;
- bool new_key = true;
// If a pending key has been generated we don't know if it is the correct
- // one for the current CompOS VM, so we need to start it and ask it.
- if (access(kCompOsPendingPublicKey.c_str(), F_OK) == 0 &&
- access(kCompOsPendingKeyBlob.c_str(), F_OK) == 0) {
- auto compOsStatus = FakeCompOs::newInstance();
- if (!compOsStatus.ok()) {
- return Error() << "Failed to start CompOs: " << compOsStatus.error();
- }
- compOs = std::move(compOsStatus.value());
-
- auto pendingKeyBlob = readBytesFromFile(kCompOsPendingKeyBlob);
- auto pendingPublicKey = readBytesFromFile(kCompOsPendingPublicKey);
-
- auto response = compOs->loadAndVerifyKey(pendingKeyBlob, pendingPublicKey);
- if (response.ok()) {
+ // one for the pending CompOS VM, so we need to start it and ask it.
+ auto pendingPublicKey = loadAndVerifyCompOsKey(kCompOsPendingInstance, kCompOsPendingPublicKey,
+ kCompOsPendingKeyBlob);
+ if (pendingPublicKey.ok()) {
+ if (!pendingPublicKey->empty()) {
LOG(INFO) << "Verified pending CompOs key";
- keyBlob = std::move(pendingKeyBlob);
- publicKey = std::move(pendingPublicKey);
- } else {
- LOG(WARNING) << "Failed to verify pending CompOs key: " << response.error();
- // And fall through to looking at the current key.
+
+ if (rename(kCompOsPendingInstance, kCompOsInstance) &&
+ rename(kCompOsPendingPublicKey, kCompOsPublicKey) &&
+ rename(kCompOsPendingKeyBlob, kCompOsKeyBlob)) {
+ publicKey = std::move(*pendingPublicKey);
+ }
}
- // Whether they're good or bad, we've finished with these files.
- unlink(kCompOsPendingKeyBlob.c_str());
- unlink(kCompOsPendingPublicKey.c_str());
+ } else {
+ LOG(WARNING) << "Failed to verify pending CompOs key: " << pendingPublicKey.error();
+ // And fall through to dealing with any current key.
}
+ // Whether good or bad, we've finished with these files.
+ unlink(kCompOsPendingInstance.c_str());
+ unlink(kCompOsPendingKeyBlob.c_str());
+ unlink(kCompOsPendingPublicKey.c_str());
if (publicKey.empty()) {
// Alternatively if we signed a cert for the key on a previous boot, then we
@@ -237,61 +270,31 @@
}
}
- if (compOs == nullptr) {
- auto compOsStatus = FakeCompOs::newInstance();
- if (!compOsStatus.ok()) {
- return Error() << "Failed to start CompOs: " << compOsStatus.error();
- }
- compOs = std::move(compOsStatus.value());
- }
-
// Otherwise, if there is an existing key that we haven't signed yet, then we can sign it
// now if CompOS confirms it's OK.
if (publicKey.empty()) {
- if (access(kCompOsPublicKey.c_str(), F_OK) == 0 &&
- access(kCompOsKeyBlob.c_str(), F_OK) == 0) {
- auto currentKeyBlob = readBytesFromFile(kCompOsKeyBlob);
- auto currentPublicKey = readBytesFromFile(kCompOsPublicKey);
-
- auto response = compOs->loadAndVerifyKey(currentKeyBlob, currentPublicKey);
- if (response.ok()) {
+ auto currentPublicKey =
+ loadAndVerifyCompOsKey(kCompOsInstance, kCompOsPublicKey, kCompOsKeyBlob);
+ if (currentPublicKey.ok()) {
+ if (!currentPublicKey->empty()) {
LOG(INFO) << "Verified existing CompOs key";
- keyBlob = std::move(currentKeyBlob);
- publicKey = std::move(currentPublicKey);
- new_key = false;
- } else {
- LOG(WARNING) << "Failed to verify existing CompOs key: " << response.error();
+ publicKey = std::move(*currentPublicKey);
}
+ } else {
+ LOG(WARNING) << "Failed to verify existing CompOs key: " << currentPublicKey.error();
+ // Delete so we won't try again on next boot.
+ unlink(kCompOsInstance.c_str());
+ unlink(kCompOsKeyBlob.c_str());
+ unlink(kCompOsPublicKey.c_str());
}
}
- // If all else has failed we need to ask CompOS to generate a new key.
if (publicKey.empty()) {
- auto keyData = compOs->generateKey();
- if (!keyData.ok()) {
- return Error() << "Failed to generate key: " << keyData.error();
- }
- auto publicKeyStatus = extractRsaPublicKeyFromX509(keyData.value().cert);
- if (!publicKeyStatus.ok()) {
- return Error() << "Failed to extract CompOs public key" << publicKeyStatus.error();
- }
-
- LOG(INFO) << "Generated new CompOs key";
-
- keyBlob = std::move(keyData.value().blob);
- publicKey = std::move(publicKeyStatus.value());
+ return Error() << "No valid CompOs key present.";
}
- // We've finished with CompOs now, let it exit.
- compOs.reset();
-
- // One way or another we now have a valid key pair. Persist the data for
- // CompOS, and a cert so we can simplify the checks on subsequent boots.
-
- if (new_key) {
- writeBytesToFile(keyBlob, kCompOsKeyBlob);
- writeBytesToFile(publicKey, kCompOsPublicKey);
- }
+ // One way or another we now have a valid key pair. Persist a certificate so
+ // we can simplify the checks on subsequent boots.
auto signFunction = [&](const std::string& to_be_signed) {
return signingKey.sign(to_be_signed);
@@ -461,7 +464,7 @@
}
Result<std::vector<uint8_t>> addCompOsCertToFsVerityKeyring(const SigningKey& signingKey) {
- auto publicKey = verifyOrGenerateCompOsKey(signingKey);
+ auto publicKey = verifyCompOsKey(signingKey);
if (!publicKey.ok()) {
return publicKey.error();
}
@@ -499,14 +502,11 @@
// No useful current artifacts, lets see if the CompOs ones are ok
LOG(INFO) << "Current artifacts are out of date, switching to pending artifacts";
removeDirectory(kArtArtifactsDir);
- std::error_code ec;
- std::filesystem::rename(kCompOsPendingArtifactsDir, kArtArtifactsDir, ec);
- if (ec) {
- LOG(ERROR) << "Can't rename " << kCompOsPendingArtifactsDir << " to " << kArtArtifactsDir
- << ": " << ec.message();
+ if (!rename(kCompOsPendingArtifactsDir, kArtArtifactsDir)) {
removeDirectory(kCompOsPendingArtifactsDir);
return art::odrefresh::ExitCode::kCompilationRequired;
}
+
// TODO: Make sure that we check here that the contents of the artifacts
// correspond to their filenames (and extensions) - the CompOs signatures
// can't guarantee that.
@@ -552,8 +552,10 @@
// Tell init we don't need to use our key anymore
SetProperty(kOdsignKeyDoneProp, "1");
// Tell init we're done with verification, and that it was an error
- SetProperty(kOdsignVerificationDoneProp, "1");
SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusError);
+ SetProperty(kOdsignVerificationDoneProp, "1");
+ // Tell init it shouldn't try to restart us - see odsign.rc
+ SetProperty(kStopServiceProp, "odsign");
};
auto scope_guard = android::base::make_scope_guard(errorScopeGuard);
@@ -605,7 +607,7 @@
if (supportsCompOs) {
auto compos_key = addCompOsCertToFsVerityKeyring(*key);
if (!compos_key.ok()) {
- LOG(ERROR) << compos_key.error();
+ LOG(WARNING) << compos_key.error();
} else {
odrefresh_status =
checkCompOsPendingArtifacts(compos_key.value(), *key, &digests_verified);
@@ -668,8 +670,10 @@
// At this point, we're done with the key for sure
SetProperty(kOdsignKeyDoneProp, "1");
// And we did a successful verification
- SetProperty(kOdsignVerificationDoneProp, "1");
SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusValid);
+ SetProperty(kOdsignVerificationDoneProp, "1");
+ // Tell init it shouldn't try to restart us - see odsign.rc
+ SetProperty(kStopServiceProp, "odsign");
return 0;
}
diff --git a/provisioner/rkp_factory_extraction_tool.cpp b/provisioner/rkp_factory_extraction_tool.cpp
index c439b99..2e59dbd 100644
--- a/provisioner/rkp_factory_extraction_tool.cpp
+++ b/provisioner/rkp_factory_extraction_tool.cpp
@@ -22,6 +22,7 @@
#include <cppbor.h>
#include <gflags/gflags.h>
#include <keymaster/cppcose/cppcose.h>
+#include <openssl/base64.h>
#include <remote_prov/remote_prov_utils.h>
#include <sys/random.h>
@@ -49,6 +50,26 @@
constexpr size_t kChallengeSize = 16;
+std::string toBase64(const std::vector<uint8_t>& buffer) {
+ size_t base64Length;
+ int rc = EVP_EncodedLength(&base64Length, buffer.size());
+ if (!rc) {
+ std::cerr << "Error getting base64 length. Size overflow?" << std::endl;
+ exit(-1);
+ }
+
+ std::string base64(base64Length, ' ');
+ rc = EVP_EncodeBlock(reinterpret_cast<uint8_t*>(base64.data()), buffer.data(), buffer.size());
+ ++rc; // Account for NUL, which BoringSSL does not for some reason.
+ if (rc != base64Length) {
+ std::cerr << "Error writing base64. Expected " << base64Length
+ << " bytes to be written, but " << rc << " bytes were actually written."
+ << std::endl;
+ exit(-1);
+ }
+ return base64;
+}
+
std::vector<uint8_t> generateChallenge() {
std::vector<uint8_t> challenge(kChallengeSize);
@@ -96,7 +117,10 @@
std::cerr << "Failed to generate test EEK somehow: " << eekOrErr.message() << std::endl;
exit(-1);
}
- auto [eek, ignored_pubkey, ignored_privkey] = eekOrErr.moveValue();
+ auto [eek, pubkey, privkey] = eekOrErr.moveValue();
+ std::cout << "EEK raw keypair:" << std::endl;
+ std::cout << " pub: " << toBase64(pubkey) << std::endl;
+ std::cout << " priv: " << toBase64(privkey) << std::endl;
return eek;
}