Merge "Fix tests"
diff --git a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
index d045345..0d4c30f 100644
--- a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
+++ b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
@@ -16,6 +16,7 @@
package android.security.remoteprovisioning;
+import android.hardware.security.keymint.ProtectedData;
import android.hardware.security.keymint.SecurityLevel;
import android.security.remoteprovisioning.AttestationPoolStatus;
@@ -83,10 +84,13 @@
* @param secLevel The security level to specify which KM instance from which to generate a
* CSR.
*
- * @return A CBOR blob composed of various encrypted/signed elements from the TA in a byte[]
+ * @param protectedData The encrypted CBOR blob generated by the IRemotelyProvisionedComponent
+ *
+ * @return A CBOR blob composed of various elements required by the server to verify the
+ * request.
*/
byte[] generateCsr(in boolean testMode, in int numCsr, in byte[] eek, in byte[] challenge,
- in SecurityLevel secLevel);
+ in SecurityLevel secLevel, out ProtectedData protectedData);
/**
* This method provides a way for the returned attestation certificate chains to be provisioned
@@ -95,7 +99,10 @@
*
* @param publicKey The raw public key encoded in the leaf certificate.
*
- * @param cert An X.509, DER encoded certificate chain.
+ * @param batchCert The batch certificate corresponding to the attestation key. It is passed
+ * separately to make Subject lookup for KM attestation easier.
+ *
+ * @param certs An X.509, DER encoded certificate chain for the attestation key.
*
* @param expirationDate The expiration date on the certificate chain, provided by the caller
* for convenience.
@@ -103,8 +110,8 @@
* @param secLevel The security level representing the KM instance containing the key that this
* chain corresponds to.
*/
- void provisionCertChain(in byte[] publicKey, in byte[] certs, in long expirationDate,
- in SecurityLevel secLevel);
+ void provisionCertChain(in byte[] publicKey, in byte[] batchCert, in byte[] certs,
+ in long expirationDate, in SecurityLevel secLevel);
/**
* This method allows the caller to instruct KeyStore to generate and store a key pair to be
@@ -117,4 +124,13 @@
* @param secLevel The security level to specify which KM instance should generate a key pair.
*/
void generateKeyPair(in boolean is_test_mode, in SecurityLevel secLevel);
+
+ /**
+ * This method returns the SecurityLevels of whichever instances of
+ * IRemotelyProvisionedComponent are running on the device. The RemoteProvisioner app needs to
+ * know which KM instances it should be generating and managing attestation keys for.
+ *
+ * @return The array of security levels.
+ */
+ SecurityLevel[] getSecurityLevels();
}
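
For reference, a minimal client-side sketch of the updated interface, written against the Rust bindings used later in this patch (remote_provisioning.rs). The service name matches REMOTE_PROVISIONING_SERVICE_NAME below; the helper function and the ProtectedData construction (assumed field name: protectedData) are illustrative assumptions, not part of this change.

use android_hardware_security_keymint::aidl::android::hardware::security::keymint::ProtectedData::ProtectedData;
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::IRemoteProvisioning::IRemoteProvisioning;
use android_security_remoteprovisioning::binder::Strong;

fn request_csr(eek: &[u8], challenge: &[u8]) -> binder::public_api::Result<Vec<u8>> {
    // Connect to the keystore2 remote provisioning service registered in keystore2_main.rs.
    let service: Strong<dyn IRemoteProvisioning> =
        binder::get_interface("android.security.remoteprovisioning")
            .expect("could not connect to android.security.remoteprovisioning");
    // Ask which KeyMint instances have an IRemotelyProvisionedComponent behind them and use
    // the first one reported.
    let sec_level =
        service.getSecurityLevels()?.into_iter().next().expect("no security levels reported");
    // The ProtectedData out parameter is filled in by the service; the returned bytes carry
    // the rest of what the server needs to verify the request.
    let mut protected_data = ProtectedData { protectedData: Vec::new() };
    service.generateCsr(true /* testMode */, 1, eek, challenge, sec_level, &mut protected_data)
}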
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 3217857..db06bff 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -584,6 +584,7 @@
#[allow(dead_code)]
pub struct CertificateChain {
private_key: ZVec,
+ batch_cert: ZVec,
cert_chain: ZVec,
}
@@ -794,6 +795,7 @@
pub struct PerBootDbKeepAlive(Connection);
impl KeystoreDB {
+ const UNASSIGNED_KEY_ID: i64 = -1i64;
const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
/// The alias of the user super key.
@@ -1459,6 +1461,24 @@
.context("In set_blob.")
}
+ /// Inserts a key blob that is already considered deleted. This seemingly odd operation exists
+ /// for legacy key migration in the case where we bulk delete all the keys of an app or even a
+ /// user. We use it to insert such key blobs into the database so that they can then be garbage
+ /// collected lazily by the key garbage collector.
+ pub fn set_deleted_blob(&mut self, blob: &[u8], blob_metadata: &BlobMetaData) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ Self::set_blob_internal(
+ &tx,
+ Self::UNASSIGNED_KEY_ID,
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(blob_metadata),
+ )
+ .need_gc()
+ })
+ .context("In set_deleted_blob.")
+ }
+
fn set_blob_internal(
tx: &Transaction,
key_id: i64,
@@ -1551,6 +1571,7 @@
pub fn store_signed_attestation_certificate_chain(
&mut self,
raw_public_key: &[u8],
+ batch_cert: &[u8],
cert_chain: &[u8],
expiration_date: i64,
km_uuid: &Uuid,
@@ -1609,6 +1630,8 @@
None,
)
.context("Failed to insert cert chain")?;
+ Self::set_blob_internal(&tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
+ .context("Failed to insert cert")?;
Ok(()).no_gc()
})
.context("In store_signed_attestation_certificate_chain: ")
@@ -1859,19 +1882,21 @@
|row| Ok((row.get(0)?, row.get(1)?)),
)?
.collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
- .context("In retrieve_attestation_key_and_cert_chain: query failed.")?;
+ .context("query failed.")?;
if rows.is_empty() {
return Ok(None).no_gc();
- } else if rows.len() != 2 {
+ } else if rows.len() != 3 {
return Err(KsError::sys()).context(format!(
concat!(
- "In retrieve_attestation_key_and_cert_chain: Expected to get a single attestation",
- "key chain but instead got {}."),
+ "Expected to get a single attestation",
+ "key, cert, and cert chain for a total of 3 entries, but instead got {}."
+ ),
rows.len()
));
}
let mut km_blob: Vec<u8> = Vec::new();
let mut cert_chain_blob: Vec<u8> = Vec::new();
+ let mut batch_cert_blob: Vec<u8> = Vec::new();
for row in rows {
let sub_type: SubComponentType = row.0;
match sub_type {
@@ -1881,15 +1906,20 @@
SubComponentType::CERT_CHAIN => {
cert_chain_blob = row.1;
}
+ SubComponentType::CERT => {
+ batch_cert_blob = row.1;
+ }
_ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
}
}
Ok(Some(CertificateChain {
private_key: ZVec::try_from(km_blob)?,
+ batch_cert: ZVec::try_from(batch_cert_blob)?,
cert_chain: ZVec::try_from(cert_chain_blob)?,
}))
.no_gc()
})
+ .context("In retrieve_attestation_key_and_cert_chain:")
}
/// Updates the alias column of the given key id `newid` with the given alias,
@@ -2735,7 +2765,10 @@
// otherwise return the id.
fn insert_with_retry(inserter: impl Fn(i64) -> rusqlite::Result<usize>) -> Result<i64> {
loop {
- let newid: i64 = random();
+ let newid: i64 = match random() {
+ Self::UNASSIGNED_KEY_ID => continue, // UNASSIGNED_KEY_ID cannot be assigned.
+ i => i,
+ };
match inserter(newid) {
// If the id already existed, try again.
Err(rusqlite::Error::SqliteFailure(
@@ -3133,8 +3166,9 @@
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
assert_eq!(true, chain.is_some());
let cert_chain = chain.unwrap();
- assert_eq!(cert_chain.private_key.to_vec(), loaded_values[2]);
- assert_eq!(cert_chain.cert_chain.to_vec(), loaded_values[1]);
+ assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
+ assert_eq!(cert_chain.batch_cert.to_vec(), loaded_values.batch_cert);
+ assert_eq!(cert_chain.cert_chain.to_vec(), loaded_values.cert_chain);
Ok(())
}
@@ -3169,6 +3203,7 @@
let private_key: Vec<u8> = vec![0x04, 0x05, 0x06];
let raw_public_key: Vec<u8> = vec![0x07, 0x08, 0x09];
let cert_chain: Vec<u8> = vec![0x0a, 0x0b, 0x0c];
+ let batch_cert: Vec<u8> = vec![0x0d, 0x0e, 0x0f];
db.create_attestation_key_entry(
&public_key,
&raw_public_key,
@@ -3181,6 +3216,7 @@
assert_eq!(status.total, 4);
db.store_signed_attestation_certificate_chain(
&raw_public_key,
+ &batch_cert,
&cert_chain,
20,
&KEYSTORE_UUID,
@@ -3215,9 +3251,9 @@
.conn
.query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
.expect("Failed to get blob entry row count.");
- // We expect 6 rows here because there are two blobs per attestation key, i.e.,
- // One key and one certificate.
- assert_eq!(blob_entry_row_count, 6);
+ // We expect 9 rows here because there are three blobs per attestation key, i.e.,
+ // one key, one certificate chain, and one certificate.
+ assert_eq!(blob_entry_row_count, 9);
assert_eq!(db.delete_expired_attestation_keys()?, 2);
@@ -3225,8 +3261,9 @@
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
assert!(cert_chain.is_some());
let value = cert_chain.unwrap();
- assert_eq!(entry_values[1], value.cert_chain.to_vec());
- assert_eq!(entry_values[2], value.private_key.to_vec());
+ assert_eq!(entry_values.batch_cert, value.batch_cert.to_vec());
+ assert_eq!(entry_values.cert_chain, value.cert_chain.to_vec());
+ assert_eq!(entry_values.priv_key, value.private_key.to_vec());
cert_chain = db.retrieve_attestation_key_and_cert_chain(
Domain::APP,
@@ -3248,9 +3285,9 @@
.conn
.query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
.expect("Failed to get blob entry row count.");
- // There shound be 2 blob entries left, because we deleted two of the attestation
- // key entries with two blobs each.
- assert_eq!(blob_entry_row_count, 2);
+ // There should be 3 blob entries left, because we deleted two of the attestation
+ // key entries with three blobs each.
+ assert_eq!(blob_entry_row_count, 3);
Ok(())
}
@@ -4323,30 +4360,33 @@
.collect::<Result<Vec<_>>>()
}
+ struct RemoteProvValues {
+ cert_chain: Vec<u8>,
+ priv_key: Vec<u8>,
+ batch_cert: Vec<u8>,
+ }
+
fn load_attestation_key_pool(
db: &mut KeystoreDB,
expiration_date: i64,
namespace: i64,
base_byte: u8,
- ) -> Result<Vec<Vec<u8>>> {
- let mut chain: Vec<Vec<u8>> = Vec::new();
+ ) -> Result<RemoteProvValues> {
let public_key: Vec<u8> = vec![base_byte, 0x02 * base_byte];
let cert_chain: Vec<u8> = vec![0x03 * base_byte, 0x04 * base_byte];
let priv_key: Vec<u8> = vec![0x05 * base_byte, 0x06 * base_byte];
let raw_public_key: Vec<u8> = vec![0x0b * base_byte, 0x0c * base_byte];
+ let batch_cert: Vec<u8> = vec![base_byte * 0x0d, base_byte * 0x0e];
db.create_attestation_key_entry(&public_key, &raw_public_key, &priv_key, &KEYSTORE_UUID)?;
db.store_signed_attestation_certificate_chain(
&raw_public_key,
+ &batch_cert,
&cert_chain,
expiration_date,
&KEYSTORE_UUID,
)?;
db.assign_attestation_key(Domain::APP, namespace, &KEYSTORE_UUID)?;
- chain.push(public_key);
- chain.push(cert_chain);
- chain.push(priv_key);
- chain.push(raw_public_key);
- Ok(chain)
+ Ok(RemoteProvValues { cert_chain, priv_key, batch_cert })
}
// Note: The parameters and SecurityLevel associations are nonsensical. This
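
A minimal in-module sketch (hypothetical helper, mirroring the tests above) of how a caller sees the three-part CertificateChain after this change; the helper name is illustrative.

fn has_complete_attestation_key(db: &mut KeystoreDB, namespace: i64) -> Result<bool> {
    Ok(db
        .retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?
        .map(|chain| {
            // private_key is the KeyMint key blob, batch_cert the batch certificate stored by
            // store_signed_attestation_certificate_chain, and cert_chain the DER encoded chain
            // provisioned by the server.
            !chain.private_key.to_vec().is_empty()
                && !chain.batch_cert.to_vec().is_empty()
                && !chain.cert_chain.to_vec().is_empty()
        })
        .unwrap_or(false))
}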
diff --git a/keystore2/src/error.rs b/keystore2/src/error.rs
index 7227f62..d67f5f4 100644
--- a/keystore2/src/error.rs
+++ b/keystore2/src/error.rs
@@ -57,6 +57,10 @@
/// Wraps a Binder status code.
#[error("Binder transaction error {0:?}")]
BinderTransaction(StatusCode),
+ /// Wraps a Remote Provisioning ErrorCode as defined by the IRemotelyProvisionedComponent
+ /// AIDL interface spec.
+ #[error("Error::Rp({0:?})")]
+ Rp(ErrorCode),
}
impl Error {
@@ -101,6 +105,16 @@
})
}
+/// Helper function to map the binder status we get from calls into an
+/// IRemotelyProvisionedComponent to a Keystore Error. We don't create an anyhow error here to
+/// make it easier to evaluate service-specific errors.
+pub fn map_rem_prov_error<T>(r: BinderResult<T>) -> Result<T, Error> {
+ r.map_err(|s| match s.exception_code() {
+ ExceptionCode::SERVICE_SPECIFIC => Error::Rp(ErrorCode(s.service_specific_error())),
+ e_code => Error::Binder(e_code, 0),
+ })
+}
+
/// This function is similar to map_km_error only that we don't expect
/// any KeyMint error codes, we simply preserve the exception code and optional
/// service specific exception.
@@ -164,6 +178,7 @@
let rc = match root_cause.downcast_ref::<Error>() {
Some(Error::Rc(rcode)) => rcode.0,
Some(Error::Km(ec)) => ec.0,
+ Some(Error::Rp(_)) => ResponseCode::SYSTEM_ERROR.0,
// If an Error::Binder reaches this stage we report a system error.
// The exception code and possible service specific error will be
// printed in the error log above.
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 8cc0106..9668ee3 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -29,8 +29,8 @@
};
use crate::{enforcements::Enforcements, error::map_km_error};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- IKeyMintDevice::IKeyMintDevice, KeyMintHardwareInfo::KeyMintHardwareInfo,
- SecurityLevel::SecurityLevel,
+ IKeyMintDevice::IKeyMintDevice, IRemotelyProvisionedComponent::IRemotelyProvisionedComponent,
+ KeyMintHardwareInfo::KeyMintHardwareInfo, SecurityLevel::SecurityLevel,
};
use android_hardware_security_keymint::binder::{StatusCode, Strong};
use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
@@ -128,6 +128,21 @@
}
}
+#[derive(Default)]
+struct RemotelyProvisionedDevicesMap {
+ devices_by_sec_level: HashMap<SecurityLevel, Asp>,
+}
+
+impl RemotelyProvisionedDevicesMap {
+ fn dev_by_sec_level(&self, sec_level: &SecurityLevel) -> Option<Asp> {
+ self.devices_by_sec_level.get(sec_level).map(|dev| (*dev).clone())
+ }
+
+ fn insert(&mut self, sec_level: SecurityLevel, dev: Asp) {
+ self.devices_by_sec_level.insert(sec_level, dev);
+ }
+}
+
lazy_static! {
/// The path where keystore stores all its keys.
pub static ref DB_PATH: Mutex<PathBuf> = Mutex::new(
@@ -138,6 +153,8 @@
static ref KEY_MINT_DEVICES: Mutex<DevicesMap> = Default::default();
/// Timestamp service.
static ref TIME_STAMP_DEVICE: Mutex<Option<Asp>> = Default::default();
+ /// RemotelyProvisionedComponent HAL devices.
+ static ref REMOTELY_PROVISIONED_COMPONENT_DEVICES: Mutex<RemotelyProvisionedDevicesMap> = Default::default();
/// A single on-demand worker thread that handles deferred tasks with two different
/// priorities.
pub static ref ASYNC_TASK: Arc<AsyncTask> = Default::default();
@@ -276,3 +293,45 @@
Ok(dev)
}
}
+
+static REMOTE_PROVISIONING_HAL_SERVICE_NAME: &str =
+ "android.hardware.security.keymint.IRemotelyProvisionedComponent";
+
+fn connect_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+ let service_name = match *security_level {
+ SecurityLevel::TRUSTED_ENVIRONMENT => {
+ format!("{}/default", REMOTE_PROVISIONING_HAL_SERVICE_NAME)
+ }
+ SecurityLevel::STRONGBOX => format!("{}/strongbox", REMOTE_PROVISIONING_HAL_SERVICE_NAME),
+ _ => {
+ // Given the integration of IRemotelyProvisionedComponent with KeyMint, it is reasonable
+ // to return HARDWARE_TYPE_UNAVAILABLE as a KeyMint error if the component cannot be found.
+ return Err(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
+ .context("In connect_remotely_provisioned_component.");
+ }
+ };
+
+ let rem_prov_hal: Strong<dyn IRemotelyProvisionedComponent> =
+ map_binder_status_code(binder::get_interface(&service_name))
+ .context(concat!(
+ "In connect_remotely_provisioned_component: Trying to connect to",
+ " RemotelyProvisionedComponent service."
+ ))?;
+ Ok(Asp::new(rem_prov_hal.as_binder()))
+}
+
+/// Get a remotely provisioned component device for the given security level, either from the
+/// cache or by making a new connection. Returns the device.
+pub fn get_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+ let mut devices_map = REMOTELY_PROVISIONED_COMPONENT_DEVICES.lock().unwrap();
+ if let Some(dev) = devices_map.dev_by_sec_level(&security_level) {
+ Ok(dev)
+ } else {
+ let dev = connect_remotely_provisioned_component(security_level)
+ .context("In get_remotely_provisioned_component.")?;
+ devices_map.insert(*security_level, dev);
+ // Unwrap must succeed because we just inserted it.
+ Ok(devices_map.dev_by_sec_level(security_level).unwrap())
+ }
+}
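
A minimal usage sketch of the cache above (hypothetical helper; the real consumer is RemoteProvisioningService::new_native_binder in remote_provisioning.rs): fetch the TEE instance and turn the Asp handle into a typed interface.

fn tee_remotely_provisioned_component() -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
    get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
        .context("In tee_remotely_provisioned_component.")?
        // Asp::get_interface infers the interface type from the return type.
        .get_interface()
        .context("In tee_remotely_provisioned_component: Trying to get interface.")
}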
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 30e3e22..9dc59a2 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -17,6 +17,7 @@
use keystore2::apc::ApcManager;
use keystore2::authorization::AuthorizationManager;
use keystore2::globals::ENFORCEMENTS;
+use keystore2::remote_provisioning::RemoteProvisioningService;
use keystore2::service::KeystoreService;
use keystore2::user_manager::UserManager;
use log::{error, info};
@@ -25,6 +26,7 @@
static KS2_SERVICE_NAME: &str = "android.system.keystore2";
static APC_SERVICE_NAME: &str = "android.security.apc";
static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
+static REMOTE_PROVISIONING_SERVICE_NAME: &str = "android.security.remoteprovisioning";
static USER_MANAGER_SERVICE_NAME: &str = "android.security.usermanager";
/// Keystore 2.0 takes one argument which is a path indicating its designated working directory.
@@ -98,6 +100,20 @@
},
);
+ // Devices with KS2 and KM 1.0 may not have any IRemotelyProvisionedComponent HALs at all. Do
+ // not panic if new_native_binder returns failure because it could not find the TEE HAL.
+ if let Ok(remote_provisioning_service) = RemoteProvisioningService::new_native_binder() {
+ binder::add_service(
+ REMOTE_PROVISIONING_SERVICE_NAME,
+ remote_provisioning_service.as_binder(),
+ )
+ .unwrap_or_else(|e| {
+ panic!(
+ "Failed to register service {} because of {:?}.",
+ REMOTE_PROVISIONING_SERVICE_NAME, e
+ );
+ });
+ }
info!("Successfully registered Keystore 2.0 service.");
info!("Joining thread pool now.");
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 1981022..b51f644 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -27,6 +27,7 @@
};
use anyhow::{Context, Result};
use keystore2_crypto::{aes_gcm_decrypt, derive_key_from_password, ZVec};
+use std::collections::{HashMap, HashSet};
use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
use std::{
fs,
@@ -724,6 +725,31 @@
Ok(result)
}
+ /// List all keystore entries belonging to the given user. Returns a map of UIDs
+ /// to sets of decoded aliases.
+ pub fn list_keystore_entries_for_user(
+ &self,
+ user_id: u32,
+ ) -> Result<HashMap<u32, HashSet<String>>> {
+ let user_entries = self
+ .list_user(user_id)
+ .context("In list_keystore_entries_for_user: Trying to list user.")?;
+
+ let result =
+ user_entries.into_iter().fold(HashMap::<u32, HashSet<String>>::new(), |mut acc, v| {
+ if let Some(sep_pos) = v.find('_') {
+ if let Ok(uid) = v[0..sep_pos].parse::<u32>() {
+ if let Some(alias) = Self::extract_alias(&v[sep_pos + 1..]) {
+ let entry = acc.entry(uid).or_default();
+ entry.insert(alias);
+ }
+ }
+ }
+ acc
+ });
+ Ok(result)
+ }
+
/// List all keystore entries belonging to the given uid.
pub fn list_keystore_entries_for_uid(&self, uid: u32) -> Result<Vec<String>> {
let user_id = uid_to_android_user(uid);
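
A minimal usage sketch of list_keystore_entries_for_user (hypothetical helper, assuming the log crate is available): the returned map is keyed by uid, each mapping to the set of decoded aliases for that uid.

fn log_legacy_entries_of_user(loader: &LegacyBlobLoader, user_id: u32) -> Result<()> {
    for (uid, aliases) in loader.list_keystore_entries_for_user(user_id)? {
        for alias in aliases {
            log::info!("legacy keystore entry: uid={} alias={}", uid, alias);
        }
    }
    Ok(())
}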
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
index 60c6bca..9ffe86c 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_migrator.rs
@@ -19,6 +19,7 @@
KeystoreDB, Uuid, KEYSTORE_UUID,
};
use crate::error::Error;
+use crate::key_parameter::KeyParameterValue;
use crate::legacy_blob::BlobValue;
use crate::utils::uid_to_android_user;
use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
@@ -66,6 +67,11 @@
}
}
+enum BulkDeleteRequest {
+ Uid(u32),
+ User(u32),
+}
+
struct LegacyMigratorState {
recently_migrated: HashSet<RecentMigration>,
recently_migrated_super_key: HashSet<u32>,
@@ -356,6 +362,31 @@
}
}
+ /// Deletes all keys belonging to the given uid, migrating them into the database
+ /// for subsequent garbage collection if necessary.
+ pub fn bulk_delete_uid(&self, uid: u32, keep_non_super_encrypted_keys: bool) -> Result<()> {
+ let result = self.do_serialized(move |migrator_state| {
+ migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), keep_non_super_encrypted_keys)
+ });
+
+ result.unwrap_or(Ok(()))
+ }
+
+ /// Deletes all keys belonging to the given android user, migrating them into the database
+ /// for subsequent garbage collection if necessary.
+ pub fn bulk_delete_user(
+ &self,
+ user_id: u32,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ let result = self.do_serialized(move |migrator_state| {
+ migrator_state
+ .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
+ });
+
+ result.unwrap_or(Ok(()))
+ }
+
/// Queries the legacy database for the presence of a super key for the given user.
pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
let result =
@@ -539,6 +570,111 @@
}
}
+ /// Key migrator request to be run by do_serialized.
+ /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
+ fn bulk_delete(
+ &mut self,
+ bulk_delete_request: BulkDeleteRequest,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ let (aliases, user_id) = match bulk_delete_request {
+ BulkDeleteRequest::Uid(uid) => (
+ self.legacy_loader
+ .list_keystore_entries_for_uid(uid)
+ .context("In bulk_delete: Trying to get aliases for uid.")
+ .map(|aliases| {
+ let mut h = HashMap::<u32, HashSet<String>>::new();
+ h.insert(uid, aliases.into_iter().collect());
+ h
+ })?,
+ uid_to_android_user(uid),
+ ),
+ BulkDeleteRequest::User(user_id) => (
+ self.legacy_loader
+ .list_keystore_entries_for_user(user_id)
+ .context("In bulk_delete: Trying to get aliases for user_id.")?,
+ user_id,
+ ),
+ };
+
+ let super_key_id = self
+ .db
+ .load_super_key(user_id)
+ .context("In bulk_delete: Failed to load super key")?
+ .map(|(_, entry)| entry.id());
+
+ for (uid, alias) in aliases
+ .into_iter()
+ .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
+ .flatten()
+ {
+ let (km_blob_params, _, _) = self
+ .legacy_loader
+ .load_by_uid_alias(uid, &alias, None)
+ .context("In bulk_delete: Trying to load legacy blob.")?;
+
+ // Determine if the key needs special handling to be deleted.
+ let (need_gc, is_super_encrypted) = km_blob_params
+ .as_ref()
+ .map(|(blob, params)| {
+ (
+ params.iter().any(|kp| {
+ KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
+ }),
+ blob.is_encrypted(),
+ )
+ })
+ .unwrap_or((false, false));
+
+ if keep_non_super_encrypted_keys && !is_super_encrypted {
+ continue;
+ }
+
+ if need_gc {
+ let mark_deleted = match km_blob_params
+ .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
+ {
+ Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
+ let mut blob_metadata = BlobMetaData::new();
+ if let (Ok(km_uuid), Some(super_key_id)) =
+ (self.get_km_uuid(is_strongbox), super_key_id)
+ {
+ blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ blob_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ Some((LegacyBlob::Vec(data), blob_metadata))
+ } else {
+ // Oh well - we tried our best, but if we cannot determine which
+ // KeyMint instance we have to send this blob to, we cannot
+ // do more than delete the key from the file system.
+ // And if we don't know which key wraps this key we cannot
+ // unwrap it for KeyMint either.
+ None
+ }
+ }
+ Some((_, BlobValue::Decrypted(data))) => {
+ Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
+ }
+ _ => None,
+ };
+
+ if let Some((blob, blob_metadata)) = mark_deleted {
+ self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
+ "In bulk_delete: Trying to insert deleted ",
+ "blob into the database for garbage collection."
+ ))?;
+ }
+ }
+
+ self.legacy_loader
+ .remove_keystore_entry(uid, &alias)
+ .context("In bulk_delete: Trying to remove migrated key.")?;
+ }
+ Ok(())
+ }
+
fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
Ok(self.recently_migrated_super_key.contains(&user_id)
|| self.legacy_loader.has_super_key(user_id))
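
A minimal caller sketch for the new bulk delete entry points (hypothetical helper): clearing an app's legacy keys routes any super encrypted blobs through set_deleted_blob for the garbage collector, as described above.

fn clear_legacy_keys_of_app(migrator: &LegacyMigrator, uid: u32) -> Result<()> {
    migrator
        .bulk_delete_uid(uid, /* keep_non_super_encrypted_keys */ false)
        .context("In clear_legacy_keys_of_app: Trying to bulk delete legacy keys.")
}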
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index fe38504..d606b6a 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -19,28 +19,54 @@
//! certificate chains signed by some root authority and stored in a keystore SQLite
//! DB.
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use std::collections::HashMap;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ IRemotelyProvisionedComponent::IRemotelyProvisionedComponent, MacedPublicKey::MacedPublicKey,
+ ProtectedData::ProtectedData, SecurityLevel::SecurityLevel,
+};
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
IRemoteProvisioning::IRemoteProvisioning,
};
use android_security_remoteprovisioning::binder::Strong;
-use anyhow::Result;
+use anyhow::{Context, Result};
-use crate::error::map_or_log_err;
-use crate::globals::{get_keymint_device, DB};
+use crate::error::{self, map_or_log_err, map_rem_prov_error};
+use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
+use crate::utils::Asp;
/// Implementation of the IRemoteProvisioning service.
+#[derive(Default)]
pub struct RemoteProvisioningService {
- // TODO(b/179222809): Add the remote provisioner hal aidl interface when available
+ device_by_sec_level: HashMap<SecurityLevel, Asp>,
}
impl RemoteProvisioningService {
+ fn get_dev_by_sec_level(
+ &self,
+ sec_level: &SecurityLevel,
+ ) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
+ if let Some(dev) = self.device_by_sec_level.get(sec_level) {
+ dev.get_interface().context("In get_dev_by_sec_level.")
+ } else {
+ Err(error::Error::sys()).context(concat!(
+ "In get_dev_by_sec_level: Remote instance for requested security level",
+ " not found."
+ ))
+ }
+ }
+
/// Creates a new instance of the remote provisioning service
pub fn new_native_binder() -> Result<Strong<dyn IRemoteProvisioning>> {
- let result = BnRemoteProvisioning::new_binder(Self {});
- Ok(result)
+ let mut result: Self = Default::default();
+ let dev = get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
+ .context("In new_native_binder: Failed to get TEE Remote Provisioner instance.")?;
+ result.device_by_sec_level.insert(SecurityLevel::TRUSTED_ENVIRONMENT, dev);
+ if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
+ result.device_by_sec_level.insert(SecurityLevel::STRONGBOX, dev);
+ }
+ Ok(BnRemoteProvisioning::new_binder(result))
}
/// Populates the AttestationPoolStatus parcelable with information about how many
@@ -54,6 +80,11 @@
let (_, _, uuid) = get_keymint_device(&sec_level)?;
DB.with::<_, Result<AttestationPoolStatus>>(|db| {
let mut db = db.borrow_mut();
+ // delete_expired_attestation_keys is always safe to call, and will remove anything
+ // older than the date at the time of calling. No work should be done on the
+ // attestation keys unless the pool status is checked first, so this call should be
+ // enough to routinely clean out expired keys.
+ db.delete_expired_attestation_keys()?;
Ok(db.get_attestation_pool_status(expired_by, &uuid)?)
})
}
@@ -68,15 +99,34 @@
/// baked in root of trust in the underlying IRemotelyProvisionedComponent instance.
pub fn generate_csr(
&self,
- _test_mode: bool,
- _num_csr: i32,
- _eek: &[u8],
- _challenge: &[u8],
- _sec_level: SecurityLevel,
+ test_mode: bool,
+ num_csr: i32,
+ eek: &[u8],
+ challenge: &[u8],
+ sec_level: SecurityLevel,
+ protected_data: &mut ProtectedData,
) -> Result<Vec<u8>> {
- // TODO(b/179222809): implement with actual remote provisioner AIDL when available. For now
- // it isnice to have some junk values
- Ok(vec![0, 1, 3, 3])
+ let dev = self.get_dev_by_sec_level(&sec_level)?;
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ let keys_to_sign = DB.with::<_, Result<Vec<MacedPublicKey>>>(|db| {
+ let mut db = db.borrow_mut();
+ Ok(db
+ .fetch_unsigned_attestation_keys(num_csr, &uuid)?
+ .iter()
+ .map(|key| MacedPublicKey { macedKey: key.to_vec() })
+ .collect())
+ })?;
+ let mut mac = Vec::<u8>::with_capacity(32);
+ map_rem_prov_error(dev.generateCertificateRequest(
+ test_mode,
+ &keys_to_sign,
+ eek,
+ challenge,
+ &mut mac,
+ protected_data,
+ ))
+ .context("In generate_csr: Failed to generate csr")?;
+ Ok(mac)
}
/// Provisions a certificate chain for a key whose CSR was included in generate_csr. The
@@ -87,6 +137,7 @@
pub fn provision_cert_chain(
&self,
public_key: &[u8],
+ batch_cert: &[u8],
certs: &[u8],
expiration_date: i64,
sec_level: SecurityLevel,
@@ -96,6 +147,7 @@
let (_, _, uuid) = get_keymint_device(&sec_level)?;
Ok(db.store_signed_attestation_certificate_chain(
public_key,
+ batch_cert,
certs, /* DER encoded certificate chain */
expiration_date,
&uuid,
@@ -107,8 +159,35 @@
/// `is_test_mode` indicates whether or not the returned public key should be marked as being
/// for testing in order to differentiate them from private keys. If the call is successful,
/// the key pair is then added to the database.
- pub fn generate_key_pair(&self, _is_test_mode: bool, _sec_level: SecurityLevel) -> Result<()> {
- Ok(())
+ pub fn generate_key_pair(&self, is_test_mode: bool, sec_level: SecurityLevel) -> Result<()> {
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ let dev = self.get_dev_by_sec_level(&sec_level)?;
+ let mut maced_key = MacedPublicKey { macedKey: Vec::new() };
+ let priv_key =
+ map_rem_prov_error(dev.generateEcdsaP256KeyPair(is_test_mode, &mut maced_key))
+ .context("In generate_key_pair: Failed to generated ECDSA keypair.")?;
+ // TODO(b/180392379): This is a brittle hack that relies on the consistent formatting of
+ // the returned CBOR blob in order to extract the public key.
+ let data = &maced_key.macedKey;
+ if data.len() < 85 {
+ return Err(error::Error::sys()).context(concat!(
+ "In generate_key_pair: CBOR blob returned from",
+ "RemotelyProvisionedComponent is definitely malformatted or empty."
+ ));
+ }
+ let mut raw_key: Vec<u8> = vec![0; 64];
+ raw_key[0..32].clone_from_slice(&data[18..18 + 32]);
+ raw_key[32..64].clone_from_slice(&data[53..53 + 32]);
+ DB.with::<_, Result<()>>(|db| {
+ let mut db = db.borrow_mut();
+ Ok(db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)?)
+ })
+ }
+
+ /// Checks the security level of each available IRemotelyProvisionedComponent HAL and returns
+ /// all levels in an array to the caller.
+ pub fn get_security_levels(&self) -> Result<Vec<SecurityLevel>> {
+ Ok(self.device_by_sec_level.keys().cloned().collect())
}
}
@@ -132,18 +211,26 @@
eek: &[u8],
challenge: &[u8],
sec_level: SecurityLevel,
+ protected_data: &mut ProtectedData,
) -> binder::public_api::Result<Vec<u8>> {
- map_or_log_err(self.generate_csr(test_mode, num_csr, eek, challenge, sec_level), Ok)
+ map_or_log_err(
+ self.generate_csr(test_mode, num_csr, eek, challenge, sec_level, protected_data),
+ Ok,
+ )
}
fn provisionCertChain(
&self,
public_key: &[u8],
+ batch_cert: &[u8],
certs: &[u8],
expiration_date: i64,
sec_level: SecurityLevel,
) -> binder::public_api::Result<()> {
- map_or_log_err(self.provision_cert_chain(public_key, certs, expiration_date, sec_level), Ok)
+ map_or_log_err(
+ self.provision_cert_chain(public_key, batch_cert, certs, expiration_date, sec_level),
+ Ok,
+ )
}
fn generateKeyPair(
@@ -153,4 +240,8 @@
) -> binder::public_api::Result<()> {
map_or_log_err(self.generate_key_pair(is_test_mode, sec_level), Ok)
}
+
+ fn getSecurityLevels(&self) -> binder::public_api::Result<Vec<SecurityLevel>> {
+ map_or_log_err(self.get_security_levels(), Ok)
+ }
}
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 156d20d..5ee685a 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -451,9 +451,10 @@
match key_blob_before_upgrade {
KeyBlob::Sensitive(_, super_key) => {
let (key, metadata) = Self::encrypt_with_super_key(key_after_upgrade, super_key)
- .context(
- "In reencrypt_on_upgrade_if_required. Failed to re-super-encrypt key on key upgrade.",
- )?;
+ .context(concat!(
+ "In reencrypt_on_upgrade_if_required. ",
+ "Failed to re-super-encrypt key on key upgrade."
+ ))?;
Ok((KeyBlob::NonSensitive(key), Some(metadata)))
}
_ => Ok((KeyBlob::Ref(key_after_upgrade), None)),
@@ -520,8 +521,9 @@
if password.is_none() {
//transitioning to swiping, delete only the super key in database and cache, and
//super-encrypted keys in database (and in KM)
- Self::reset_user(db, skm, user_id, true)
- .context("In get_with_password_changed.")?;
+ Self::reset_user(db, skm, legacy_migrator, user_id, true).context(
+ "In get_with_password_changed: Trying to delete keys from the db.",
+ )?;
//Lskf is now removed in Keystore
Ok(UserState::Uninitialized)
} else {
@@ -570,10 +572,14 @@
pub fn reset_user(
db: &mut KeystoreDB,
skm: &SuperKeyManager,
+ legacy_migrator: &LegacyMigrator,
user_id: u32,
keep_non_super_encrypted_keys: bool,
) -> Result<()> {
// mark keys created on behalf of the user as unreferenced.
+ legacy_migrator
+ .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
+ .context("In reset_user: Trying to delete legacy keys.")?;
db.unbind_keys_for_user(user_id as u32, keep_non_super_encrypted_keys)
.context("In reset user. Error in unbinding keys.")?;
@@ -583,18 +589,18 @@
}
}
-/// This enum represents two states a Keymint Blob can be in, w.r.t super encryption.
-/// Sensitive variant represents a Keymint blob that is supposed to be super encrypted,
-/// but unwrapped during usage. Therefore, it has the super key along with the unwrapped key.
-/// Ref variant represents a Keymint blob that is not required to super encrypt or that is
-/// already super encrypted.
+/// This enum represents the three states a KeyMint blob can be in w.r.t. super encryption.
+/// `Sensitive` holds the unencrypted key and a reference to its super key.
+/// `NonSensitive` holds an unencrypted key that is never supposed to be encrypted.
+/// `Ref` holds a reference to a key blob when it does not need to be modified and its
+/// lifetime allows borrowing it.
pub enum KeyBlob<'a> {
Sensitive(ZVec, SuperKey),
NonSensitive(Vec<u8>),
Ref(&'a [u8]),
}
-/// Deref returns a reference to the key material in both variants.
+/// Deref returns a reference to the key material in any variant.
impl<'a> Deref for KeyBlob<'a> {
type Target = [u8];
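
A minimal sketch (hypothetical helper) of how the three KeyBlob variants documented above are typically distinguished; Deref gives uniform access to the key material in every case.

fn describe_key_blob(blob: &KeyBlob<'_>) -> (&'static str, usize) {
    let kind = match blob {
        KeyBlob::Sensitive(_, _) => "unwrapped key that must be re-super-encrypted before storage",
        KeyBlob::NonSensitive(_) => "plain key blob that is never super encrypted",
        KeyBlob::Ref(_) => "borrowed key blob that is used as is",
    };
    (kind, blob.len())
}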
diff --git a/keystore2/src/user_manager.rs b/keystore2/src/user_manager.rs
index 8b7aad9..8e09144 100644
--- a/keystore2/src/user_manager.rs
+++ b/keystore2/src/user_manager.rs
@@ -59,12 +59,12 @@
.context("In on_user_password_changed.")?
{
UserState::LskfLocked => {
- //error - password can not be changed when the device is locked
+ // Error - password can not be changed when the device is locked
Err(KeystoreError::Rc(ResponseCode::LOCKED))
.context("In on_user_password_changed. Device is locked.")
}
_ => {
- //LskfLocked is the only error case for password change
+ // LskfLocked is the only error case for password change
Ok(())
}
}
@@ -74,7 +74,16 @@
// Check permission. Function should return if this failed. Therefore having '?' at the end
// is very important.
check_keystore_permission(KeystorePerm::change_user()).context("In add_or_remove_user.")?;
- DB.with(|db| UserState::reset_user(&mut db.borrow_mut(), &SUPER_KEY, user_id as u32, false))
+ DB.with(|db| {
+ UserState::reset_user(
+ &mut db.borrow_mut(),
+ &SUPER_KEY,
+ &LEGACY_MIGRATOR,
+ user_id as u32,
+ false,
+ )
+ })
+ .context("In add_or_remove_user: Trying to delete keys from db.")
}
}
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index d47a04f..5db19b7 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -71,7 +71,7 @@
tidy_checks: tidy_errors,
tidy_checks_as_errors: tidy_errors,
tidy_flags: [
- "-format-style='file'",
+ "-format-style=file",
],
}
diff --git a/ondevice-signing/Keymaster.cpp b/ondevice-signing/Keymaster.cpp
index d43828a..267dec8 100644
--- a/ondevice-signing/Keymaster.cpp
+++ b/ondevice-signing/Keymaster.cpp
@@ -68,7 +68,7 @@
mDevice = devToUse;
- return true;
+ return mDevice != nullptr;
}
std::optional<Keymaster> Keymaster::getInstance() {
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index 579d3d8..b4a6a54 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -76,9 +76,10 @@
static Result<std::vector<uint8_t>> signDigest(const KeymasterSigningKey& key,
const std::vector<uint8_t>& digest) {
- struct fsverity_signed_digest* d = NULL;
+ fsverity_signed_digest* d;
size_t signed_digest_size = sizeof(*d) + digest.size();
- d = (struct fsverity_signed_digest*)malloc(signed_digest_size);
+ std::unique_ptr<uint8_t[]> digest_buffer{new uint8_t[signed_digest_size]};
+ d = (fsverity_signed_digest*)digest_buffer.get();
memcpy(d->magic, "FSVerity", 8);
d->digest_algorithm = cpu_to_le16(FS_VERITY_HASH_ALG_SHA256);
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index 2ef9511..3baba68 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -50,6 +50,7 @@
Result<void> addCertToFsVerityKeyring(const std::string& path) {
const char* const argv[] = {kFsVerityInitPath, "--load-extra-key", "fsv_ods"};
+ // NOLINTNEXTLINE(android-cloexec-open): Deliberately not O_CLOEXEC
int fd = open(path.c_str(), O_RDONLY);
pid_t pid = fork();
if (pid == 0) {
@@ -91,7 +92,7 @@
return KeymasterSigningKey::loadFromBlobAndVerify(kSigningKeyBlob);
}
-Result<void> verifyAndLoadExistingCert(const KeymasterSigningKey& key) {
+Result<void> verifyExistingCert(const KeymasterSigningKey& key) {
if (access(kSigningKeyCert.c_str(), F_OK) < 0) {
return ErrnoError() << "Key certificate not found: " << kSigningKeyCert;
}
@@ -109,11 +110,6 @@
<< " does not match signing public key.";
}
- auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert);
- if (!cert_add_result.ok()) {
- return cert_add_result.error();
- }
-
// At this point, we know the cert matches
return {};
}
@@ -174,7 +170,7 @@
LOG(INFO) << "Found and verified existing key: " << kSigningKeyBlob;
}
- auto existing_cert = verifyAndLoadExistingCert(key.value());
+ auto existing_cert = verifyExistingCert(key.value());
if (!existing_cert.ok()) {
LOG(WARNING) << existing_cert.error().message();
@@ -185,15 +181,15 @@
// TODO apparently the key become invalid - delete the blob / cert
return -1;
}
- auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert);
- if (!cert_add_result.ok()) {
- LOG(ERROR) << "Failed to add certificate to fs-verity keyring: "
- << cert_add_result.error().message();
- return -1;
- }
} else {
LOG(INFO) << "Found and verified existing public key certificate: " << kSigningKeyCert;
}
+ auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert);
+ if (!cert_add_result.ok()) {
+ LOG(ERROR) << "Failed to add certificate to fs-verity keyring: "
+ << cert_add_result.error().message();
+ return -1;
+ }
auto verityStatus = verifyAllFilesInVerity(kArtArtifactsDir);
if (!verityStatus.ok()) {