Merge "Fail early if supplementary info isn't populated" into main
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index 09b84ec..b6f308b 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -317,7 +317,7 @@
}
}
-impl<'a> BorrowedECPoint<'a> {
+impl BorrowedECPoint<'_> {
/// Get the wrapped EC_POINT object.
pub fn get_point(&self) -> &EC_POINT {
// Safety: We only create BorrowedECPoint objects for valid EC_POINTs.
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 66b123e..626a1c0 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -2440,8 +2440,10 @@
.context("Trying to delete grants.")?;
// The associated blobentry rows are not immediately deleted when the owning keyentry is
// removed, because a KeyMint `deleteKey()` invocation is needed (specifically for the
- // `KEY_BLOB`). Mark the affected rows with `state=Orphaned` so a subsequent garbage
- // collection can do this.
+ // `KEY_BLOB`). That should not be done from within the database transaction. Also, calls
+ // to `deleteKey()` need to be delayed until the boot has completed, to avoid making
+ // permanent changes during an OTA before the point of no return. Mark the affected rows
+ // with `state=Orphaned` so a subsequent garbage collection can do the `deleteKey()`.
tx.execute(
"UPDATE persistent.blobentry SET state = ? WHERE keyentryid = ?",
params![BlobState::Orphaned, key_id],
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index f2341e3..9741671 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -22,6 +22,7 @@
use crate::{
async_task,
database::{KeystoreDB, SupersededBlob, Uuid},
+ globals,
super_key::SuperKeyManager,
};
use anyhow::{Context, Result};
@@ -135,6 +136,17 @@
/// Processes one key and then schedules another attempt until it runs out of blobs to delete.
fn step(&mut self) {
self.notified.store(0, Ordering::Relaxed);
+ if !globals::boot_completed() {
+ // Garbage collection involves an operation (`IKeyMintDevice::deleteKey()`) that cannot
+ // be rolled back in some cases (specifically, when the key is rollback-resistant), even
+ // if the Keystore database is restored to the version of an earlier userdata filesystem
+ // checkpoint.
+ //
+ // This means that we should not perform GC until boot has fully completed, and any
+ // in-progress OTA is definitely not going to be rolled back.
+ log::info!("skip GC as boot not completed");
+ return;
+ }
if let Err(e) = self.process_one_key() {
log::error!("Error trying to delete blob entry. {:?}", e);
}
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 3b9c631..9ee2a1e 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -46,7 +46,11 @@
use anyhow::{Context, Result};
use binder::FromIBinder;
use binder::{get_declared_instances, is_declared};
-use std::sync::{Arc, LazyLock, Mutex, RwLock};
+use rustutils::system_properties::PropertyWatcher;
+use std::sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, LazyLock, Mutex, RwLock,
+};
use std::{cell::RefCell, sync::Once};
use std::{collections::HashMap, path::Path, path::PathBuf};
@@ -449,3 +453,40 @@
.ok_or(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
.context(ks_err!("Failed to get rpc for sec level {:?}", *security_level))
}
+
+/// Whether boot is complete.
+static BOOT_COMPLETED: AtomicBool = AtomicBool::new(false);
+
+/// Indicate whether boot is complete.
+///
+/// This in turn indicates whether it is safe to make permanent changes to state.
+pub fn boot_completed() -> bool {
+ BOOT_COMPLETED.load(Ordering::Acquire)
+}
+
+/// Monitor the system property for boot complete. This blocks and so needs to be run in a separate
+/// thread.
+pub fn await_boot_completed() {
+ // Use a fairly long watchdog timeout of 5 minutes here. This blocks until the device
+ // boots, which on a very slow device (e.g., emulator for a non-native architecture) can
+ // take minutes. Blocking here would be unexpected only if it never finishes.
+ let _wp = wd::watch_millis("await_boot_completed", 300_000);
+ log::info!("monitoring for sys.boot_completed=1");
+ while let Err(e) = watch_for_boot_completed() {
+ log::error!("failed to watch for boot_completed: {e:?}");
+ std::thread::sleep(std::time::Duration::from_secs(5));
+ }
+
+ BOOT_COMPLETED.store(true, Ordering::Release);
+ log::info!("wait_for_boot_completed done, triggering GC");
+
+ // Garbage collection may have been skipped until now, so trigger a check.
+ GC.notify_gc();
+}
+
+fn watch_for_boot_completed() -> Result<()> {
+ let mut w = PropertyWatcher::new("sys.boot_completed")
+ .context(ks_err!("PropertyWatcher::new failed"))?;
+ w.wait_for_value("1", None).context(ks_err!("Failed to wait for sys.boot_completed"))?;
+ Ok(())
+}
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 178b36c..008e6fe 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -93,6 +93,7 @@
ENFORCEMENTS.install_confirmation_token_receiver(confirmation_token_receiver);
+ std::thread::spawn(keystore2::globals::await_boot_completed);
entropy::register_feeder();
shared_secret_negotiation::perform_shared_secret_negotiation();
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 42fd764..3e65753 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -1218,7 +1218,7 @@
Ref(&'a [u8]),
}
-impl<'a> KeyBlob<'a> {
+impl KeyBlob<'_> {
pub fn force_reencrypt(&self) -> bool {
if let KeyBlob::Sensitive { force_reencrypt, .. } = self {
*force_reencrypt
@@ -1229,7 +1229,7 @@
}
/// Deref returns a reference to the key material in any variant.
-impl<'a> Deref for KeyBlob<'a> {
+impl Deref for KeyBlob<'_> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
diff --git a/keystore2/test_utils/attestation/lib.rs b/keystore2/test_utils/attestation/lib.rs
index 8ae4fc0..31d3314 100644
--- a/keystore2/test_utils/attestation/lib.rs
+++ b/keystore2/test_utils/attestation/lib.rs
@@ -63,7 +63,7 @@
pub hw_enforced: AuthorizationList<'a>,
}
-impl<'a> AssociatedOid for AttestationExtension<'a> {
+impl AssociatedOid for AttestationExtension<'_> {
const OID: ObjectIdentifier = ATTESTATION_EXTENSION_OID;
}
@@ -112,7 +112,7 @@
pub version: i64,
}
-impl<'a> DerOrd for PackageInfoRecord<'a> {
+impl DerOrd for PackageInfoRecord<'_> {
fn der_cmp(&self, other: &Self) -> Result<std::cmp::Ordering, der::Error> {
self.package_name.der_cmp(&other.package_name)
}
@@ -139,7 +139,7 @@
pub auths: Cow<'a, [KeyParameter]>,
}
-impl<'a> From<Vec<KeyParameter>> for AuthorizationList<'a> {
+impl From<Vec<KeyParameter>> for AuthorizationList<'_> {
/// Build an `AuthorizationList` using a set of key parameters.
fn from(auths: Vec<KeyParameter>) -> Self {
AuthorizationList { auths: auths.into() }
@@ -149,7 +149,7 @@
impl<'a> Sequence<'a> for AuthorizationList<'a> {}
/// Stub (non-)implementation of DER-encoding, needed to implement [`Sequence`].
-impl<'a> EncodeValue for AuthorizationList<'a> {
+impl EncodeValue for AuthorizationList<'_> {
fn value_len(&self) -> der::Result<Length> {
unimplemented!("Only decoding is implemented");
}
diff --git a/keystore2/tests/keystore2_client_authorizations_tests.rs b/keystore2/tests/keystore2_client_authorizations_tests.rs
index 377dcbe..a546799 100644
--- a/keystore2/tests/keystore2_client_authorizations_tests.rs
+++ b/keystore2/tests/keystore2_client_authorizations_tests.rs
@@ -522,6 +522,10 @@
#[test]
fn keystore2_gen_key_auth_usage_count_limit() {
let sl = SecLevel::tee();
+ if sl.is_keymaster() {
+ // `USAGE_COUNT_LIMIT` is supported from KeyMint 1.0
+ return;
+ }
const MAX_USES_COUNT: i32 = 3;
let gen_params = authorizations::AuthSetBuilder::new()
@@ -546,6 +550,10 @@
#[test]
fn keystore2_gen_key_auth_usage_count_limit_one() {
let sl = SecLevel::tee();
+ if sl.is_keymaster() {
+ // `USAGE_COUNT_LIMIT` is supported from KeyMint 1.0
+ return;
+ }
const MAX_USES_COUNT: i32 = 1;
let gen_params = authorizations::AuthSetBuilder::new()
@@ -569,6 +577,10 @@
#[test]
fn keystore2_gen_non_attested_key_auth_usage_count_limit() {
let sl = SecLevel::tee();
+ if sl.is_keymaster() {
+ // `USAGE_COUNT_LIMIT` is supported from KeyMint 1.0
+ return;
+ }
const MAX_USES_COUNT: i32 = 2;
let gen_params = authorizations::AuthSetBuilder::new()
@@ -631,6 +643,12 @@
#[test]
fn keystore2_gen_key_auth_include_unique_id_success() {
let sl = SecLevel::tee();
+ if sl.is_keymaster() {
+ // b/387208956 - Some older devices with Keymaster implementations fail to generate an
+ // attestation key with `INCLUDE_UNIQUE_ID`, but this was not previously tested. Skip this
+ // test on devices with Keymaster implementation.
+ return;
+ }
let alias_first = "ks_test_auth_tags_test_1";
if let Some(unique_id_first) = gen_key_including_unique_id(&sl, alias_first) {
@@ -1030,6 +1048,11 @@
assert!(result.is_err());
assert_eq!(result.unwrap_err(), Error::Rc(ResponseCode::INVALID_ARGUMENT));
+ if sl.get_keymint_version() < 400 {
+ // Module hash will only be populated in KeyMint if the underlying device is KeyMint V4+.
+ return;
+ }
+
// Generate an attestation.
let alias = "ks_module_info_test";
let params = authorizations::AuthSetBuilder::new()
diff --git a/keystore2/tests/keystore2_client_test_utils.rs b/keystore2/tests/keystore2_client_test_utils.rs
index aa084ca..b9a8243 100644
--- a/keystore2/tests/keystore2_client_test_utils.rs
+++ b/keystore2/tests/keystore2_client_test_utils.rs
@@ -519,9 +519,7 @@
// then returns an empty byte vector.
pub fn get_system_prop(name: &str) -> Vec<u8> {
match rustutils::system_properties::read(name) {
- Ok(Some(value)) => {
- return value.as_bytes().to_vec();
- }
+ Ok(Some(value)) => value.as_bytes().to_vec(),
_ => {
vec![]
}
diff --git a/keystore2/watchdog/Android.bp b/keystore2/watchdog/Android.bp
index 5074388..9a99f10 100644
--- a/keystore2/watchdog/Android.bp
+++ b/keystore2/watchdog/Android.bp
@@ -26,6 +26,7 @@
crate_name: "watchdog_rs",
srcs: ["src/lib.rs"],
rustlibs: [
+ "libchrono",
"liblog_rust",
],
}
diff --git a/keystore2/watchdog/src/lib.rs b/keystore2/watchdog/src/lib.rs
index b4a1e0f..f6a1291 100644
--- a/keystore2/watchdog/src/lib.rs
+++ b/keystore2/watchdog/src/lib.rs
@@ -64,6 +64,18 @@
context: Option<Box<dyn std::fmt::Debug + Send + 'static>>,
}
+impl Record {
+ // Return a string representation of the start time of the record.
+ //
+ // Times are hard. This may not be accurate (e.g. if the system clock has been modified since
+ // the watchdog started), but it's _really_ useful to get a starting wall time for overrunning
+ // watchdogs.
+ fn started_utc(&self) -> String {
+ let started_utc = chrono::Utc::now() - self.started.elapsed();
+ format!("{}", started_utc.format("%m-%d %H:%M:%S%.3f UTC"))
+ }
+}
+
struct WatchdogState {
state: State,
thread: Option<thread::JoinHandle<()>>,
@@ -137,8 +149,13 @@
.filter(|(_, r)| r.deadline.saturating_duration_since(now) == Duration::new(0, 0))
.collect();
- log::warn!("When extracting from a bug report, please include this header");
- log::warn!("and all {} records below.", overdue_records.len());
+ log::warn!(
+ concat!(
+ "When extracting from a bug report, please include this header ",
+ "and all {} records below (to footer)"
+ ),
+ overdue_records.len()
+ );
// Watch points can be nested, i.e., a single thread may have multiple armed
// watch points. And the most recent on each thread (thread recent) is closest to the point
@@ -169,9 +186,10 @@
match &r.context {
Some(ctx) => {
log::warn!(
- "{:?} {} Pending: {:?} Overdue {:?} for {:?}",
+ "{:?} {} Started: {} Pending: {:?} Overdue {:?} for {:?}",
i.tid,
i.id,
+ r.started_utc(),
r.started.elapsed(),
r.deadline.elapsed(),
ctx
@@ -179,9 +197,10 @@
}
None => {
log::warn!(
- "{:?} {} Pending: {:?} Overdue {:?}",
+ "{:?} {} Started: {} Pending: {:?} Overdue {:?}",
i.tid,
i.id,
+ r.started_utc(),
r.started.elapsed(),
r.deadline.elapsed()
);
@@ -200,17 +219,19 @@
if timeout_left == Duration::new(0, 0) {
match &record.context {
Some(ctx) => log::info!(
- "Watchdog complete for: {:?} {} Pending: {:?} Overdue {:?} for {:?}",
+ "Watchdog complete for: {:?} {} Started: {} Pending: {:?} Overdue {:?} for {:?}",
index.tid,
index.id,
+ record.started_utc(),
record.started.elapsed(),
record.deadline.elapsed(),
ctx
),
None => log::info!(
- "Watchdog complete for: {:?} {} Pending: {:?} Overdue {:?}",
+ "Watchdog complete for: {:?} {} Started: {} Pending: {:?} Overdue {:?}",
index.tid,
index.id,
+ record.started_utc(),
record.started.elapsed(),
record.deadline.elapsed()
),