Add support for keystore2 storage metrics
Add pull atoms that report the current state of keystore2 storage.
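
Each pulled atom reports one storage type together with its total and
unused size in bytes, i.e. the fields of the generated
Keystore2StorageStats struct:

    Keystore2StorageStats { storage_type, size, unused_size }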
Bug: 172013262
Test: atest system/keystore/keystore2
Test: statsd_testdrive 10103
Change-Id: I0ee115d9bc65d17e6533c4520a1b65067cd2260c
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index da482f1..f645069 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -72,6 +72,9 @@
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
AttestationPoolStatus::AttestationPoolStatus,
};
+use statslog_rust::keystore2_storage_stats::{
+ Keystore2StorageStats, StorageType as StatsdStorageType,
+};
use keystore2_crypto::ZVec;
use lazy_static::lazy_static;
@@ -826,6 +829,9 @@
const UNASSIGNED_KEY_ID: i64 = -1i64;
const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
+ /// Name of the file that holds the cross-boot persistent database.
+ pub const PERSISTENT_DB_FILENAME: &'static str = &"persistent.sqlite";
+
/// This creates a PerBootDbKeepAlive object to keep the per boot database alive.
pub fn keep_perboot_db_alive() -> Result<PerBootDbKeepAlive> {
let conn = Connection::open_in_memory()
@@ -844,7 +850,7 @@
pub fn new(db_root: &Path, gc: Option<Gc>) -> Result<Self> {
// Build the path to the sqlite file.
let mut persistent_path = db_root.to_path_buf();
- persistent_path.push("persistent.sqlite");
+ persistent_path.push(Self::PERSISTENT_DB_FILENAME);
// Now convert them to strings prefixed with "file:"
let mut persistent_path_str = "file:".to_owned();
@@ -1039,6 +1045,100 @@
Ok(conn)
}
+ fn do_table_size_query(
+ &mut self,
+ storage_type: StatsdStorageType,
+ query: &str,
+ params: &[&str],
+ ) -> Result<Keystore2StorageStats> {
+ let (total, unused) = self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ tx.query_row(query, params, |row| Ok((row.get(0)?, row.get(1)?)))
+ .with_context(|| {
+                    format!("get_storage_stat: Error querying size of storage type {}", storage_type as i32)
+ })
+ .no_gc()
+ })?;
+ Ok(Keystore2StorageStats { storage_type, size: total, unused_size: unused })
+ }
+
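+    /// Returns the total and unused size of the persistent database in bytes,
+    /// derived from SQLite page statistics: `page_count * page_size` for the
+    /// overall size and `freelist_count * page_size` for free-list space.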
+ fn get_total_size(&mut self) -> Result<Keystore2StorageStats> {
+ self.do_table_size_query(
+ StatsdStorageType::Database,
+ "SELECT page_count * page_size, freelist_count * page_size
+ FROM pragma_page_count('persistent'),
+ pragma_page_size('persistent'),
+ persistent.pragma_freelist_count();",
+ &[],
+ )
+ }
+
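+    /// Returns the size and unused space of a single table or index by querying
+    /// the SQLite `dbstat` virtual table, aggregated over the named object in
+    /// the given schema.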
+ fn get_table_size(
+ &mut self,
+ storage_type: StatsdStorageType,
+ schema: &str,
+ table: &str,
+ ) -> Result<Keystore2StorageStats> {
+ self.do_table_size_query(
+ storage_type,
+ "SELECT pgsize,unused FROM dbstat(?1)
+ WHERE name=?2 AND aggregate=TRUE;",
+ &[schema, table],
+ )
+ }
+
+    /// Fetches a storage statistics atom for a given storage type. For storage
+    /// types that map to a table, information about that table's storage is
+    /// returned. Requests for storage types that do not correspond to a DB
+    /// table return an error.
+ pub fn get_storage_stat(
+ &mut self,
+ storage_type: StatsdStorageType,
+ ) -> Result<Keystore2StorageStats> {
+ match storage_type {
+ StatsdStorageType::Database => self.get_total_size(),
+ StatsdStorageType::KeyEntry => {
+ self.get_table_size(storage_type, "persistent", "keyentry")
+ }
+ StatsdStorageType::KeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "keyentry_id_index")
+ }
+ StatsdStorageType::KeyEntryDomainNamespaceIndex => {
+ self.get_table_size(storage_type, "persistent", "keyentry_domain_namespace_index")
+ }
+ StatsdStorageType::BlobEntry => {
+ self.get_table_size(storage_type, "persistent", "blobentry")
+ }
+ StatsdStorageType::BlobEntryKeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "blobentry_keyentryid_index")
+ }
+ StatsdStorageType::KeyParameter => {
+ self.get_table_size(storage_type, "persistent", "keyparameter")
+ }
+ StatsdStorageType::KeyParameterKeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "keyparameter_keyentryid_index")
+ }
+ StatsdStorageType::KeyMetadata => {
+ self.get_table_size(storage_type, "persistent", "keymetadata")
+ }
+ StatsdStorageType::KeyMetadataKeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "keymetadata_keyentryid_index")
+ }
+ StatsdStorageType::Grant => self.get_table_size(storage_type, "persistent", "grant"),
+ StatsdStorageType::AuthToken => {
+ self.get_table_size(storage_type, "perboot", "authtoken")
+ }
+ StatsdStorageType::BlobMetadata => {
+ self.get_table_size(storage_type, "persistent", "blobmetadata")
+ }
+ StatsdStorageType::BlobMetadataBlobEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "blobmetadata_blobentryid_index")
+ }
+ _ => Err(anyhow::Error::msg(format!(
+ "Unsupported storage type: {}",
+ storage_type as i32
+ ))),
+ }
+ }
+
/// This function is intended to be used by the garbage collector.
/// It deletes the blob given by `blob_id_to_delete`. It then tries to find a superseded
/// key blob that might need special handling by the garbage collector.
@@ -3006,6 +3106,8 @@
use rusqlite::NO_PARAMS;
use rusqlite::{Error, TransactionBehavior};
use std::cell::RefCell;
+ use std::collections::BTreeMap;
+ use std::fmt::Write;
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Arc;
use std::thread;
@@ -4931,4 +5033,192 @@
assert_eq!(secret_bytes, &*decrypted_secret_bytes);
Ok(())
}
+
+ fn get_valid_statsd_storage_types() -> Vec<StatsdStorageType> {
+ vec![
+ StatsdStorageType::KeyEntry,
+ StatsdStorageType::KeyEntryIdIndex,
+ StatsdStorageType::KeyEntryDomainNamespaceIndex,
+ StatsdStorageType::BlobEntry,
+ StatsdStorageType::BlobEntryKeyEntryIdIndex,
+ StatsdStorageType::KeyParameter,
+ StatsdStorageType::KeyParameterKeyEntryIdIndex,
+ StatsdStorageType::KeyMetadata,
+ StatsdStorageType::KeyMetadataKeyEntryIdIndex,
+ StatsdStorageType::Grant,
+ StatsdStorageType::AuthToken,
+ StatsdStorageType::BlobMetadata,
+ StatsdStorageType::BlobMetadataBlobEntryIdIndex,
+ ]
+ }
+
+ /// Perform a simple check to ensure that we can query all the storage types
+ /// that are supported by the DB. Check for reasonable values.
+ #[test]
+ fn test_query_all_valid_table_sizes() -> Result<()> {
+ const PAGE_SIZE: i64 = 4096;
+
+ let mut db = new_test_db()?;
+
+ for t in get_valid_statsd_storage_types() {
+ let stat = db.get_storage_stat(t)?;
+ assert!(stat.size >= PAGE_SIZE);
+ assert!(stat.size >= stat.unused_size);
+ }
+
+ Ok(())
+ }
+
+ fn get_storage_stats_map(db: &mut KeystoreDB) -> BTreeMap<i32, Keystore2StorageStats> {
+ get_valid_statsd_storage_types()
+ .into_iter()
+ .map(|t| (t as i32, db.get_storage_stat(t).unwrap()))
+ .collect()
+ }
+
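+    /// Asserts that each storage type in `increased_storage_types` grew in size
+    /// (and did not gain unused space) relative to `baseline`, and that every
+    /// other storage type is unchanged. The baseline is updated in place with
+    /// the new measurements for the increased types.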
+ fn assert_storage_increased(
+ db: &mut KeystoreDB,
+ increased_storage_types: Vec<StatsdStorageType>,
+ baseline: &mut BTreeMap<i32, Keystore2StorageStats>,
+ ) {
+ for storage in increased_storage_types {
+ // Verify the expected storage increased.
+ let new = db.get_storage_stat(storage).unwrap();
+ let storage = storage as i32;
+ let old = &baseline[&storage];
+ assert!(new.size >= old.size, "{}: {} >= {}", storage, new.size, old.size);
+ assert!(
+ new.unused_size <= old.unused_size,
+ "{}: {} <= {}",
+ storage,
+ new.unused_size,
+ old.unused_size
+ );
+
+            // Update the baseline with the new value so that the unchanged-storage
+            // check below compares against the latest measurement.
+ baseline.insert(storage, new);
+ }
+
+ // Get an updated map of the storage and verify there were no unexpected changes.
+ let updated_stats = get_storage_stats_map(db);
+ assert_eq!(updated_stats.len(), baseline.len());
+
+ for &k in baseline.keys() {
+ let stringify = |map: &BTreeMap<i32, Keystore2StorageStats>| -> String {
+ let mut s = String::new();
+ for &k in map.keys() {
+ writeln!(&mut s, " {}: {}, {}", &k, map[&k].size, map[&k].unused_size)
+ .expect("string concat failed");
+ }
+ s
+ };
+
+ assert!(
+ updated_stats[&k].size == baseline[&k].size
+ && updated_stats[&k].unused_size == baseline[&k].unused_size,
+ "updated_stats:\n{}\nbaseline:\n{}",
+ stringify(&updated_stats),
+ stringify(&baseline)
+ );
+ }
+ }
+
+ #[test]
+ fn test_verify_key_table_size_reporting() -> Result<()> {
+ let mut db = new_test_db()?;
+ let mut working_stats = get_storage_stats_map(&mut db);
+
+ let key_id = db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
+ assert_storage_increased(
+ &mut db,
+ vec![
+ StatsdStorageType::KeyEntry,
+ StatsdStorageType::KeyEntryIdIndex,
+ StatsdStorageType::KeyEntryDomainNamespaceIndex,
+ ],
+ &mut working_stats,
+ );
+
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ db.set_blob(&key_id, SubComponentType::KEY_BLOB, Some(TEST_KEY_BLOB), None)?;
+ assert_storage_increased(
+ &mut db,
+ vec![
+ StatsdStorageType::BlobEntry,
+ StatsdStorageType::BlobEntryKeyEntryIdIndex,
+ StatsdStorageType::BlobMetadata,
+ StatsdStorageType::BlobMetadataBlobEntryIdIndex,
+ ],
+ &mut working_stats,
+ );
+
+ let params = make_test_params(None);
+ db.insert_keyparameter(&key_id, ¶ms)?;
+ assert_storage_increased(
+ &mut db,
+ vec![StatsdStorageType::KeyParameter, StatsdStorageType::KeyParameterKeyEntryIdIndex],
+ &mut working_stats,
+ );
+
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
+ db.insert_key_metadata(&key_id, &metadata)?;
+ assert_storage_increased(
+ &mut db,
+ vec![StatsdStorageType::KeyMetadata, StatsdStorageType::KeyMetadataKeyEntryIdIndex],
+ &mut working_stats,
+ );
+
+ let mut sum = 0;
+ for stat in working_stats.values() {
+ sum += stat.size;
+ }
+ let total = db.get_storage_stat(StatsdStorageType::Database)?.size;
+ assert!(sum <= total, "Expected sum <= total. sum: {}, total: {}", sum, total);
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_verify_auth_table_size_reporting() -> Result<()> {
+ let mut db = new_test_db()?;
+ let mut working_stats = get_storage_stats_map(&mut db);
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 456,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 10 },
+ mac: b"mac".to_vec(),
+ })?;
+ assert_storage_increased(&mut db, vec![StatsdStorageType::AuthToken], &mut working_stats);
+ Ok(())
+ }
+
+ #[test]
+ fn test_verify_grant_table_size_reporting() -> Result<()> {
+ const OWNER: i64 = 1;
+ let mut db = new_test_db()?;
+ make_test_key_entry(&mut db, Domain::APP, OWNER, TEST_ALIAS, None)?;
+
+ let mut working_stats = get_storage_stats_map(&mut db);
+ db.grant(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: 0,
+ alias: Some(TEST_ALIAS.to_string()),
+ blob: None,
+ },
+ OWNER as u32,
+ 123,
+ key_perm_set![KeyPerm::use_()],
+ |_, _| Ok(()),
+ )?;
+
+ assert_storage_increased(&mut db, vec![StatsdStorageType::Grant], &mut working_stats);
+
+ Ok(())
+ }
}
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index df7ba26..fd8a492 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -17,6 +17,7 @@
use keystore2::entropy;
use keystore2::globals::ENFORCEMENTS;
use keystore2::maintenance::Maintenance;
+use keystore2::metrics;
use keystore2::remote_provisioning::RemoteProvisioningService;
use keystore2::service::KeystoreService;
use keystore2::{apc::ApcManager, shared_secret_negotiation};
@@ -142,6 +143,13 @@
},
);
+ std::thread::spawn(|| {
+ match metrics::register_pull_metrics_callbacks() {
+ Err(e) => error!("register_pull_metrics_callbacks failed: {:?}.", e),
+ _ => info!("Pull metrics callbacks successfully registered."),
+ };
+ });
+
info!("Successfully registered Keystore 2.0 service.");
info!("Joining thread pool now.");
diff --git a/keystore2/src/metrics.rs b/keystore2/src/metrics.rs
index c5dd582..71c2f3f 100644
--- a/keystore2/src/metrics.rs
+++ b/keystore2/src/metrics.rs
@@ -14,6 +14,7 @@
//! This module provides convenience functions for keystore2 logging.
use crate::error::get_error_code;
+use crate::globals::DB;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
use crate::operation::Outcome;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
@@ -22,15 +23,22 @@
KeyParameter::KeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
SecurityLevel::SecurityLevel,
};
-use statslog_rust::keystore2_key_creation_event_reported::{
- Algorithm as StatsdAlgorithm, EcCurve as StatsdEcCurve, KeyOrigin as StatsdKeyOrigin,
- Keystore2KeyCreationEventReported, SecurityLevel as StatsdKeyCreationSecurityLevel,
- UserAuthType as StatsdUserAuthType,
+use anyhow::Result;
+use keystore2_system_property::PropertyWatcher;
+use statslog_rust::{
+ keystore2_key_creation_event_reported::{
+ Algorithm as StatsdAlgorithm, EcCurve as StatsdEcCurve, KeyOrigin as StatsdKeyOrigin,
+ Keystore2KeyCreationEventReported, SecurityLevel as StatsdKeyCreationSecurityLevel,
+ UserAuthType as StatsdUserAuthType,
+ },
+ keystore2_key_operation_event_reported::{
+ Keystore2KeyOperationEventReported, Outcome as StatsdOutcome, Purpose as StatsdKeyPurpose,
+ SecurityLevel as StatsdKeyOperationSecurityLevel,
+ },
+ keystore2_storage_stats::StorageType as StatsdStorageType,
};
-use statslog_rust::keystore2_key_operation_event_reported::{
- Keystore2KeyOperationEventReported, Outcome as StatsdOutcome, Purpose as StatsdKeyPurpose,
- SecurityLevel as StatsdKeyOperationSecurityLevel,
-};
+use statslog_rust_header::Atoms;
+use statspull_rust::{set_pull_atom_callback, StatsPullResult};
fn create_default_key_creation_atom() -> Keystore2KeyCreationEventReported {
// If a value is not present, fields represented by bitmaps and i32 fields
@@ -75,7 +83,7 @@
pub fn log_key_creation_event_stats<U>(
sec_level: SecurityLevel,
key_params: &[KeyParameter],
- result: &anyhow::Result<U>,
+ result: &Result<U>,
) {
let key_creation_event_stats =
construct_key_creation_event_stats(sec_level, key_params, result);
@@ -119,7 +127,7 @@
fn construct_key_creation_event_stats<U>(
sec_level: SecurityLevel,
key_params: &[KeyParameter],
- result: &anyhow::Result<U>,
+ result: &Result<U>,
) -> Keystore2KeyCreationEventReported {
let mut key_creation_event_atom = create_default_key_creation_atom();
@@ -375,6 +383,55 @@
}
bitmap
}
+
+/// Registers the pull metrics callbacks with statsd once the device has
+/// finished booting.
+pub fn register_pull_metrics_callbacks() -> Result<()> {
+ // Before registering the callbacks with statsd, we have to wait for the system to finish
+ // booting up. This avoids possible races that may occur at startup. For example, statsd
+ // depends on a companion service, and if registration happens too soon it will fail since
+ // the companion service isn't up yet.
+ let mut watcher = PropertyWatcher::new("sys.boot_completed")?;
+ loop {
+ watcher.wait()?;
+ let value = watcher.read(|_name, value| Ok(value.trim().to_string()));
+ if value? == "1" {
+ set_pull_atom_callback(Atoms::Keystore2StorageStats, None, pull_metrics_callback);
+ break;
+ }
+ }
+ Ok(())
+}
+
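+/// Pull callback registered with statsd: returns one Keystore2StorageStats atom
+/// per storage type that could be queried; failed queries are logged and
+/// skipped.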
+fn pull_metrics_callback() -> StatsPullResult {
+ let mut result = StatsPullResult::new();
+ let mut append = |stat| {
+ match stat {
+ Ok(s) => result.push(Box::new(s)),
+ Err(error) => {
+ log::error!("pull_metrics_callback: Error getting storage stat: {}", error)
+ }
+ };
+ };
+ DB.with(|db| {
+ let mut db = db.borrow_mut();
+ append(db.get_storage_stat(StatsdStorageType::Database));
+ append(db.get_storage_stat(StatsdStorageType::KeyEntry));
+ append(db.get_storage_stat(StatsdStorageType::KeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::KeyEntryDomainNamespaceIndex));
+ append(db.get_storage_stat(StatsdStorageType::BlobEntry));
+ append(db.get_storage_stat(StatsdStorageType::BlobEntryKeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::KeyParameter));
+ append(db.get_storage_stat(StatsdStorageType::KeyParameterKeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::KeyMetadata));
+ append(db.get_storage_stat(StatsdStorageType::KeyMetadataKeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::Grant));
+ append(db.get_storage_stat(StatsdStorageType::AuthToken));
+ append(db.get_storage_stat(StatsdStorageType::BlobMetadata));
+ append(db.get_storage_stat(StatsdStorageType::BlobMetadataBlobEntryIdIndex));
+ });
+ result
+}
+
/// Enum defining the bit position for each padding mode. Since padding mode can be repeatable, it
/// is represented using a bitmap.
#[allow(non_camel_case_types)]