Merge changes I5b5018d4,I688fff83,Ib99d689d into sc-dev
* changes:
On-device signing: Remove Keymaster implementation.
On-device signing: verify the public key.
On-device signing: Switch to using a TEE-backed keystore key.
diff --git a/keystore/keystore_cli_v2.cpp b/keystore/keystore_cli_v2.cpp
index 6e45ee2..43f72a9 100644
--- a/keystore/keystore_cli_v2.cpp
+++ b/keystore/keystore_cli_v2.cpp
@@ -56,7 +56,7 @@
keymint::AuthorizationSet parameters;
};
-constexpr const char keystore2_service_name[] = "android.system.keystore2";
+constexpr const char keystore2_service_name[] = "android.system.keystore2.IKeystoreService/default";
int unwrapError(const ndk::ScopedAStatus& status) {
if (status.isOk()) return 0;
@@ -769,7 +769,7 @@
sec_level->generateKey(keyDescriptor(name), {} /* attestationKey */, params.vector_data(),
0 /* flags */, {} /* entropy */, &keyMetadata);
- if (rc.isOk()) {
+ if (!rc.isOk()) {
std::cerr << "GenerateKey failed: " << rc.getDescription() << std::endl;
return unwrapError(rc);
}
diff --git a/keystore/tests/Android.bp b/keystore/tests/Android.bp
index 249cb77..39601eb 100644
--- a/keystore/tests/Android.bp
+++ b/keystore/tests/Android.bp
@@ -62,9 +62,9 @@
"libgtest_main",
"libutils",
"liblog",
+ "android.security.apc-ndk_platform",
],
shared_libs: [
- "android.security.apc-ndk_platform",
"libbinder_ndk",
],
sanitize: {
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index 0ba49ed..32493c0 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -145,4 +145,6 @@
],
vintf_fragments: ["android.system.keystore2-service.xml"],
+
+ required: ["keystore_cli_v2"],
}
diff --git a/keystore2/aidl/Android.bp b/keystore2/aidl/Android.bp
index 06fdb48..1bfc98a 100644
--- a/keystore2/aidl/Android.bp
+++ b/keystore2/aidl/Android.bp
@@ -24,7 +24,7 @@
aidl_interface {
name: "android.security.attestationmanager",
srcs: [ "android/security/attestationmanager/*.aidl", ],
- imports: [ "android.hardware.security.keymint" ],
+ imports: [ "android.hardware.security.keymint-V1" ],
unstable: true,
backend: {
java: {
@@ -45,8 +45,8 @@
name: "android.security.authorization",
srcs: [ "android/security/authorization/*.aidl" ],
imports: [
- "android.hardware.security.keymint",
- "android.hardware.security.secureclock",
+ "android.hardware.security.keymint-V1",
+ "android.hardware.security.secureclock-V1",
],
unstable: true,
backend: {
@@ -86,9 +86,9 @@
name: "android.security.compat",
srcs: [ "android/security/compat/*.aidl" ],
imports: [
- "android.hardware.security.keymint",
- "android.hardware.security.secureclock",
- "android.hardware.security.sharedsecret",
+ "android.hardware.security.keymint-V1",
+ "android.hardware.security.secureclock-V1",
+ "android.hardware.security.sharedsecret-V1",
],
unstable: true,
backend: {
@@ -110,7 +110,7 @@
name: "android.security.remoteprovisioning",
srcs: [ "android/security/remoteprovisioning/*.aidl" ],
imports: [
- "android.hardware.security.keymint",
+ "android.hardware.security.keymint-V1",
],
unstable: true,
backend: {
@@ -132,7 +132,7 @@
name: "android.security.maintenance",
srcs: [ "android/security/maintenance/*.aidl" ],
imports: [
- "android.system.keystore2",
+ "android.system.keystore2-V1",
],
unstable: true,
backend: {
diff --git a/keystore2/selinux/Android.bp b/keystore2/selinux/Android.bp
index 18063d3..748e406 100644
--- a/keystore2/selinux/Android.bp
+++ b/keystore2/selinux/Android.bp
@@ -34,6 +34,7 @@
rustlibs: [
"libanyhow",
+ "liblazy_static",
"liblog_rust",
"libselinux_bindgen",
"libthiserror",
@@ -56,6 +57,7 @@
rustlibs: [
"libandroid_logger",
"libanyhow",
+ "liblazy_static",
"liblog_rust",
"libselinux_bindgen",
"libthiserror",
diff --git a/keystore2/selinux/src/lib.rs b/keystore2/selinux/src/lib.rs
index cc707e7..5197cf6 100644
--- a/keystore2/selinux/src/lib.rs
+++ b/keystore2/selinux/src/lib.rs
@@ -20,6 +20,13 @@
//! * selabel_lookup for the keystore2_key backend.
//! And it provides an owning wrapper around context strings `Context`.
+use anyhow::Context as AnyhowContext;
+use anyhow::{anyhow, Result};
+use lazy_static::lazy_static;
+pub use selinux::pid_t;
+use selinux::SELABEL_CTX_ANDROID_KEYSTORE2_KEY;
+use selinux::SELINUX_CB_LOG;
+use selinux_bindgen as selinux;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
@@ -29,18 +36,18 @@
use std::ptr;
use std::sync;
-use selinux_bindgen as selinux;
-
-use anyhow::Context as AnyhowContext;
-use anyhow::{anyhow, Result};
-
-use selinux::SELABEL_CTX_ANDROID_KEYSTORE2_KEY;
-use selinux::SELINUX_CB_LOG;
-
-pub use selinux::pid_t;
-
static SELINUX_LOG_INIT: sync::Once = sync::Once::new();
+lazy_static! {
+ /// `selinux_check_access` is only thread safe if avc_init was called with lock callbacks.
+ /// However, avc_init is deprecated and not exported by Android's version of libselinux.
+ /// `selinux_set_callbacks` does not allow setting lock callbacks. So the only option
+ /// that remains right now is to put a big lock around calls into libselinux.
+ /// TODO b/188079221 It should suffice to protect `selinux_check_access`, but until we are
+ /// certain of that, we leave the extra locks in place.
+ static ref LIB_SELINUX_LOCK: sync::Mutex<()> = Default::default();
+}
+
fn redirect_selinux_logs_to_logcat() {
// `selinux_set_callback` assigns the static lifetime function pointer
// `selinux_log_callback` to a static lifetime variable.
@@ -164,6 +171,8 @@
/// `selinux_android_keystore2_key_context_handle`.
pub fn new() -> Result<Self> {
init_logger_once();
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
let handle = unsafe { selinux::selinux_android_keystore2_key_context_handle() };
if handle.is_null() {
return Err(anyhow!(Error::sys("Failed to open KeystoreKeyBackend")));
@@ -192,6 +201,8 @@
match unsafe {
// No need to initialize the logger here because it cannot run unless
// KeystoreKeyBackend::new has run.
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
selinux::selabel_lookup(self.handle, &mut con, c_key.as_ptr(), Self::BACKEND_TYPE)
} {
0 => {
@@ -219,6 +230,8 @@
/// * Err(io::Error::last_os_error()) if getcon failed.
pub fn getcon() -> Result<Context> {
init_logger_once();
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
let mut con: *mut c_char = ptr::null_mut();
match unsafe { selinux::getcon(&mut con) } {
0 => {
@@ -241,6 +254,8 @@
/// * Err(io::Error::last_os_error()) if getpidcon failed.
pub fn getpidcon(pid: selinux::pid_t) -> Result<Context> {
init_logger_once();
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
let mut con: *mut c_char = ptr::null_mut();
match unsafe { selinux::getpidcon(pid, &mut con) } {
0 => {
@@ -267,6 +282,7 @@
/// the access check.
pub fn check_access(source: &CStr, target: &CStr, tclass: &str, perm: &str) -> Result<()> {
init_logger_once();
+
let c_tclass = CString::new(tclass).with_context(|| {
format!("check_access: Failed to convert tclass \"{}\" to CString.", tclass)
})?;
@@ -275,6 +291,8 @@
})?;
match unsafe {
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
selinux::selinux_check_access(
source.as_ptr(),
target.as_ptr(),
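
A minimal standalone sketch (not part of this patch) of the locking pattern the hunk above introduces: every call into a library that is not thread safe is serialized behind one process-wide mutex held for the duration of the call. The function below is a placeholder, not a libselinux symbol.

    use std::sync::Mutex;

    static LIB_LOCK: Mutex<()> = Mutex::new(());

    // Placeholder standing in for an FFI call such as selinux_check_access.
    fn library_call() -> i32 {
        0
    }

    fn checked_call() -> i32 {
        // Guard held until the library call returns, mirroring LIB_SELINUX_LOCK.
        let _guard = LIB_LOCK.lock().unwrap();
        library_call()
    }

    fn main() {
        assert_eq!(checked_call(), 0);
    }
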
diff --git a/keystore2/src/async_task.rs b/keystore2/src/async_task.rs
index 4d0034a..e130024 100644
--- a/keystore2/src/async_task.rs
+++ b/keystore2/src/async_task.rs
@@ -170,6 +170,7 @@
{
let (ref condvar, ref state) = *self.state;
let mut state = state.lock().unwrap();
+
if hi_prio {
state.hi_prio_req.push_back(Box::new(f));
} else {
diff --git a/keystore2/src/audit_log.rs b/keystore2/src/audit_log.rs
index 30fc155..3d7d26e 100644
--- a/keystore2/src/audit_log.rs
+++ b/keystore2/src/audit_log.rs
@@ -25,14 +25,15 @@
const TAG_KEY_GENERATED: u32 = 210024;
const TAG_KEY_IMPORTED: u32 = 210025;
const TAG_KEY_DESTROYED: u32 = 210026;
+const TAG_KEY_INTEGRITY_VIOLATION: u32 = 210032;
-const NAMESPACE_MASK: i64 = 0x80000000;
+const FLAG_NAMESPACE: i64 = 0x80000000;
-/// For app domain returns calling app uid, for SELinux domain returns masked namespace.
-fn key_owner(key: &KeyDescriptor, calling_app: uid_t) -> i32 {
- match key.domain {
- Domain::APP => calling_app as i32,
- Domain::SELINUX => (key.nspace | NAMESPACE_MASK) as i32,
+/// Encode key owner as either uid or namespace with a flag.
+fn key_owner(domain: Domain, nspace: i64, uid: i32) -> i32 {
+ match domain {
+ Domain::APP => uid,
+ Domain::SELINUX => (nspace | FLAG_NAMESPACE) as i32,
_ => {
log::info!("Not logging audit event for key with unexpected domain");
0
@@ -55,12 +56,29 @@
log_key_event(TAG_KEY_DESTROYED, key, calling_app, success);
}
+/// Logs key integrity violation to NIAP audit log.
+pub fn log_key_integrity_violation(key: &KeyDescriptor) {
+ with_log_context(TAG_KEY_INTEGRITY_VIOLATION, |ctx| {
+ let owner = key_owner(key.domain, key.nspace, key.nspace as i32);
+ ctx.append_str(key.alias.as_ref().map_or("none", String::as_str)).append_i32(owner)
+ })
+}
+
fn log_key_event(tag: u32, key: &KeyDescriptor, calling_app: uid_t, success: bool) {
- if let Some(ctx) = LogContext::new(LogIdSecurity, tag) {
- let event = ctx
- .append_i32(if success { 1 } else { 0 })
+ with_log_context(tag, |ctx| {
+ let owner = key_owner(key.domain, key.nspace, calling_app as i32);
+ ctx.append_i32(if success { 1 } else { 0 })
.append_str(key.alias.as_ref().map_or("none", String::as_str))
- .append_i32(key_owner(key, calling_app));
+ .append_i32(owner)
+ })
+}
+
+fn with_log_context<F>(tag: u32, f: F)
+where
+ F: Fn(LogContext) -> LogContext,
+{
+ if let Some(ctx) = LogContext::new(LogIdSecurity, tag) {
+ let event = f(ctx);
LOGS_HANDLER.queue_lo(move |_| {
event.write();
});
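
A worked example (not part of this patch) of the owner encoding used by key_owner above: an SELinux namespace gets FLAG_NAMESPACE (bit 31) set before being narrowed to i32, so it surfaces as a negative value and cannot collide with an app uid.

    const FLAG_NAMESPACE: i64 = 0x80000000;

    fn selinux_owner(nspace: i64) -> i32 {
        (nspace | FLAG_NAMESPACE) as i32
    }

    fn main() {
        // Namespace 101 comes out with bit 31 set, i.e. negative.
        assert_eq!(selinux_owner(101), -2147483547);
    }
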
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index d07dab5..777089f 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -121,7 +121,7 @@
// Check keystore permission.
check_keystore_permission(KeystorePerm::add_auth()).context("In add_auth_token.")?;
- ENFORCEMENTS.add_auth_token(auth_token.clone())?;
+ ENFORCEMENTS.add_auth_token(auth_token.clone());
Ok(())
}
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index e1185f3..2930162 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -41,10 +41,12 @@
//! from the database module these functions take permission check
//! callbacks.
+mod perboot;
+
use crate::impl_metadata; // This is in db_utils.rs
use crate::key_parameter::{KeyParameter, Tag};
use crate::permission::KeyPermSet;
-use crate::utils::{get_current_time_in_seconds, watchdog as wd, AID_USER_OFFSET};
+use crate::utils::{get_current_time_in_milliseconds, watchdog as wd, AID_USER_OFFSET};
use crate::{
db_utils::{self, SqlField},
gc::Gc,
@@ -61,9 +63,6 @@
HardwareAuthToken::HardwareAuthToken,
HardwareAuthenticatorType::HardwareAuthenticatorType, SecurityLevel::SecurityLevel,
};
-use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
- Timestamp::Timestamp,
-};
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor,
};
@@ -734,32 +733,28 @@
pub struct KeystoreDB {
conn: Connection,
gc: Option<Arc<Gc>>,
+ perboot: Arc<perboot::PerbootDB>,
}
/// Database representation of the monotonic time retrieved from the system call clock_gettime with
-/// CLOCK_MONOTONIC_RAW. Stores monotonic time as i64 in seconds.
+/// CLOCK_MONOTONIC_RAW. Stores monotonic time as i64 in milliseconds.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct MonotonicRawTime(i64);
impl MonotonicRawTime {
/// Constructs a new MonotonicRawTime
pub fn now() -> Self {
- Self(get_current_time_in_seconds())
+ Self(get_current_time_in_milliseconds())
}
- /// Constructs a new MonotonicRawTime from a given number of seconds.
- pub fn from_secs(val: i64) -> Self {
- Self(val)
+ /// Returns the value of MonotonicRawTime in milliseconds as i64
+ pub fn milliseconds(&self) -> i64 {
+ self.0
}
/// Returns the integer value of MonotonicRawTime as i64
pub fn seconds(&self) -> i64 {
- self.0
- }
-
- /// Returns the value of MonotonicRawTime in milli seconds as i64
- pub fn milli_seconds(&self) -> i64 {
- self.0 * 1000
+ self.0 / 1000
}
/// Like i64::checked_sub.
@@ -782,8 +777,10 @@
/// This struct encapsulates the information to be stored in the database about the auth tokens
/// received by keystore.
+#[derive(Clone)]
pub struct AuthTokenEntry {
auth_token: HardwareAuthToken,
+ // Time received in milliseconds
time_received: MonotonicRawTime,
}
@@ -828,19 +825,22 @@
impl KeystoreDB {
const UNASSIGNED_KEY_ID: i64 = -1i64;
- const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
/// Name of the file that holds the cross-boot persistent database.
pub const PERSISTENT_DB_FILENAME: &'static str = &"persistent.sqlite";
- /// This creates a PerBootDbKeepAlive object to keep the per boot database alive.
- pub fn keep_perboot_db_alive() -> Result<PerBootDbKeepAlive> {
- let conn = Connection::open_in_memory()
- .context("In keep_perboot_db_alive: Failed to initialize SQLite connection.")?;
-
- conn.execute("ATTACH DATABASE ? as perboot;", params![Self::PERBOOT_DB_FILE_NAME])
- .context("In keep_perboot_db_alive: Failed to attach database perboot.")?;
- Ok(PerBootDbKeepAlive(conn))
+ /// Set write-ahead logging mode on the persistent database found in `db_root`.
+ pub fn set_wal_mode(db_root: &Path) -> Result<()> {
+ let path = Self::make_persistent_path(&db_root)?;
+ let conn =
+ Connection::open(path).context("In KeystoreDB::set_wal_mode: Failed to open DB")?;
+ let mode: String = conn
+ .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
+ .context("In KeystoreDB::set_wal_mode: Failed to set journal_mode")?;
+ match mode.as_str() {
+ "wal" => Ok(()),
+ _ => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", mode)),
+ }
}
/// This will create a new database connection connecting the two
@@ -851,20 +851,10 @@
pub fn new(db_root: &Path, gc: Option<Arc<Gc>>) -> Result<Self> {
let _wp = wd::watch_millis("KeystoreDB::new", 500);
- // Build the path to the sqlite file.
- let mut persistent_path = db_root.to_path_buf();
- persistent_path.push(Self::PERSISTENT_DB_FILENAME);
+ let persistent_path = Self::make_persistent_path(&db_root)?;
+ let conn = Self::make_connection(&persistent_path)?;
- // Now convert them to strings prefixed with "file:"
- let mut persistent_path_str = "file:".to_owned();
- persistent_path_str.push_str(&persistent_path.to_string_lossy());
-
- let conn = Self::make_connection(&persistent_path_str, &Self::PERBOOT_DB_FILE_NAME)?;
-
- // On busy fail Immediately. It is unlikely to succeed given a bug in sqlite.
- conn.busy_handler(None).context("In KeystoreDB::new: Failed to set busy handler.")?;
-
- let mut db = Self { conn, gc };
+ let mut db = Self { conn, gc, perboot: perboot::PERBOOT_DB.clone() };
db.with_transaction(TransactionBehavior::Immediate, |tx| {
Self::init_tables(tx).context("Trying to initialize tables.").no_gc()
})?;
@@ -978,41 +968,22 @@
)
.context("Failed to initialize \"grant\" table.")?;
- //TODO: only drop the following two perboot tables if this is the first start up
- //during the boot (b/175716626).
- // tx.execute("DROP TABLE IF EXISTS perboot.authtoken;", NO_PARAMS)
- // .context("Failed to drop perboot.authtoken table")?;
- tx.execute(
- "CREATE TABLE IF NOT EXISTS perboot.authtoken (
- id INTEGER PRIMARY KEY,
- challenge INTEGER,
- user_id INTEGER,
- auth_id INTEGER,
- authenticator_type INTEGER,
- timestamp INTEGER,
- mac BLOB,
- time_received INTEGER,
- UNIQUE(user_id, auth_id, authenticator_type));",
- NO_PARAMS,
- )
- .context("Failed to initialize \"authtoken\" table.")?;
-
- // tx.execute("DROP TABLE IF EXISTS perboot.metadata;", NO_PARAMS)
- // .context("Failed to drop perboot.metadata table")?;
- // metadata table stores certain miscellaneous information required for keystore functioning
- // during a boot cycle, as key-value pairs.
- tx.execute(
- "CREATE TABLE IF NOT EXISTS perboot.metadata (
- key TEXT,
- value BLOB,
- UNIQUE(key));",
- NO_PARAMS,
- )
- .context("Failed to initialize \"metadata\" table.")?;
Ok(())
}
- fn make_connection(persistent_file: &str, perboot_file: &str) -> Result<Connection> {
+ fn make_persistent_path(db_root: &Path) -> Result<String> {
+ // Build the path to the sqlite file.
+ let mut persistent_path = db_root.to_path_buf();
+ persistent_path.push(Self::PERSISTENT_DB_FILENAME);
+
+ // Now convert them to strings prefixed with "file:"
+ let mut persistent_path_str = "file:".to_owned();
+ persistent_path_str.push_str(&persistent_path.to_string_lossy());
+
+ Ok(persistent_path_str)
+ }
+
+ fn make_connection(persistent_file: &str) -> Result<Connection> {
let conn =
Connection::open_in_memory().context("Failed to initialize SQLite connection.")?;
@@ -1030,20 +1001,10 @@
}
break;
}
- loop {
- if let Err(e) = conn
- .execute("ATTACH DATABASE ? as perboot;", params![perboot_file])
- .context("Failed to attach database perboot.")
- {
- if Self::is_locked_error(&e) {
- std::thread::sleep(std::time::Duration::from_micros(500));
- continue;
- } else {
- return Err(e);
- }
- }
- break;
- }
+
+ // Drop the cache size from default (2M) to 0.5M
+ conn.execute("PRAGMA persistent.cache_size = -500;", params![])
+ .context("Failed to decrease cache size for persistent db")?;
Ok(conn)
}
@@ -1129,7 +1090,15 @@
}
StatsdStorageType::Grant => self.get_table_size(storage_type, "persistent", "grant"),
StatsdStorageType::AuthToken => {
- self.get_table_size(storage_type, "perboot", "authtoken")
+ // Since auth tokens now live in an in-memory collection rather than an
+ // SQLite table, unused_size is not meaningfully reportable.
+ // The size provided is only an approximation.
+ Ok(Keystore2StorageStats {
+ storage_type,
+ size: (self.perboot.auth_tokens_len() * std::mem::size_of::<AuthTokenEntry>())
+ as i64,
+ unused_size: 0,
+ })
}
StatsdStorageType::BlobMetadata => {
self.get_table_size(storage_type, "persistent", "blobmetadata")
@@ -1456,18 +1425,6 @@
.context("In get_or_create_key_with.")
}
- /// SQLite3 seems to hold a shared mutex while running the busy handler when
- /// waiting for the database file to become available. This makes it
- /// impossible to successfully recover from a locked database when the
- /// transaction holding the device busy is in the same process on a
- /// different connection. As a result the busy handler has to time out and
- /// fail in order to make progress.
- ///
- /// Instead, we set the busy handler to None (return immediately). And catch
- /// Busy and Locked errors (the latter occur on in memory databases with
- /// shared cache, e.g., the per-boot database.) and restart the transaction
- /// after a grace period of half a millisecond.
- ///
/// Creates a transaction with the given behavior and executes f with the new transaction.
/// The transaction is committed only if f returns Ok and retried if DatabaseBusy
/// or DatabaseLocked is encountered.
@@ -2438,11 +2395,12 @@
let mut stmt = tx
.prepare(
"SELECT keyentryid, access_vector FROM persistent.grant
- WHERE grantee = ? AND id = ?;",
+ WHERE grantee = ? AND id = ? AND
+ (SELECT state FROM persistent.keyentry WHERE id = keyentryid) = ?;",
)
.context("Domain::GRANT prepare statement failed")?;
let mut rows = stmt
- .query(params![caller_uid as i64, key.nspace])
+ .query(params![caller_uid as i64, key.nspace, KeyLifeCycle::Live])
.context("Domain:Grant: query failed.")?;
let (key_id, access_vector): (i64, i32) =
db_utils::with_rows_extract_one(&mut rows, |row| {
@@ -3009,19 +2967,28 @@
/// Returns a list of KeyDescriptors in the selected domain/namespace.
/// The key descriptors will have the domain, nspace, and alias field set.
/// Domain must be APP or SELINUX, the caller must make sure of that.
- pub fn list(&mut self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+ pub fn list(
+ &mut self,
+ domain: Domain,
+ namespace: i64,
+ key_type: KeyType,
+ ) -> Result<Vec<KeyDescriptor>> {
let _wp = wd::watch_millis("KeystoreDB::list", 500);
self.with_transaction(TransactionBehavior::Deferred, |tx| {
let mut stmt = tx
.prepare(
"SELECT alias FROM persistent.keyentry
- WHERE domain = ? AND namespace = ? AND alias IS NOT NULL AND state = ?;",
+ WHERE domain = ?
+ AND namespace = ?
+ AND alias IS NOT NULL
+ AND state = ?
+ AND key_type = ?;",
)
.context("In list: Failed to prepare.")?;
let mut rows = stmt
- .query(params![domain.0 as u32, namespace, KeyLifeCycle::Live])
+ .query(params![domain.0 as u32, namespace, KeyLifeCycle::Live, key_type])
.context("In list: Failed to query.")?;
let mut descriptors: Vec<KeyDescriptor> = Vec::new();
@@ -3171,110 +3138,59 @@
}
}
- /// Insert or replace the auth token based on the UNIQUE constraint of the auth token table
- pub fn insert_auth_token(&mut self, auth_token: &HardwareAuthToken) -> Result<()> {
- let _wp = wd::watch_millis("KeystoreDB::insert_auth_token", 500);
-
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "INSERT OR REPLACE INTO perboot.authtoken (challenge, user_id, auth_id,
- authenticator_type, timestamp, mac, time_received) VALUES(?, ?, ?, ?, ?, ?, ?);",
- params![
- auth_token.challenge,
- auth_token.userId,
- auth_token.authenticatorId,
- auth_token.authenticatorType.0 as i32,
- auth_token.timestamp.milliSeconds as i64,
- auth_token.mac,
- MonotonicRawTime::now(),
- ],
- )
- .context("In insert_auth_token: failed to insert auth token into the database")?;
- Ok(()).no_gc()
- })
+ /// Insert or replace the auth token based on (user_id, auth_id, auth_type)
+ pub fn insert_auth_token(&mut self, auth_token: &HardwareAuthToken) {
+ self.perboot.insert_auth_token_entry(AuthTokenEntry::new(
+ auth_token.clone(),
+ MonotonicRawTime::now(),
+ ))
}
/// Find the newest auth token matching the given predicate.
- pub fn find_auth_token_entry<F>(
- &mut self,
- p: F,
- ) -> Result<Option<(AuthTokenEntry, MonotonicRawTime)>>
+ pub fn find_auth_token_entry<F>(&self, p: F) -> Option<(AuthTokenEntry, MonotonicRawTime)>
where
F: Fn(&AuthTokenEntry) -> bool,
{
- let _wp = wd::watch_millis("KeystoreDB::find_auth_token_entry", 500);
-
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let mut stmt = tx
- .prepare("SELECT * from perboot.authtoken ORDER BY time_received DESC;")
- .context("Prepare statement failed.")?;
-
- let mut rows = stmt.query(NO_PARAMS).context("Failed to query.")?;
-
- while let Some(row) = rows.next().context("Failed to get next row.")? {
- let entry = AuthTokenEntry::new(
- HardwareAuthToken {
- challenge: row.get(1)?,
- userId: row.get(2)?,
- authenticatorId: row.get(3)?,
- authenticatorType: HardwareAuthenticatorType(row.get(4)?),
- timestamp: Timestamp { milliSeconds: row.get(5)? },
- mac: row.get(6)?,
- },
- row.get(7)?,
- );
- if p(&entry) {
- return Ok(Some((
- entry,
- Self::get_last_off_body(tx)
- .context("In find_auth_token_entry: Trying to get last off body")?,
- )))
- .no_gc();
- }
- }
- Ok(None).no_gc()
- })
- .context("In find_auth_token_entry.")
+ self.perboot.find_auth_token_entry(p).map(|entry| (entry, self.get_last_off_body()))
}
/// Insert last_off_body into the metadata table at the initialization of auth token table
- pub fn insert_last_off_body(&mut self, last_off_body: MonotonicRawTime) -> Result<()> {
- let _wp = wd::watch_millis("KeystoreDB::insert_last_off_body", 500);
-
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "INSERT OR REPLACE INTO perboot.metadata (key, value) VALUES (?, ?);",
- params!["last_off_body", last_off_body],
- )
- .context("In insert_last_off_body: failed to insert.")?;
- Ok(()).no_gc()
- })
+ pub fn insert_last_off_body(&self, last_off_body: MonotonicRawTime) {
+ self.perboot.set_last_off_body(last_off_body)
}
/// Update last_off_body when on_device_off_body is called
- pub fn update_last_off_body(&mut self, last_off_body: MonotonicRawTime) -> Result<()> {
- let _wp = wd::watch_millis("KeystoreDB::update_last_off_body", 500);
-
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "UPDATE perboot.metadata SET value = ? WHERE key = ?;",
- params![last_off_body, "last_off_body"],
- )
- .context("In update_last_off_body: failed to update.")?;
- Ok(()).no_gc()
- })
+ pub fn update_last_off_body(&self, last_off_body: MonotonicRawTime) {
+ self.perboot.set_last_off_body(last_off_body)
}
/// Get last_off_body time when finding auth tokens
- fn get_last_off_body(tx: &Transaction) -> Result<MonotonicRawTime> {
- let _wp = wd::watch_millis("KeystoreDB::get_last_off_body", 500);
+ fn get_last_off_body(&self) -> MonotonicRawTime {
+ self.perboot.get_last_off_body()
+ }
- tx.query_row(
- "SELECT value from perboot.metadata WHERE key = ?;",
- params!["last_off_body"],
- |row| row.get(0),
- )
- .context("In get_last_off_body: query_row failed.")
+ /// Load descriptor of a key by key id
+ pub fn load_key_descriptor(&mut self, key_id: i64) -> Result<Option<KeyDescriptor>> {
+ let _wp = wd::watch_millis("KeystoreDB::load_key_descriptor", 500);
+
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ tx.query_row(
+ "SELECT domain, namespace, alias FROM persistent.keyentry WHERE id = ?;",
+ params![key_id],
+ |row| {
+ Ok(KeyDescriptor {
+ domain: Domain(row.get(0)?),
+ nspace: row.get(1)?,
+ alias: row.get(2)?,
+ blob: None,
+ })
+ },
+ )
+ .optional()
+ .context("Trying to load key descriptor")
+ .no_gc()
+ })
+ .context("In load_key_descriptor.")
}
}
@@ -3297,8 +3213,9 @@
use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
Timestamp::Timestamp,
};
+ use rusqlite::DatabaseName::Attached;
use rusqlite::NO_PARAMS;
- use rusqlite::{Error, TransactionBehavior};
+ use rusqlite::TransactionBehavior;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fmt::Write;
@@ -3310,9 +3227,9 @@
use std::time::Instant;
fn new_test_db() -> Result<KeystoreDB> {
- let conn = KeystoreDB::make_connection("file::memory:", "file::memory:")?;
+ let conn = KeystoreDB::make_connection("file::memory:")?;
- let mut db = KeystoreDB { conn, gc: None };
+ let mut db = KeystoreDB { conn, gc: None, perboot: Arc::new(perboot::PerbootDB::new()) };
db.with_transaction(TransactionBehavior::Immediate, |tx| {
KeystoreDB::init_tables(tx).context("Failed to initialize tables.").no_gc()
})?;
@@ -3398,15 +3315,6 @@
assert_eq!(tables[3], "keyentry");
assert_eq!(tables[4], "keymetadata");
assert_eq!(tables[5], "keyparameter");
- let tables = db
- .conn
- .prepare("SELECT name from perboot.sqlite_master WHERE type='table' ORDER BY name;")?
- .query_map(params![], |row| row.get(0))?
- .collect::<rusqlite::Result<Vec<String>>>()?;
-
- assert_eq!(tables.len(), 2);
- assert_eq!(tables[0], "authtoken");
- assert_eq!(tables[1], "metadata");
Ok(())
}
@@ -3421,8 +3329,8 @@
timestamp: Timestamp { milliSeconds: 500 },
mac: String::from("mac").into_bytes(),
};
- db.insert_auth_token(&auth_token1)?;
- let auth_tokens_returned = get_auth_tokens(&mut db)?;
+ db.insert_auth_token(&auth_token1);
+ let auth_tokens_returned = get_auth_tokens(&db);
assert_eq!(auth_tokens_returned.len(), 1);
// insert another auth token with the same values for the columns in the UNIQUE constraint
@@ -3436,8 +3344,8 @@
mac: String::from("mac").into_bytes(),
};
- db.insert_auth_token(&auth_token2)?;
- let mut auth_tokens_returned = get_auth_tokens(&mut db)?;
+ db.insert_auth_token(&auth_token2);
+ let mut auth_tokens_returned = get_auth_tokens(&db);
assert_eq!(auth_tokens_returned.len(), 1);
if let Some(auth_token) = auth_tokens_returned.pop() {
@@ -3455,33 +3363,16 @@
mac: String::from("mac").into_bytes(),
};
- db.insert_auth_token(&auth_token3)?;
- let auth_tokens_returned = get_auth_tokens(&mut db)?;
+ db.insert_auth_token(&auth_token3);
+ let auth_tokens_returned = get_auth_tokens(&db);
assert_eq!(auth_tokens_returned.len(), 2);
Ok(())
}
// utility function for test_auth_token_table_invariant()
- fn get_auth_tokens(db: &mut KeystoreDB) -> Result<Vec<AuthTokenEntry>> {
- let mut stmt = db.conn.prepare("SELECT * from perboot.authtoken;")?;
-
- let auth_token_entries: Vec<AuthTokenEntry> = stmt
- .query_map(NO_PARAMS, |row| {
- Ok(AuthTokenEntry::new(
- HardwareAuthToken {
- challenge: row.get(1)?,
- userId: row.get(2)?,
- authenticatorId: row.get(3)?,
- authenticatorType: HardwareAuthenticatorType(row.get(4)?),
- timestamp: Timestamp { milliSeconds: row.get(5)? },
- mac: row.get(6)?,
- },
- row.get(7)?,
- ))
- })?
- .collect::<Result<Vec<AuthTokenEntry>, Error>>()?;
- Ok(auth_token_entries)
+ fn get_auth_tokens(db: &KeystoreDB) -> Vec<AuthTokenEntry> {
+ db.perboot.get_all_auth_token_entries()
}
#[test]
@@ -4865,7 +4756,7 @@
})
.collect();
list_o_descriptors.sort();
- let mut list_result = db.list(*domain, *namespace)?;
+ let mut list_result = db.list(*domain, *namespace, KeyType::Client)?;
list_result.sort();
assert_eq!(list_o_descriptors, list_result);
@@ -4895,7 +4786,7 @@
loaded_entries.sort_unstable();
assert_eq!(list_o_ids, loaded_entries);
}
- assert_eq!(Vec::<KeyDescriptor>::new(), db.list(Domain::SELINUX, 101)?);
+ assert_eq!(Vec::<KeyDescriptor>::new(), db.list(Domain::SELINUX, 101, KeyType::Client)?);
Ok(())
}
@@ -5335,17 +5226,17 @@
#[test]
fn test_last_off_body() -> Result<()> {
let mut db = new_test_db()?;
- db.insert_last_off_body(MonotonicRawTime::now())?;
+ db.insert_last_off_body(MonotonicRawTime::now());
let tx = db.conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
- let last_off_body_1 = KeystoreDB::get_last_off_body(&tx)?;
tx.commit()?;
+ let last_off_body_1 = db.get_last_off_body();
let one_second = Duration::from_secs(1);
thread::sleep(one_second);
- db.update_last_off_body(MonotonicRawTime::now())?;
+ db.update_last_off_body(MonotonicRawTime::now());
let tx2 = db.conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
- let last_off_body_2 = KeystoreDB::get_last_off_body(&tx2)?;
tx2.commit()?;
- assert!(last_off_body_1.seconds() < last_off_body_2.seconds());
+ let last_off_body_2 = db.get_last_off_body();
+ assert!(last_off_body_1 < last_off_body_2);
Ok(())
}
@@ -5358,11 +5249,11 @@
make_test_key_entry(&mut db, Domain::APP, 110000, TEST_ALIAS, None)?;
db.unbind_keys_for_user(2, false)?;
- assert_eq!(1, db.list(Domain::APP, 110000)?.len());
- assert_eq!(0, db.list(Domain::APP, 210000)?.len());
+ assert_eq!(1, db.list(Domain::APP, 110000, KeyType::Client)?.len());
+ assert_eq!(0, db.list(Domain::APP, 210000, KeyType::Client)?.len());
db.unbind_keys_for_user(1, true)?;
- assert_eq!(0, db.list(Domain::APP, 110000)?.len());
+ assert_eq!(0, db.list(Domain::APP, 110000, KeyType::Client)?.len());
Ok(())
}
@@ -5431,7 +5322,12 @@
for t in get_valid_statsd_storage_types() {
let stat = db.get_storage_stat(t)?;
- assert!(stat.size >= PAGE_SIZE);
+ // AuthToken can be less than a page since it lives in memory, not sqlite
+ // TODO(b/187474736) stop using if-let here
+ if let StatsdStorageType::AuthToken = t {
+ } else {
+ assert!(stat.size >= PAGE_SIZE);
+ }
assert!(stat.size >= stat.unused_size);
}
@@ -5561,7 +5457,7 @@
authenticatorType: kmhw_authenticator_type::ANY,
timestamp: Timestamp { milliSeconds: 10 },
mac: b"mac".to_vec(),
- })?;
+ });
assert_storage_increased(&mut db, vec![StatsdStorageType::AuthToken], &mut working_stats);
Ok(())
}
@@ -5590,4 +5486,78 @@
Ok(())
}
+
+ #[test]
+ fn find_auth_token_entry_returns_latest() -> Result<()> {
+ let mut db = new_test_db()?;
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 456,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 10 },
+ mac: b"mac0".to_vec(),
+ });
+ std::thread::sleep(std::time::Duration::from_millis(1));
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 457,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 12 },
+ mac: b"mac1".to_vec(),
+ });
+ std::thread::sleep(std::time::Duration::from_millis(1));
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 458,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 3 },
+ mac: b"mac2".to_vec(),
+ });
+ // All three entries are in the database
+ assert_eq!(db.perboot.auth_tokens_len(), 3);
+ // It selected the most recent timestamp
+ assert_eq!(db.find_auth_token_entry(|_| true).unwrap().0.auth_token.mac, b"mac2".to_vec());
+ Ok(())
+ }
+
+ #[test]
+ fn test_set_wal_mode() -> Result<()> {
+ let temp_dir = TempDir::new("test_set_wal_mode")?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
+ let mode: String =
+ db.conn.pragma_query_value(Some(Attached("persistent")), "journal_mode", |row| {
+ row.get(0)
+ })?;
+ assert_eq!(mode, "delete");
+ db.conn.close().expect("Close didn't work");
+
+ KeystoreDB::set_wal_mode(temp_dir.path())?;
+
+ db = KeystoreDB::new(temp_dir.path(), None)?;
+ let mode: String =
+ db.conn.pragma_query_value(Some(Attached("persistent")), "journal_mode", |row| {
+ row.get(0)
+ })?;
+ assert_eq!(mode, "wal");
+ Ok(())
+ }
+
+ #[test]
+ fn test_load_key_descriptor() -> Result<()> {
+ let mut db = new_test_db()?;
+ let key_id = make_test_key_entry(&mut db, Domain::APP, 1, TEST_ALIAS, None)?.0;
+
+ let key = db.load_key_descriptor(key_id)?.unwrap();
+
+ assert_eq!(key.domain, Domain::APP);
+ assert_eq!(key.nspace, 1);
+ assert_eq!(key.alias, Some(TEST_ALIAS.to_string()));
+
+ // No such id
+ assert_eq!(db.load_key_descriptor(key_id + 1)?, None);
+ Ok(())
+ }
}
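
A small sketch (not part of this patch, assuming the rusqlite and anyhow crates already used here) of the WAL switch that set_wal_mode performs: update the journal_mode pragma and verify the value the database reports back. The file name is a placeholder.

    use anyhow::{anyhow, Context, Result};
    use rusqlite::Connection;

    fn set_wal_mode(path: &str) -> Result<()> {
        let conn = Connection::open(path).context("Failed to open DB")?;
        // Ask SQLite to switch to write-ahead logging and read back the mode.
        let mode: String = conn
            .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
            .context("Failed to set journal_mode")?;
        match mode.as_str() {
            "wal" => Ok(()),
            other => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", other)),
        }
    }

    fn main() -> Result<()> {
        set_wal_mode("example.sqlite")
    }
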
diff --git a/keystore2/src/database/perboot.rs b/keystore2/src/database/perboot.rs
new file mode 100644
index 0000000..7ff35fa
--- /dev/null
+++ b/keystore2/src/database/perboot.rs
@@ -0,0 +1,122 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements a per-boot, shared, in-memory storage of auth tokens
+//! and last-time-on-body for the main Keystore 2.0 database module.
+
+use super::{AuthTokenEntry, MonotonicRawTime};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ HardwareAuthToken::HardwareAuthToken, HardwareAuthenticatorType::HardwareAuthenticatorType,
+};
+use lazy_static::lazy_static;
+use std::collections::HashSet;
+use std::sync::atomic::{AtomicI64, Ordering};
+use std::sync::Arc;
+use std::sync::RwLock;
+
+#[derive(PartialEq, PartialOrd, Ord, Eq, Hash)]
+struct AuthTokenId {
+ user_id: i64,
+ auth_id: i64,
+ authenticator_type: HardwareAuthenticatorType,
+}
+
+impl AuthTokenId {
+ fn from_auth_token(tok: &HardwareAuthToken) -> Self {
+ AuthTokenId {
+ user_id: tok.userId,
+ auth_id: tok.authenticatorId,
+ authenticator_type: tok.authenticatorType,
+ }
+ }
+}
+
+// Implements Eq/Hash to operate only on the AuthTokenId portion of the
+// AuthTokenEntry, so a HashSet replaces entries that share the same id.
+#[derive(Clone)]
+struct AuthTokenEntryWrap(AuthTokenEntry);
+
+impl std::hash::Hash for AuthTokenEntryWrap {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ AuthTokenId::from_auth_token(&self.0.auth_token).hash(state)
+ }
+}
+
+impl PartialEq<AuthTokenEntryWrap> for AuthTokenEntryWrap {
+ fn eq(&self, other: &AuthTokenEntryWrap) -> bool {
+ AuthTokenId::from_auth_token(&self.0.auth_token)
+ == AuthTokenId::from_auth_token(&other.0.auth_token)
+ }
+}
+
+impl Eq for AuthTokenEntryWrap {}
+
+/// Per-boot state structure. Currently only used to track auth tokens and
+/// last-off-body.
+#[derive(Default)]
+pub struct PerbootDB {
+ // We can use a .unwrap() discipline on this lock, because only panicking
+ // while holding a .write() lock will poison it. The only write usage is
+ // an insert call which inserts a pre-constructed pair.
+ auth_tokens: RwLock<HashSet<AuthTokenEntryWrap>>,
+ // Ordering::Relaxed is appropriate for accessing this atomic, since it
+ // does not currently need to be synchronized with anything else.
+ last_off_body: AtomicI64,
+}
+
+lazy_static! {
+ /// The global instance of the perboot DB. Located here rather than in globals
+ /// in order to restrict access to the database module.
+ pub static ref PERBOOT_DB: Arc<PerbootDB> = Arc::new(PerbootDB::new());
+}
+
+impl PerbootDB {
+ /// Construct a new perboot database. Currently just uses default values.
+ pub fn new() -> Self {
+ Default::default()
+ }
+ /// Add a new auth token + timestamp to the database, replacing any which
+ /// match all of user_id, auth_id, and auth_type.
+ pub fn insert_auth_token_entry(&self, entry: AuthTokenEntry) {
+ self.auth_tokens.write().unwrap().replace(AuthTokenEntryWrap(entry));
+ }
+ /// Locate an auth token entry which matches the predicate with the most
+ /// recent update time.
+ pub fn find_auth_token_entry<P: Fn(&AuthTokenEntry) -> bool>(
+ &self,
+ p: P,
+ ) -> Option<AuthTokenEntry> {
+ let reader = self.auth_tokens.read().unwrap();
+ let mut matches: Vec<_> = reader.iter().filter(|x| p(&x.0)).collect();
+ matches.sort_by_key(|x| x.0.time_received);
+ matches.last().map(|x| x.0.clone())
+ }
+ /// Get the last time the device was off the user's body
+ pub fn get_last_off_body(&self) -> MonotonicRawTime {
+ MonotonicRawTime(self.last_off_body.load(Ordering::Relaxed))
+ }
+ /// Set the last time the device was off the user's body
+ pub fn set_last_off_body(&self, last_off_body: MonotonicRawTime) {
+ self.last_off_body.store(last_off_body.0, Ordering::Relaxed)
+ }
+ /// Return how many auth tokens are currently tracked.
+ pub fn auth_tokens_len(&self) -> usize {
+ self.auth_tokens.read().unwrap().len()
+ }
+ #[cfg(test)]
+ /// For testing, return all auth tokens currently tracked.
+ pub fn get_all_auth_token_entries(&self) -> Vec<AuthTokenEntry> {
+ self.auth_tokens.read().unwrap().iter().cloned().map(|x| x.0).collect()
+ }
+}
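
A standalone sketch (not part of this patch) of the replace-by-identity trick used by AuthTokenEntryWrap above: Hash and Eq consider only the id portion, so HashSet::replace swaps in a newer entry that carries the same id.

    use std::collections::HashSet;
    use std::hash::{Hash, Hasher};

    #[derive(Clone, Debug)]
    struct Entry {
        id: u32,
        payload: &'static str,
    }

    // Hash/Eq look only at `id`, mirroring AuthTokenId::from_auth_token.
    impl Hash for Entry {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.id.hash(state)
        }
    }

    impl PartialEq for Entry {
        fn eq(&self, other: &Entry) -> bool {
            self.id == other.id
        }
    }

    impl Eq for Entry {}

    fn main() {
        let mut set = HashSet::new();
        set.insert(Entry { id: 1, payload: "old" });
        set.replace(Entry { id: 1, payload: "new" }); // same id -> replaced
        assert_eq!(set.len(), 1);
        assert_eq!(set.iter().next().unwrap().payload, "new");
    }
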
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 04d1f77..29a3f0b 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -638,8 +638,7 @@
} else {
unlocked_device_required
}
- })
- .context("In authorize_create: Trying to get required auth token.")?;
+ });
Some(
hat_and_last_off_body
.ok_or(Error::Km(Ec::KEY_USER_NOT_AUTHENTICATED))
@@ -700,15 +699,11 @@
})
}
- fn find_auth_token<F>(p: F) -> Result<Option<(AuthTokenEntry, MonotonicRawTime)>>
+ fn find_auth_token<F>(p: F) -> Option<(AuthTokenEntry, MonotonicRawTime)>
where
F: Fn(&AuthTokenEntry) -> bool,
{
- DB.with(|db| {
- let mut db = db.borrow_mut();
- db.find_auth_token_entry(p).context("Trying to find auth token.")
- })
- .context("In find_auth_token.")
+ DB.with(|db| db.borrow().find_auth_token_entry(p))
}
/// Checks if the time now since epoch is greater than (or equal, if is_given_time_inclusive is
@@ -752,11 +747,9 @@
/// Add this auth token to the database.
/// Then present the auth token to the op auth map. If an operation is waiting for this
/// auth token this fulfills the request and removes the receiver from the map.
- pub fn add_auth_token(&self, hat: HardwareAuthToken) -> Result<()> {
- DB.with(|db| db.borrow_mut().insert_auth_token(&hat)).context("In add_auth_token.")?;
-
+ pub fn add_auth_token(&self, hat: HardwareAuthToken) {
+ DB.with(|db| db.borrow_mut().insert_auth_token(&hat));
self.op_auth_map.add_auth_token(hat);
- Ok(())
}
/// This allows adding an entry to the op_auth_map, indexed by the operation challenge.
@@ -824,28 +817,22 @@
// Filter the matching auth tokens by challenge
let result = Self::find_auth_token(|hat: &AuthTokenEntry| {
(challenge == hat.challenge()) && hat.satisfies(&sids, auth_type)
- })
- .context(
- "In get_auth_tokens: Failed to get a matching auth token filtered by challenge.",
- )?;
+ });
let auth_token = if let Some((auth_token_entry, _)) = result {
auth_token_entry.take_auth_token()
} else {
// Filter the matching auth tokens by age.
if auth_token_max_age_millis != 0 {
- let now_in_millis = MonotonicRawTime::now().milli_seconds();
+ let now_in_millis = MonotonicRawTime::now();
let result = Self::find_auth_token(|auth_token_entry: &AuthTokenEntry| {
let token_valid = now_in_millis
- .checked_sub(auth_token_entry.time_received().milli_seconds())
+ .checked_sub(&auth_token_entry.time_received())
.map_or(false, |token_age_in_millis| {
- auth_token_max_age_millis > token_age_in_millis
+ auth_token_max_age_millis > token_age_in_millis.milliseconds()
});
token_valid && auth_token_entry.satisfies(&sids, auth_type)
- })
- .context(
- "In get_auth_tokens: Failed to get a matching auth token filtered by age.",
- )?;
+ });
if let Some((auth_token_entry, _)) = result {
auth_token_entry.take_auth_token()
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index c492120..89114a6 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -39,11 +39,12 @@
use binder::FromIBinder;
use keystore2_vintf::get_aidl_instances;
use lazy_static::lazy_static;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex, RwLock};
use std::{cell::RefCell, sync::Once};
use std::{collections::HashMap, path::Path, path::PathBuf};
static DB_INIT: Once = Once::new();
+static DB_SET_WAL_MODE: Once = Once::new();
/// Open a connection to the Keystore 2.0 database. This is called during the initialization of
/// the thread local DB field. It should never be called directly. The first time this is called
@@ -54,15 +55,19 @@
/// is run only once, as long as the ASYNC_TASK instance is the same. So only one additional
/// database connection is created for the garbage collector worker.
pub fn create_thread_local_db() -> KeystoreDB {
- let mut db = KeystoreDB::new(
- &DB_PATH.lock().expect("Could not get the database directory."),
- Some(GC.clone()),
- )
- .expect("Failed to open database.");
+ let db_path = DB_PATH.read().expect("Could not get the database directory.");
+
+ DB_SET_WAL_MODE.call_once(|| {
+ log::info!("Setting Keystore 2.0 database to WAL mode first time since boot.");
+ KeystoreDB::set_wal_mode(&db_path)
+ .expect("In create_thread_local_db: Could not set WAL mode.");
+ });
+
+ let mut db = KeystoreDB::new(&db_path, Some(GC.clone())).expect("Failed to open database.");
+
DB_INIT.call_once(|| {
log::info!("Touching Keystore 2.0 database for this first time since boot.");
- db.insert_last_off_body(MonotonicRawTime::now())
- .expect("Could not initialize database with last off body.");
+ db.insert_last_off_body(MonotonicRawTime::now());
log::info!("Calling cleanup leftovers.");
let n = db.cleanup_leftovers().expect("Failed to cleanup database on startup.");
if n != 0 {
@@ -140,7 +145,7 @@
lazy_static! {
/// The path where keystore stores all its keys.
- pub static ref DB_PATH: Mutex<PathBuf> = Mutex::new(
+ pub static ref DB_PATH: RwLock<PathBuf> = RwLock::new(
Path::new("/data/misc/keystore").to_path_buf());
/// Runtime database of unwrapped super keys.
pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
@@ -158,7 +163,7 @@
/// LegacyBlobLoader is initialized and exists globally.
/// The same directory used by the database is used by the LegacyBlobLoader as well.
pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
- &DB_PATH.lock().expect("Could not get the database path for legacy blob loader.")));
+ &DB_PATH.read().expect("Could not get the database path for legacy blob loader.")));
/// Legacy migrator. Atomically migrates legacy blobs to the database.
pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
Arc::new(LegacyMigrator::new(Arc::new(Default::default())));
@@ -174,7 +179,7 @@
map_km_error(km_dev.deleteKey(&*blob))
.context("In invalidate key closure: Trying to invalidate key blob.")
}),
- KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), None)
+ KeystoreDB::new(&DB_PATH.read().expect("Could not get the database directory."), None)
.expect("Failed to open database."),
SUPER_KEY.clone(),
)
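
A minimal sketch (not part of this patch) of the std::sync::Once gating used by DB_SET_WAL_MODE above: the closure runs exactly once per process even if many threads create their thread-local connection concurrently.

    use std::sync::Once;

    static DB_SET_WAL_MODE: Once = Once::new();

    fn create_thread_local_db_stub() {
        DB_SET_WAL_MODE.call_once(|| {
            println!("one-time setup, e.g. switching the database to WAL mode");
        });
        println!("per-thread setup continues here");
    }

    fn main() {
        create_thread_local_db_stub();
        create_thread_local_db_stub(); // closure does not run a second time
    }
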
diff --git a/keystore2/src/key_parameter.rs b/keystore2/src/key_parameter.rs
index 74a9b23..549f574 100644
--- a/keystore2/src/key_parameter.rs
+++ b/keystore2/src/key_parameter.rs
@@ -90,8 +90,6 @@
//! * The termination condition which has an empty in list.
//! * The public interface, which does not have @marker and calls itself with an empty out list.
-#![allow(clippy::from_over_into, clippy::needless_question_mark)]
-
use std::convert::TryInto;
use crate::db_utils::SqlField;
@@ -601,9 +599,9 @@
], [$($in)*]
}};
(@into $enum_name:ident, [$($out:tt)*], []) => {
- impl Into<KmKeyParameter> for $enum_name {
- fn into(self) -> KmKeyParameter {
- match self {
+ impl From<$enum_name> for KmKeyParameter {
+ fn from(x: $enum_name) -> Self {
+ match x {
$($out)*
}
}
@@ -1389,11 +1387,11 @@
db.prepare("SELECT tag, data, security_level FROM persistent.keyparameter")?;
let mut rows = stmt.query(NO_PARAMS)?;
let row = rows.next()?.unwrap();
- Ok(KeyParameter::new_from_sql(
+ KeyParameter::new_from_sql(
Tag(row.get(0)?),
&SqlField::new(1, row),
SecurityLevel(row.get(2)?),
- )?)
+ )
}
}
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 4d4a718..3d53a36 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -47,10 +47,6 @@
// Saying hi.
info!("Keystore2 is starting.");
- // Initialize the per boot database.
- let _keep_me_alive = keystore2::database::KeystoreDB::keep_perboot_db_alive()
- .expect("Failed to initialize the perboot database.");
-
let mut args = std::env::args();
args.next().expect("That's odd. How is there not even a first argument?");
@@ -60,7 +56,7 @@
// For the ground truth check the service startup rule for init (typically in keystore2.rc).
let id_rotation_state = if let Some(dir) = args.next() {
let db_path = Path::new(&dir);
- *keystore2::globals::DB_PATH.lock().expect("Could not lock DB_PATH.") =
+ *keystore2::globals::DB_PATH.write().expect("Could not lock DB_PATH.") =
db_path.to_path_buf();
IdRotationState::new(&db_path)
} else {
@@ -125,7 +121,7 @@
}
let vpnprofilestore = VpnProfileStore::new_native_binder(
- &keystore2::globals::DB_PATH.lock().expect("Could not get DB_PATH."),
+ &keystore2::globals::DB_PATH.read().expect("Could not get DB_PATH."),
);
binder::add_service(VPNPROFILESTORE_SERVICE_NAME, vpnprofilestore.as_binder()).unwrap_or_else(
|e| {
@@ -136,12 +132,7 @@
},
);
- std::thread::spawn(|| {
- match metrics::register_pull_metrics_callbacks() {
- Err(e) => error!("register_pull_metrics_callbacks failed: {:?}.", e),
- _ => info!("Pull metrics callbacks successfully registered."),
- };
- });
+ metrics::register_pull_metrics_callbacks();
info!("Successfully registered Keystore 2.0 service.");
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 29d46ad..9eebb36 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -14,8 +14,6 @@
//! This module implements methods to load legacy keystore key blob files.
-#![allow(clippy::redundant_slicing)]
-
use crate::{
error::{Error as KsError, ResponseCode},
key_parameter::{KeyParameter, KeyParameterValue},
@@ -484,7 +482,7 @@
let element_size =
read_ne_u32(stream).context("In read_key_parameters: While reading element size.")?;
- let elements_buffer = stream
+ let mut element_stream = stream
.get(0..element_size as usize)
.ok_or(KsError::Rc(ResponseCode::VALUE_CORRUPTED))
.context("In read_key_parameters: While reading elements buffer.")?;
@@ -492,8 +490,6 @@
// update the stream position.
*stream = &stream[element_size as usize..];
- let mut element_stream = &elements_buffer[..];
-
let mut params: Vec<KeyParameterValue> = Vec::new();
for _ in 0..element_count {
let tag = Tag(read_ne_i32(&mut element_stream).context("In read_key_parameters.")?);
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index a099d18..0633bc1 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -179,8 +179,8 @@
check_keystore_permission(KeystorePerm::report_off_body())
.context("In on_device_off_body.")?;
- DB.with(|db| db.borrow_mut().update_last_off_body(MonotonicRawTime::now()))
- .context("In on_device_off_body: Trying to update last off body time.")
+ DB.with(|db| db.borrow_mut().update_last_off_body(MonotonicRawTime::now()));
+ Ok(())
}
fn migrate_key_namespace(source: &KeyDescriptor, destination: &KeyDescriptor) -> Result<()> {
diff --git a/keystore2/src/metrics.rs b/keystore2/src/metrics.rs
index 07c3d64..10a465c 100644
--- a/keystore2/src/metrics.rs
+++ b/keystore2/src/metrics.rs
@@ -23,7 +23,7 @@
KeyParameter::KeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
SecurityLevel::SecurityLevel,
};
-use anyhow::Result;
+use anyhow::{anyhow, Result};
use keystore2_system_property::PropertyWatcher;
use statslog_rust::{
keystore2_key_creation_event_reported::{
@@ -40,6 +40,47 @@
use statslog_rust_header::Atoms;
use statspull_rust::{set_pull_atom_callback, StatsPullResult};
+// Waits and returns Ok if boot is completed.
+fn wait_for_boot_completed() -> Result<()> {
+ let watcher = PropertyWatcher::new("sys.boot_completed");
+ match watcher {
+ Ok(mut watcher) => {
+ loop {
+ let wait_result = watcher.wait();
+ match wait_result {
+ Ok(_) => {
+ let value_result =
+ watcher.read(|_name, value| Ok(value.trim().to_string()));
+ match value_result {
+ Ok(value) => {
+ if value == "1" {
+ break;
+ }
+ }
+ Err(e) => {
+ log::error!(
+ "In wait_for_boot_completed: Failed while reading property. {}",
+ e
+ );
+ return Err(anyhow!("Error in waiting for boot completion."));
+ }
+ }
+ }
+ Err(e) => {
+ log::error!("In wait_for_boot_completed: Failed while waiting. {}", e);
+ return Err(anyhow!("Error in waiting for boot completion."));
+ }
+ }
+ }
+ Ok(())
+ }
+ Err(e) => {
+ log::error!("In wait_for_boot_completed: Failed to create PropertyWatcher. {}", e);
+ Err(anyhow!("Error in waiting for boot completion."))
+ }
+ }
+}
+
fn create_default_key_creation_atom() -> Keystore2KeyCreationEventReported {
// If a value is not present, fields represented by bitmaps and i32 fields
// will take 0, except error_code which defaults to 1 indicating NO_ERROR and key_size,
@@ -89,10 +130,10 @@
construct_key_creation_event_stats(sec_level, key_params, result);
LOGS_HANDLER.queue_lo(move |_| {
- let logging_result = key_creation_event_stats.stats_write();
-
- if let Err(e) = logging_result {
- log::error!("Error in logging key creation event in the async task. {:?}", e);
+ if let Ok(()) = wait_for_boot_completed() {
+ if let Err(e) = key_creation_event_stats.stats_write() {
+ log::error!("Error in logging key creation event in the async task. {:?}", e);
+ }
}
});
}
@@ -114,10 +155,10 @@
);
LOGS_HANDLER.queue_lo(move |_| {
- let logging_result = key_operation_event_stats.stats_write();
-
- if let Err(e) = logging_result {
- log::error!("Error in logging key operation event in the async task. {:?}", e);
+ if let Ok(()) = wait_for_boot_completed() {
+ if let Err(e) = key_operation_event_stats.stats_write() {
+ log::error!("Error in logging key operation event in the async task. {:?}", e);
+ }
}
});
}
@@ -383,21 +424,17 @@
}
/// Registers pull metrics callbacks
-pub fn register_pull_metrics_callbacks() -> Result<()> {
+pub fn register_pull_metrics_callbacks() {
// Before registering the callbacks with statsd, we have to wait for the system to finish
// booting up. This avoids possible races that may occur at startup. For example, statsd
// depends on a companion service, and if registration happens too soon it will fail since
// the companion service isn't up yet.
- let mut watcher = PropertyWatcher::new("sys.boot_completed")?;
- loop {
- watcher.wait()?;
- let value = watcher.read(|_name, value| Ok(value.trim().to_string()));
- if value? == "1" {
+ LOGS_HANDLER.queue_lo(move |_| {
+ if let Ok(()) = wait_for_boot_completed() {
set_pull_atom_callback(Atoms::Keystore2StorageStats, None, pull_metrics_callback);
- break;
+ log::info!("Pull metrics callbacks successfully registered.")
}
- }
- Ok(())
+ });
}
fn pull_metrics_callback() -> StatsPullResult {
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index 726c2ec..e7999bc 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -18,8 +18,6 @@
//! It also provides KeystorePerm and KeyPerm as convenience wrappers for the SELinux permission
//! defined by keystore2 and keystore2_key respectively.
-#![allow(clippy::from_over_into)]
-
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor, KeyPermission::KeyPermission,
};
@@ -151,9 +149,9 @@
}
}
- impl Into<$aidl_name> for $name {
- fn into(self) -> $aidl_name {
- self.0
+ impl From<$name> for $aidl_name {
+ fn from(p: $name) -> $aidl_name {
+ p.0
}
}
@@ -259,9 +257,9 @@
}
}
- impl Into<i32> for $name {
- fn into(self) -> i32 {
- self as i32
+ impl From<$name> for i32 {
+ fn from(p: $name) -> i32 {
+ p as i32
}
}
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index fc1a6ad..1f3f8e8 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -19,8 +19,6 @@
//! certificate chains signed by some root authority and stored in a keystore SQLite
//! DB.
-#![allow(clippy::from_over_into, clippy::needless_question_mark, clippy::vec_init_then_push)]
-
use std::collections::HashMap;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
@@ -252,7 +250,7 @@
// attestation keys unless the pool status is checked first, so this call should be
// enough to routinely clean out expired keys.
db.delete_expired_attestation_keys()?;
- Ok(db.get_attestation_pool_status(expired_by, &uuid)?)
+ db.get_attestation_pool_status(expired_by, &uuid)
})
}
@@ -294,14 +292,15 @@
protected_data,
))
.context("In generate_csr: Failed to generate csr")?;
- let mut cose_mac_0 = Vec::<u8>::new();
// TODO(b/180392379): Replace this manual CBOR generation with the cbor-serde crate as well.
// This generates an array consisting of the mac and the public key Maps.
// Just generate the actual MacedPublicKeys structure when the crate is
// available.
- cose_mac_0.push((0b100_00000 | (keys_to_sign.len() + 1)) as u8);
- cose_mac_0.push(0b010_11000); //push mac
- cose_mac_0.push(mac.len() as u8);
+ let mut cose_mac_0: Vec<u8> = vec![
+ (0b100_00000 | (keys_to_sign.len() + 1)) as u8,
+ 0b010_11000, // mac
+ (mac.len() as u8),
+ ];
cose_mac_0.append(&mut mac);
for maced_public_key in keys_to_sign {
if maced_public_key.macedKey.len() > 83 + 8 {
@@ -327,13 +326,13 @@
DB.with::<_, Result<()>>(|db| {
let mut db = db.borrow_mut();
let (_, _, uuid) = get_keymint_device(&sec_level)?;
- Ok(db.store_signed_attestation_certificate_chain(
+ db.store_signed_attestation_certificate_chain(
public_key,
batch_cert,
certs, /* DER encoded certificate chain */
expiration_date,
&uuid,
- )?)
+ )
})
}
@@ -362,7 +361,7 @@
raw_key[32..64].clone_from_slice(&data[53..53 + 32]);
DB.with::<_, Result<()>>(|db| {
let mut db = db.borrow_mut();
- Ok(db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)?)
+ db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)
})
}
@@ -377,7 +376,7 @@
pub fn delete_all_keys(&self) -> Result<i64> {
DB.with::<_, Result<i64>>(|db| {
let mut db = db.borrow_mut();
- Ok(db.delete_all_attestation_keys()?)
+ db.delete_all_attestation_keys()
})
}
}
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index d10aba0..00b26e4 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -15,7 +15,9 @@
//! This crate implements the IKeystoreSecurityLevel interface.
use crate::attestation_key_utils::{get_attest_key_info, AttestationKeyInfo};
-use crate::audit_log::{log_key_deleted, log_key_generated, log_key_imported};
+use crate::audit_log::{
+ log_key_deleted, log_key_generated, log_key_imported, log_key_integrity_violation,
+};
use crate::database::{CertificateInfo, KeyIdGuard};
use crate::error::{self, map_km_error, map_or_log_err, Error, ErrorCode};
use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
@@ -325,6 +327,18 @@
self.operation_db.prune(caller_uid, forced)?;
continue;
}
+ v @ Err(Error::Km(ErrorCode::INVALID_KEY_BLOB)) => {
+ if let Some((key_id, _)) = key_properties {
+ if let Ok(Some(key)) =
+ DB.with(|db| db.borrow_mut().load_key_descriptor(key_id))
+ {
+ log_key_integrity_violation(&key);
+ } else {
+ log::error!("Failed to load key descriptor for audit log");
+ }
+ }
+ return v;
+ }
v => return v,
}
},
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 3ce0550..1f61729 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -291,7 +291,7 @@
&mut DB
.with(|db| {
let mut db = db.borrow_mut();
- db.list(k.domain, k.nspace)
+ db.list(k.domain, k.nspace, KeyType::Client)
})
.context("In list_entries: Trying to list keystore database.")?,
);
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 848707c..7a8b9be 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -992,7 +992,7 @@
for sid in &biometric.sids {
if let Some((auth_token_entry, _)) = db.find_auth_token_entry(|entry| {
entry.auth_token().userId == *sid || entry.auth_token().authenticatorId == *sid
- })? {
+ }) {
let res: Result<(Arc<SuperKey>, Arc<SuperKey>)> = (|| {
let slb = biometric.screen_lock_bound.decrypt(
db,
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 9852aad..a110c64 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -36,7 +36,6 @@
APC_COMPAT_ERROR_IGNORED, APC_COMPAT_ERROR_OK, APC_COMPAT_ERROR_OPERATION_PENDING,
APC_COMPAT_ERROR_SYSTEM_ERROR,
};
-use std::convert::TryFrom;
use std::sync::Mutex;
/// This function uses its namesake in the permission module and in
@@ -186,19 +185,15 @@
parameters.into_iter().map(|p| p.into_authorization()).collect()
}
-/// This returns the current time (in seconds) as an instance of a monotonic clock, by invoking the
-/// system call since Rust does not support getting monotonic time instance as an integer.
-pub fn get_current_time_in_seconds() -> i64 {
+/// This returns the current time (in milliseconds) as an instance of a monotonic clock,
+/// by invoking the system call since Rust does not support getting monotonic time instance
+/// as an integer.
+pub fn get_current_time_in_milliseconds() -> i64 {
let mut current_time = libc::timespec { tv_sec: 0, tv_nsec: 0 };
// Following unsafe block includes one system call to get monotonic time.
// Therefore, it is not considered harmful.
unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC_RAW, &mut current_time) };
- // It is safe to unwrap here because try_from() returns std::convert::Infallible, which is
- // defined to be an error that can never happen (i.e. the result is always ok).
- // This suppresses the compiler's complaint about converting tv_sec to i64 in method
- // get_current_time_in_seconds.
- #[allow(clippy::useless_conversion)]
- i64::try_from(current_time.tv_sec).unwrap()
+ current_time.tv_sec as i64 * 1000 + (current_time.tv_nsec as i64 / 1_000_000)
}
/// Converts a response code as returned by the Android Protected Confirmation HIDL compatibility
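
A worked example (not part of this patch) of the timespec-to-milliseconds arithmetic in get_current_time_in_milliseconds above.

    fn to_milliseconds(tv_sec: i64, tv_nsec: i64) -> i64 {
        tv_sec * 1000 + tv_nsec / 1_000_000
    }

    fn main() {
        // 2 s plus 500,000,000 ns is 2500 ms.
        assert_eq!(to_milliseconds(2, 500_000_000), 2500);
    }
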
diff --git a/keystore2/vpnprofilestore/lib.rs b/keystore2/vpnprofilestore/lib.rs
index 8b3bc2b..df2731a 100644
--- a/keystore2/vpnprofilestore/lib.rs
+++ b/keystore2/vpnprofilestore/lib.rs
@@ -22,7 +22,7 @@
BinderFeatures, ExceptionCode, Result as BinderResult, Status as BinderStatus, Strong,
ThreadState,
};
-use anyhow::{Context, Result};
+use anyhow::{anyhow, Context, Result};
use keystore2::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader, utils::watchdog as wd};
use rusqlite::{
params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
@@ -30,25 +30,42 @@
use std::{
collections::HashSet,
path::{Path, PathBuf},
+ sync::Once,
};
+static DB_SET_WAL_MODE: Once = Once::new();
+
struct DB {
conn: Connection,
}
impl DB {
fn new(db_file: &Path) -> Result<Self> {
+ DB_SET_WAL_MODE.call_once(|| {
+ log::info!("Setting VpnProfileStore database to WAL mode first time since boot.");
+ Self::set_wal_mode(&db_file).expect("In vpnprofilestore: Could not set WAL mode.");
+ });
+
let mut db = Self {
conn: Connection::open(db_file).context("Failed to initialize SQLite connection.")?,
};
- // On busy fail Immediately. It is unlikely to succeed given a bug in sqlite.
- db.conn.busy_handler(None).context("Failed to set busy handler.")?;
-
db.init_tables().context("Trying to initialize vpnstore db.")?;
Ok(db)
}
+ fn set_wal_mode(db_file: &Path) -> Result<()> {
+ let conn = Connection::open(db_file)
+ .context("In VpnProfileStore set_wal_mode: Failed to open DB.")?;
+ let mode: String = conn
+ .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
+ .context("In VpnProfileStore set_wal_mode: Failed to set journal_mode")?;
+ match mode.as_str() {
+ "wal" => Ok(()),
+ _ => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", mode)),
+ }
+ }
+
fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
where
F: Fn(&Transaction) -> Result<T>,
@@ -470,6 +487,9 @@
const PROFILE_COUNT: u32 = 5000u32;
const PROFILE_DB_COUNT: u32 = 5000u32;
+ let mode: String = db.conn.pragma_query_value(None, "journal_mode", |row| row.get(0))?;
+ assert_eq!(mode, "wal");
+
let mut actual_profile_count = PROFILE_COUNT;
// First insert PROFILE_COUNT profiles.
for count in 0..PROFILE_COUNT {