DO NOT MERGE - Merge ab/7272582
Bug: 190855093
Change-Id: I477816eed7b78ae7768be4eafec685d44f24f4cc
diff --git a/keystore/keystore_cli_v2.cpp b/keystore/keystore_cli_v2.cpp
index 6e45ee2..43f72a9 100644
--- a/keystore/keystore_cli_v2.cpp
+++ b/keystore/keystore_cli_v2.cpp
@@ -56,7 +56,7 @@
keymint::AuthorizationSet parameters;
};
-constexpr const char keystore2_service_name[] = "android.system.keystore2";
+constexpr const char keystore2_service_name[] = "android.system.keystore2.IKeystoreService/default";
int unwrapError(const ndk::ScopedAStatus& status) {
if (status.isOk()) return 0;
@@ -769,7 +769,7 @@
sec_level->generateKey(keyDescriptor(name), {} /* attestationKey */, params.vector_data(),
0 /* flags */, {} /* entropy */, &keyMetadata);
- if (rc.isOk()) {
+ if (!rc.isOk()) {
std::cerr << "GenerateKey failed: " << rc.getDescription() << std::endl;
return unwrapError(rc);
}
diff --git a/keystore/tests/Android.bp b/keystore/tests/Android.bp
index 249cb77..39601eb 100644
--- a/keystore/tests/Android.bp
+++ b/keystore/tests/Android.bp
@@ -62,9 +62,9 @@
"libgtest_main",
"libutils",
"liblog",
+ "android.security.apc-ndk_platform",
],
shared_libs: [
- "android.security.apc-ndk_platform",
"libbinder_ndk",
],
sanitize: {
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index af177be..32493c0 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -50,15 +50,21 @@
"liblazy_static",
"liblibc",
"liblibsqlite3_sys",
+ "liblog_event_list",
"liblog_rust",
"librand",
"librusqlite",
"libstatslog_rust",
+ "libstatslog_rust_header",
+ "libstatspull_rust",
"libthiserror",
],
shared_libs: [
"libcutils",
],
+ features: [
+ "watchdog",
+ ],
}
rust_library {
@@ -73,7 +79,7 @@
rustlibs: [
"liblog_rust",
"librand",
- ]
+ ],
}
rust_test {
@@ -88,6 +94,10 @@
"libkeystore2_test_utils",
"libnix",
],
+ // The test should always include watchdog.
+ features: [
+ "watchdog",
+ ],
}
rust_binary {
@@ -102,5 +112,39 @@
],
init_rc: ["keystore2.rc"],
+ // In S, keystore2 is the only process using dynamically linked Rust from
+ // /system. As a result, the usual savings from sharing libraries don't
+ // apply.
+ // Remove `prefer_rlib: true` once we have several processes, once a space
+ // calculation shows net RAM savings, or once we have automatic variant
+ // selection available in the build system.
+ prefer_rlib: true,
+
+ // TODO(b/187412695)
+ // This is a hack to work around the build system not installing
+ // dynamic dependencies of rlibs to the device. This section should
+ // be removed once that works correctly.
+ shared_libs: [
+ "android.hardware.confirmationui@1.0",
+ "android.hardware.security.sharedsecret-V1-ndk_platform",
+ "android.security.compat-ndk_platform",
+ "libc",
+ "libdl_android",
+ "libdl",
+ "libandroidicu",
+ "libkeymint",
+ "libkeystore2_aaid",
+ "libkeystore2_apc_compat",
+ "libkeystore2_crypto",
+ "libkeystore2_vintf_cpp",
+ "libkm_compat_service",
+ "libkm_compat",
+ "libm",
+ "libstatspull",
+ "libstatssocket",
+ ],
+
vintf_fragments: ["android.system.keystore2-service.xml"],
+
+ required: ["keystore_cli_v2"],
}
diff --git a/keystore2/aaid/Android.bp b/keystore2/aaid/Android.bp
index d27fdf6..c04ce51 100644
--- a/keystore2/aaid/Android.bp
+++ b/keystore2/aaid/Android.bp
@@ -39,8 +39,8 @@
bindgen_flags: [
"--size_t-is-usize",
- "--whitelist-function=aaid_keystore_attestation_id",
- "--whitelist-var=KEY_ATTESTATION_APPLICATION_ID_MAX_SIZE",
+ "--allowlist-function=aaid_keystore_attestation_id",
+ "--allowlist-var=KEY_ATTESTATION_APPLICATION_ID_MAX_SIZE",
],
}
diff --git a/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl b/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl
index 01616b1..3f33431 100644
--- a/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl
+++ b/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl
@@ -27,7 +27,6 @@
*/
@SensitiveData
interface IKeystoreAuthorization {
-
/**
* Allows the Android authenticators to hand over an auth token to Keystore.
* Callers require 'AddAuth' permission.
@@ -58,9 +57,29 @@
* @param userId - Android user id
*
* @param password - synthetic password derived by the user denoted by the user id
+ *
+ * @param unlockingSids - list of biometric SIDs for this user. This will be null when
+ * lockScreenEvent is UNLOCK, but may be non-null when
+ * lockScreenEvent is LOCK.
+ *
+ * When the device is unlocked, Keystore stores in memory
+ * a super-encryption key that protects UNLOCKED_DEVICE_REQUIRED
+ * keys; this key is wiped from memory when the device is locked.
+ *
+ * If unlockingSids is non-empty on lock, then before the
+ * super-encryption key is wiped from memory, a copy of it
+ * is stored in memory encrypted with a fresh AES key.
+ * This key is then imported into KM, tagged such that it can be
+ * used given a valid, recent auth token for any of the
+ * unlockingSids.
+ *
+ * Then, when the device is unlocked again, if a suitable auth token
+ * has been sent to keystore, it is used to recover the
+ * super-encryption key, so that UNLOCKED_DEVICE_REQUIRED keys can
+ * be used once again.
*/
void onLockScreenEvent(in LockScreenEvent lockScreenEvent, in int userId,
- in @nullable byte[] password);
+ in @nullable byte[] password, in @nullable long[] unlockingSids);
/**
* Allows Credstore to retrieve a HardwareAuthToken and a TimestampToken.
diff --git a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
index 21ddd9b..5f91e79 100644
--- a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
+++ b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
@@ -15,6 +15,7 @@
package android.security.maintenance;
import android.system.keystore2.Domain;
+import android.system.keystore2.KeyDescriptor;
import android.security.maintenance.UserState;
/**
@@ -107,4 +108,19 @@
* `ResponseCode::SYSTEM_ERROR` - if an unexpected error occurred.
*/
void onDeviceOffBody();
+
+ /**
+ * Migrate a key from one namespace to another. The caller must have use, grant, and delete
+ * permissions on the source namespace and rebind permissions on the destination namespace.
+ * The source may be specified by Domain::APP, Domain::SELINUX, or Domain::KEY_ID. The target
+ * may be specified by Domain::APP or Domain::SELINUX.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - If the caller lacks any of the required permissions.
+ * `ResponseCode::KEY_NOT_FOUND` - If the source did not exist.
+ * `ResponseCode::INVALID_ARGUMENT` - If the target exists or if any of the above mentioned
+ * requirements for the domain parameter are not met.
+ * `ResponseCode::SYSTEM_ERROR` - An unexpected system error occurred.
+ */
+ void migrateKeyNamespace(in KeyDescriptor source, in KeyDescriptor destination);
}
diff --git a/keystore2/apc_compat/Android.bp b/keystore2/apc_compat/Android.bp
index 9519c8e..bf21675 100644
--- a/keystore2/apc_compat/Android.bp
+++ b/keystore2/apc_compat/Android.bp
@@ -41,12 +41,12 @@
source_stem: "bindings",
bindgen_flags: [
- "--whitelist-function=tryGetUserConfirmationService",
- "--whitelist-function=promptUserConfirmation",
- "--whitelist-function=abortUserConfirmation",
- "--whitelist-function=closeUserConfirmationService",
- "--whitelist-var=INVALID_SERVICE_HANDLE",
- "--whitelist-var=APC_COMPAT_.*",
+ "--allowlist-function=tryGetUserConfirmationService",
+ "--allowlist-function=promptUserConfirmation",
+ "--allowlist-function=abortUserConfirmation",
+ "--allowlist-function=closeUserConfirmationService",
+ "--allowlist-var=INVALID_SERVICE_HANDLE",
+ "--allowlist-var=APC_COMPAT_.*",
],
}
diff --git a/keystore2/selinux/Android.bp b/keystore2/selinux/Android.bp
index 18063d3..254f95e 100644
--- a/keystore2/selinux/Android.bp
+++ b/keystore2/selinux/Android.bp
@@ -34,6 +34,7 @@
rustlibs: [
"libanyhow",
+ "liblazy_static",
"liblog_rust",
"libselinux_bindgen",
"libthiserror",
@@ -56,8 +57,30 @@
rustlibs: [
"libandroid_logger",
"libanyhow",
+ "liblazy_static",
"liblog_rust",
"libselinux_bindgen",
"libthiserror",
],
}
+
+rust_test {
+ name: "keystore2_selinux_concurrency_test",
+ srcs: [
+ "src/concurrency_test.rs",
+ ],
+ crate_name: "keystore2_selinux_concurrency_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+
+ rustlibs: [
+ "libandroid_logger",
+ "libanyhow",
+ "libkeystore2_selinux",
+ "liblazy_static",
+ "liblog_rust",
+ "libnix",
+ "libnum_cpus",
+ "libthiserror",
+ ],
+}
diff --git a/keystore2/selinux/src/concurrency_test.rs b/keystore2/selinux/src/concurrency_test.rs
new file mode 100644
index 0000000..a5d2df2
--- /dev/null
+++ b/keystore2/selinux/src/concurrency_test.rs
@@ -0,0 +1,190 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use keystore2_selinux::{check_access, Context};
+use nix::sched::sched_setaffinity;
+use nix::sched::CpuSet;
+use nix::unistd::getpid;
+use std::thread;
+use std::{
+ sync::{atomic::AtomicU8, atomic::Ordering, Arc},
+ time::{Duration, Instant},
+};
+
+#[derive(Clone, Copy)]
+struct CatCount(u8, u8, u8, u8);
+
+impl CatCount {
+ fn next(&mut self) -> CatCount {
+ let result = *self;
+ if self.3 == 255 {
+ if self.2 == 254 {
+ if self.1 == 253 {
+ if self.0 == 252 {
+ self.0 = 255;
+ }
+ self.0 += 1;
+ self.1 = self.0;
+ }
+ self.1 += 1;
+ self.2 = self.1;
+ }
+ self.2 += 1;
+ self.3 = self.2;
+ }
+ self.3 += 1;
+ result
+ }
+
+ fn make_string(&self) -> String {
+ format!("c{},c{},c{},c{}", self.0, self.1, self.2, self.3)
+ }
+}
+
+impl Default for CatCount {
+ fn default() -> Self {
+ Self(0, 1, 2, 3)
+ }
+}
+
+/// This test calls selinux_check_access concurrently causing access vector cache misses
+/// in libselinux avc. The test then checks if any of the threads fails to report back
+/// after a burst of access checks. The purpose of the test is to draw out a specific
+/// access vector cache corruption that sends a calling thread into an infinite loop.
+/// This was observed when keystore2 used libselinux concurrently in a non thread safe
+/// way. See b/184006658.
+#[test]
+fn test_concurrent_check_access() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("keystore2_selinux_concurrency_test")
+ .with_min_level(log::Level::Debug),
+ );
+
+ let cpus = num_cpus::get();
+ let turnpike = Arc::new(AtomicU8::new(0));
+ let complete_count = Arc::new(AtomicU8::new(0));
+ let mut threads: Vec<thread::JoinHandle<()>> = Vec::new();
+
+ for i in 0..cpus {
+ log::info!("Spawning thread {}", i);
+ let turnpike_clone = turnpike.clone();
+ let complete_count_clone = complete_count.clone();
+ threads.push(thread::spawn(move || {
+ let mut cpu_set = CpuSet::new();
+ cpu_set.set(i).unwrap();
+ sched_setaffinity(getpid(), &cpu_set).unwrap();
+ let mut cat_count: CatCount = Default::default();
+
+ log::info!("Thread 0 reached turnpike");
+ loop {
+ turnpike_clone.fetch_add(1, Ordering::Relaxed);
+ loop {
+ match turnpike_clone.load(Ordering::Relaxed) {
+ 0 => break,
+ 255 => return,
+ _ => {}
+ }
+ }
+
+ for _ in 0..250 {
+ let (tctx, sctx, perm, class) = (
+ Context::new("u:object_r:keystore:s0").unwrap(),
+ Context::new(&format!(
+ "u:r:untrusted_app:s0:{}",
+ cat_count.next().make_string()
+ ))
+ .unwrap(),
+ "use",
+ "keystore2_key",
+ );
+
+ check_access(&sctx, &tctx, class, perm).unwrap();
+ }
+
+ complete_count_clone.fetch_add(1, Ordering::Relaxed);
+ while complete_count_clone.load(Ordering::Relaxed) as usize != cpus {
+ thread::sleep(Duration::from_millis(5));
+ }
+ }
+ }));
+ }
+
+ let mut i = 0;
+ let run_time = Instant::now();
+
+ loop {
+ const TEST_ITERATIONS: u32 = 500;
+ const MAX_SLEEPS: u64 = 500;
+ const SLEEP_MILLISECONDS: u64 = 5;
+ let mut sleep_count: u64 = 0;
+ while turnpike.load(Ordering::Relaxed) as usize != cpus {
+ thread::sleep(Duration::from_millis(SLEEP_MILLISECONDS));
+ sleep_count += 1;
+ assert!(
+ sleep_count < MAX_SLEEPS,
+ "Waited too long to go ready on iteration {}, only {} are ready",
+ i,
+ turnpike.load(Ordering::Relaxed)
+ );
+ }
+
+ if i % 100 == 0 {
+ let elapsed = run_time.elapsed().as_secs();
+ println!("{:02}:{:02}: Iteration {}", elapsed / 60, elapsed % 60, i);
+ }
+
+ // Give the threads some time to reach and spin on the turn pike.
+ assert_eq!(turnpike.load(Ordering::Relaxed) as usize, cpus, "i = {}", i);
+ if i >= TEST_ITERATIONS {
+ turnpike.store(255, Ordering::Relaxed);
+ break;
+ }
+
+ // Now go.
+ complete_count.store(0, Ordering::Relaxed);
+ turnpike.store(0, Ordering::Relaxed);
+ i += 1;
+
+ // Wait for them to all complete.
+ sleep_count = 0;
+ while complete_count.load(Ordering::Relaxed) as usize != cpus {
+ thread::sleep(Duration::from_millis(SLEEP_MILLISECONDS));
+ sleep_count += 1;
+ if sleep_count >= MAX_SLEEPS {
+ // Enable the following block to park the thread to allow attaching a debugger.
+ if false {
+ println!(
+ "Waited {} seconds and we seem stuck. Going to sleep forever.",
+ (MAX_SLEEPS * SLEEP_MILLISECONDS) as f32 / 1000.0
+ );
+ loop {
+ thread::park();
+ }
+ } else {
+ assert!(
+ sleep_count < MAX_SLEEPS,
+ "Waited too long to complete on iteration {}, only {} are complete",
+ i,
+ complete_count.load(Ordering::Relaxed)
+ );
+ }
+ }
+ }
+ }
+
+ for t in threads {
+ t.join().unwrap();
+ }
+}
diff --git a/keystore2/selinux/src/lib.rs b/keystore2/selinux/src/lib.rs
index cc707e7..5197cf6 100644
--- a/keystore2/selinux/src/lib.rs
+++ b/keystore2/selinux/src/lib.rs
@@ -20,6 +20,13 @@
//! * selabel_lookup for the keystore2_key backend.
//! And it provides an owning wrapper around context strings `Context`.
+use anyhow::Context as AnyhowContext;
+use anyhow::{anyhow, Result};
+use lazy_static::lazy_static;
+pub use selinux::pid_t;
+use selinux::SELABEL_CTX_ANDROID_KEYSTORE2_KEY;
+use selinux::SELINUX_CB_LOG;
+use selinux_bindgen as selinux;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
@@ -29,18 +36,18 @@
use std::ptr;
use std::sync;
-use selinux_bindgen as selinux;
-
-use anyhow::Context as AnyhowContext;
-use anyhow::{anyhow, Result};
-
-use selinux::SELABEL_CTX_ANDROID_KEYSTORE2_KEY;
-use selinux::SELINUX_CB_LOG;
-
-pub use selinux::pid_t;
-
static SELINUX_LOG_INIT: sync::Once = sync::Once::new();
+lazy_static! {
+ /// `selinux_check_access` is only thread safe if avc_init was called with lock callbacks.
+    /// However, avc_init is deprecated and not exported by Android's version of libselinux.
+ /// `selinux_set_callbacks` does not allow setting lock callbacks. So the only option
+ /// that remains right now is to put a big lock around calls into libselinux.
+    /// TODO b/188079221 It should suffice to protect `selinux_check_access`, but until we are
+    /// certain of that, we leave the extra locks in place.
+ static ref LIB_SELINUX_LOCK: sync::Mutex<()> = Default::default();
+}
+
fn redirect_selinux_logs_to_logcat() {
// `selinux_set_callback` assigns the static lifetime function pointer
// `selinux_log_callback` to a static lifetime variable.
@@ -164,6 +171,8 @@
/// `selinux_android_keystore2_key_context_handle`.
pub fn new() -> Result<Self> {
init_logger_once();
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
let handle = unsafe { selinux::selinux_android_keystore2_key_context_handle() };
if handle.is_null() {
return Err(anyhow!(Error::sys("Failed to open KeystoreKeyBackend")));
@@ -192,6 +201,8 @@
match unsafe {
// No need to initialize the logger here because it cannot run unless
// KeystoreKeyBackend::new has run.
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
selinux::selabel_lookup(self.handle, &mut con, c_key.as_ptr(), Self::BACKEND_TYPE)
} {
0 => {
@@ -219,6 +230,8 @@
/// * Err(io::Error::last_os_error()) if getcon failed.
pub fn getcon() -> Result<Context> {
init_logger_once();
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
let mut con: *mut c_char = ptr::null_mut();
match unsafe { selinux::getcon(&mut con) } {
0 => {
@@ -241,6 +254,8 @@
/// * Err(io::Error::last_os_error()) if getpidcon failed.
pub fn getpidcon(pid: selinux::pid_t) -> Result<Context> {
init_logger_once();
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
let mut con: *mut c_char = ptr::null_mut();
match unsafe { selinux::getpidcon(pid, &mut con) } {
0 => {
@@ -267,6 +282,7 @@
/// the access check.
pub fn check_access(source: &CStr, target: &CStr, tclass: &str, perm: &str) -> Result<()> {
init_logger_once();
+
let c_tclass = CString::new(tclass).with_context(|| {
format!("check_access: Failed to convert tclass \"{}\" to CString.", tclass)
})?;
@@ -275,6 +291,8 @@
})?;
match unsafe {
+ let _lock = LIB_SELINUX_LOCK.lock().unwrap();
+
selinux::selinux_check_access(
source.as_ptr(),
target.as_ptr(),
diff --git a/keystore2/src/apc.rs b/keystore2/src/apc.rs
index f8259ea..0096686 100644
--- a/keystore2/src/apc.rs
+++ b/keystore2/src/apc.rs
@@ -21,17 +21,17 @@
sync::{mpsc::Sender, Arc, Mutex},
};
-use crate::utils::{compat_2_response_code, ui_opts_2_compat};
+use crate::utils::{compat_2_response_code, ui_opts_2_compat, watchdog as wd};
use android_security_apc::aidl::android::security::apc::{
IConfirmationCallback::IConfirmationCallback,
IProtectedConfirmation::{BnProtectedConfirmation, IProtectedConfirmation},
ResponseCode::ResponseCode,
};
use android_security_apc::binder::{
- ExceptionCode, Interface, Result as BinderResult, SpIBinder, Status as BinderStatus, Strong,
+ BinderFeatures, ExceptionCode, Interface, Result as BinderResult, SpIBinder,
+ Status as BinderStatus, Strong, ThreadState,
};
use anyhow::{Context, Result};
-use binder::{IBinderInternal, ThreadState};
use keystore2_apc_compat::ApcHal;
use keystore2_selinux as selinux;
use std::time::{Duration, Instant};
@@ -203,11 +203,10 @@
pub fn new_native_binder(
confirmation_token_sender: Sender<Vec<u8>>,
) -> Result<Strong<dyn IProtectedConfirmation>> {
- let result = BnProtectedConfirmation::new_binder(Self {
- state: Arc::new(Mutex::new(ApcState::new(confirmation_token_sender))),
- });
- result.as_binder().set_requesting_sid(true);
- Ok(result)
+ Ok(BnProtectedConfirmation::new_binder(
+ Self { state: Arc::new(Mutex::new(ApcState::new(confirmation_token_sender))) },
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
}
fn result(
@@ -268,7 +267,7 @@
fn present_prompt(
&self,
- listener: &dyn IConfirmationCallback,
+ listener: &binder::Strong<dyn IConfirmationCallback>,
prompt_text: &str,
extra_data: &[u8],
locale: &str,
@@ -327,7 +326,7 @@
Ok(())
}
- fn cancel_prompt(&self, listener: &dyn IConfirmationCallback) -> Result<()> {
+ fn cancel_prompt(&self, listener: &binder::Strong<dyn IConfirmationCallback>) -> Result<()> {
let mut state = self.state.lock().unwrap();
let hal = match &mut state.session {
None => {
@@ -358,21 +357,28 @@
impl IProtectedConfirmation for ApcManager {
fn presentPrompt(
&self,
- listener: &dyn IConfirmationCallback,
+ listener: &binder::Strong<dyn IConfirmationCallback>,
prompt_text: &str,
extra_data: &[u8],
locale: &str,
ui_option_flags: i32,
) -> BinderResult<()> {
+ // presentPrompt can take more time than other operations.
+ let _wp = wd::watch_millis("IProtectedConfirmation::presentPrompt", 3000);
map_or_log_err(
self.present_prompt(listener, prompt_text, extra_data, locale, ui_option_flags),
Ok,
)
}
- fn cancelPrompt(&self, listener: &dyn IConfirmationCallback) -> BinderResult<()> {
+ fn cancelPrompt(
+ &self,
+ listener: &binder::Strong<dyn IConfirmationCallback>,
+ ) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IProtectedConfirmation::cancelPrompt", 500);
map_or_log_err(self.cancel_prompt(listener), Ok)
}
fn isSupported(&self) -> BinderResult<bool> {
+ let _wp = wd::watch_millis("IProtectedConfirmation::isSupported", 500);
map_or_log_err(Self::is_supported(), Ok)
}
}
diff --git a/keystore2/src/async_task.rs b/keystore2/src/async_task.rs
index 4d0034a..45f0274 100644
--- a/keystore2/src/async_task.rs
+++ b/keystore2/src/async_task.rs
@@ -19,6 +19,7 @@
//! processed all tasks before it terminates.
//! Note that low priority tasks are processed only when the high priority queue is empty.
+use crate::utils::watchdog as wd;
use std::{any::Any, any::TypeId, time::Duration};
use std::{
collections::{HashMap, VecDeque},
@@ -170,6 +171,7 @@
{
let (ref condvar, ref state) = *self.state;
let mut state = state.lock().unwrap();
+
if hi_prio {
state.hi_prio_req.push_back(Box::new(f));
} else {
@@ -239,11 +241,14 @@
// Now that the lock has been dropped, perform the action.
match action {
Action::QueuedFn(f) => {
+ let _wd = wd::watch_millis("async_task thread: calling queued fn", 500);
f(&mut shelf);
done_idle = false;
}
Action::IdleFns(idle_fns) => {
for idle_fn in idle_fns {
+ let _wd =
+ wd::watch_millis("async_task thread: calling idle_fn", 500);
idle_fn(&mut shelf);
}
done_idle = true;
diff --git a/keystore2/src/audit_log.rs b/keystore2/src/audit_log.rs
new file mode 100644
index 0000000..3d7d26e
--- /dev/null
+++ b/keystore2/src/audit_log.rs
@@ -0,0 +1,86 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements functions to log audit events to binary security log buffer for NIAP
+//! compliance.
+
+use crate::globals::LOGS_HANDLER;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor,
+};
+use libc::uid_t;
+use log_event_list::{LogContext, LogIdSecurity};
+
+const TAG_KEY_GENERATED: u32 = 210024;
+const TAG_KEY_IMPORTED: u32 = 210025;
+const TAG_KEY_DESTROYED: u32 = 210026;
+const TAG_KEY_INTEGRITY_VIOLATION: u32 = 210032;
+
+const FLAG_NAMESPACE: i64 = 0x80000000;
+
+/// Encode key owner as either uid or namespace with a flag.
+fn key_owner(domain: Domain, nspace: i64, uid: i32) -> i32 {
+ match domain {
+ Domain::APP => uid,
+ Domain::SELINUX => (nspace | FLAG_NAMESPACE) as i32,
+ _ => {
+ log::info!("Not logging audit event for key with unexpected domain");
+ 0
+ }
+ }
+}
+
+/// Logs key generation event to NIAP audit log.
+pub fn log_key_generated(key: &KeyDescriptor, calling_app: uid_t, success: bool) {
+ log_key_event(TAG_KEY_GENERATED, key, calling_app, success);
+}
+
+/// Logs key import event to NIAP audit log.
+pub fn log_key_imported(key: &KeyDescriptor, calling_app: uid_t, success: bool) {
+ log_key_event(TAG_KEY_IMPORTED, key, calling_app, success);
+}
+
+/// Logs key deletion event to NIAP audit log.
+pub fn log_key_deleted(key: &KeyDescriptor, calling_app: uid_t, success: bool) {
+ log_key_event(TAG_KEY_DESTROYED, key, calling_app, success);
+}
+
+/// Logs key integrity violation to NIAP audit log.
+pub fn log_key_integrity_violation(key: &KeyDescriptor) {
+ with_log_context(TAG_KEY_INTEGRITY_VIOLATION, |ctx| {
+ let owner = key_owner(key.domain, key.nspace, key.nspace as i32);
+ ctx.append_str(key.alias.as_ref().map_or("none", String::as_str)).append_i32(owner)
+ })
+}
+
+fn log_key_event(tag: u32, key: &KeyDescriptor, calling_app: uid_t, success: bool) {
+ with_log_context(tag, |ctx| {
+ let owner = key_owner(key.domain, key.nspace, calling_app as i32);
+ ctx.append_i32(if success { 1 } else { 0 })
+ .append_str(key.alias.as_ref().map_or("none", String::as_str))
+ .append_i32(owner)
+ })
+}
+
+fn with_log_context<F>(tag: u32, f: F)
+where
+ F: Fn(LogContext) -> LogContext,
+{
+ if let Some(ctx) = LogContext::new(LogIdSecurity, tag) {
+ let event = f(ctx);
+ LOGS_HANDLER.queue_lo(move |_| {
+ event.write();
+ });
+ }
+}
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index 06b5598..777089f 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -18,11 +18,11 @@
use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
use crate::permission::KeystorePerm;
use crate::super_key::UserState;
-use crate::utils::check_keystore_permission;
+use crate::utils::{check_keystore_permission, watchdog as wd};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
HardwareAuthToken::HardwareAuthToken,
};
-use android_security_authorization::binder::{ExceptionCode, Interface, Result as BinderResult,
+use android_security_authorization::binder::{BinderFeatures,ExceptionCode, Interface, Result as BinderResult,
Strong, Status as BinderStatus};
use android_security_authorization::aidl::android::security::authorization::{
IKeystoreAuthorization::BnKeystoreAuthorization, IKeystoreAuthorization::IKeystoreAuthorization,
@@ -32,7 +32,6 @@
use android_system_keystore2::aidl::android::system::keystore2::{
ResponseCode::ResponseCode as KsResponseCode };
use anyhow::{Context, Result};
-use binder::IBinderInternal;
use keystore2_crypto::Password;
use keystore2_selinux as selinux;
@@ -112,16 +111,17 @@
impl AuthorizationManager {
/// Create a new instance of Keystore Authorization service.
pub fn new_native_binder() -> Result<Strong<dyn IKeystoreAuthorization>> {
- let result = BnKeystoreAuthorization::new_binder(Self);
- result.as_binder().set_requesting_sid(true);
- Ok(result)
+ Ok(BnKeystoreAuthorization::new_binder(
+ Self,
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
}
fn add_auth_token(&self, auth_token: &HardwareAuthToken) -> Result<()> {
- //check keystore permission
+ // Check keystore permission.
check_keystore_permission(KeystorePerm::add_auth()).context("In add_auth_token.")?;
- ENFORCEMENTS.add_auth_token(auth_token.clone())?;
+ ENFORCEMENTS.add_auth_token(auth_token.clone());
Ok(())
}
@@ -130,11 +130,19 @@
lock_screen_event: LockScreenEvent,
user_id: i32,
password: Option<Password>,
+ unlocking_sids: Option<&[i64]>,
) -> Result<()> {
+ log::info!(
+ "on_lock_screen_event({:?}, user_id={:?}, password.is_some()={}, unlocking_sids={:?})",
+ lock_screen_event,
+ user_id,
+ password.is_some(),
+ unlocking_sids
+ );
match (lock_screen_event, password) {
(LockScreenEvent::UNLOCK, Some(password)) => {
- //This corresponds to the unlock() method in legacy keystore API.
- //check permission
+ // This corresponds to the unlock() method in legacy keystore API.
+ // check permission
check_keystore_permission(KeystorePerm::unlock())
.context("In on_lock_screen_event: Unlock with password.")?;
ENFORCEMENTS.set_device_locked(user_id, false);
@@ -172,14 +180,23 @@
check_keystore_permission(KeystorePerm::unlock())
.context("In on_lock_screen_event: Unlock.")?;
ENFORCEMENTS.set_device_locked(user_id, false);
+ DB.with(|db| {
+ SUPER_KEY.try_unlock_user_with_biometric(&mut db.borrow_mut(), user_id as u32)
+ })
+ .context("In on_lock_screen_event: try_unlock_user_with_biometric failed")?;
Ok(())
}
(LockScreenEvent::LOCK, None) => {
check_keystore_permission(KeystorePerm::lock())
.context("In on_lock_screen_event: Lock")?;
ENFORCEMENTS.set_device_locked(user_id, true);
- SUPER_KEY.lock_screen_lock_bound_key(user_id as u32);
-
+ DB.with(|db| {
+ SUPER_KEY.lock_screen_lock_bound_key(
+ &mut db.borrow_mut(),
+ user_id as u32,
+ unlocking_sids.unwrap_or(&[]),
+ );
+ });
Ok(())
}
_ => {
@@ -201,7 +218,7 @@
check_keystore_permission(KeystorePerm::get_auth_token())
.context("In get_auth_tokens_for_credstore.")?;
- // if the challenge is zero, return error
+ // If the challenge is zero, return error
if challenge == 0 {
return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT))
.context("In get_auth_tokens_for_credstore. Challenge can not be zero.");
@@ -217,6 +234,7 @@
impl IKeystoreAuthorization for AuthorizationManager {
fn addAuthToken(&self, auth_token: &HardwareAuthToken) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreAuthorization::addAuthToken", 500);
map_or_log_err(self.add_auth_token(auth_token), Ok)
}
@@ -225,9 +243,19 @@
lock_screen_event: LockScreenEvent,
user_id: i32,
password: Option<&[u8]>,
+ unlocking_sids: Option<&[i64]>,
) -> BinderResult<()> {
+ let _wp =
+ wd::watch_millis_with("IKeystoreAuthorization::onLockScreenEvent", 500, move || {
+ format!("lock event: {}", lock_screen_event.0)
+ });
map_or_log_err(
- self.on_lock_screen_event(lock_screen_event, user_id, password.map(|pw| pw.into())),
+ self.on_lock_screen_event(
+ lock_screen_event,
+ user_id,
+ password.map(|pw| pw.into()),
+ unlocking_sids,
+ ),
Ok,
)
}
@@ -238,6 +266,7 @@
secure_user_id: i64,
auth_token_max_age_millis: i64,
) -> binder::public_api::Result<AuthorizationTokens> {
+ let _wp = wd::watch_millis("IKeystoreAuthorization::getAuthTokensForCredStore", 500);
map_or_log_err(
self.get_auth_tokens_for_credstore(
challenge,
diff --git a/keystore2/src/boot_level_keys.rs b/keystore2/src/boot_level_keys.rs
new file mode 100644
index 0000000..1110caf
--- /dev/null
+++ b/keystore2/src/boot_level_keys.rs
@@ -0,0 +1,284 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Offer keys based on the "boot level" for superencryption.
+
+use crate::{
+ database::{KeyType, KeystoreDB},
+ key_parameter::KeyParameterValue,
+ raw_device::KeyMintDevice,
+};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, Digest::Digest, KeyParameter::KeyParameter as KmKeyParameter,
+ KeyParameterValue::KeyParameterValue as KmKeyParameterValue, KeyPurpose::KeyPurpose,
+ SecurityLevel::SecurityLevel, Tag::Tag,
+};
+use anyhow::{Context, Result};
+use keystore2_crypto::{hkdf_expand, ZVec, AES_256_KEY_LENGTH};
+use std::{collections::VecDeque, convert::TryFrom};
+
+fn get_preferred_km_instance_for_level_zero_key() -> Result<KeyMintDevice> {
+ let tee = KeyMintDevice::get(SecurityLevel::TRUSTED_ENVIRONMENT)
+ .context("In get_preferred_km_instance_for_level_zero_key: Get TEE instance failed.")?;
+ if tee.version() >= KeyMintDevice::KEY_MASTER_V4_1 {
+ Ok(tee)
+ } else {
+ match KeyMintDevice::get_or_none(SecurityLevel::STRONGBOX).context(
+ "In get_preferred_km_instance_for_level_zero_key: Get Strongbox instance failed.",
+ )? {
+ Some(strongbox) if strongbox.version() >= KeyMintDevice::KEY_MASTER_V4_1 => {
+ Ok(strongbox)
+ }
+ _ => Ok(tee),
+ }
+ }
+}
+
+/// This is not thread safe; caller must hold a lock before calling.
+/// In practice the caller is SuperKeyManager and the lock is the
+/// Mutex on its internal state.
+pub fn get_level_zero_key(db: &mut KeystoreDB) -> Result<ZVec> {
+ let km_dev = get_preferred_km_instance_for_level_zero_key()
+ .context("In get_level_zero_key: get preferred KM instance failed")?;
+
+ let key_desc = KeyMintDevice::internal_descriptor("boot_level_key".to_string());
+ let mut params = vec![
+ KeyParameterValue::Algorithm(Algorithm::HMAC).into(),
+ KeyParameterValue::Digest(Digest::SHA_2_256).into(),
+ KeyParameterValue::KeySize(256).into(),
+ KeyParameterValue::MinMacLength(256).into(),
+ KeyParameterValue::KeyPurpose(KeyPurpose::SIGN).into(),
+ KeyParameterValue::NoAuthRequired.into(),
+ ];
+
+ let has_early_boot_only = km_dev.version() >= KeyMintDevice::KEY_MASTER_V4_1;
+
+ if has_early_boot_only {
+ params.push(KeyParameterValue::EarlyBootOnly.into());
+ } else {
+ params.push(KeyParameterValue::MaxUsesPerBoot(1).into())
+ }
+
+ let (key_id_guard, key_entry) = km_dev
+ .lookup_or_generate_key(db, &key_desc, KeyType::Client, ¶ms, |key_characteristics| {
+ key_characteristics.iter().any(|kc| {
+ if kc.securityLevel == km_dev.security_level() {
+ kc.authorizations.iter().any(|a| {
+ matches!(
+ (has_early_boot_only, a),
+ (
+ true,
+ KmKeyParameter {
+ tag: Tag::EARLY_BOOT_ONLY,
+ value: KmKeyParameterValue::BoolValue(true)
+ }
+ ) | (
+ false,
+ KmKeyParameter {
+ tag: Tag::MAX_USES_PER_BOOT,
+ value: KmKeyParameterValue::Integer(1)
+ }
+ )
+ )
+ })
+ } else {
+ false
+ }
+ })
+ })
+ .context("In get_level_zero_key: lookup_or_generate_key failed")?;
+
+ let params = [
+ KeyParameterValue::MacLength(256).into(),
+ KeyParameterValue::Digest(Digest::SHA_2_256).into(),
+ ];
+ let level_zero_key = km_dev
+ .use_key_in_one_step(
+ db,
+ &key_id_guard,
+ &key_entry,
+ KeyPurpose::SIGN,
+ ¶ms,
+ None,
+ b"Create boot level key",
+ )
+ .context("In get_level_zero_key: use_key_in_one_step failed")?;
+ // TODO: this is rather unsatisfactory, we need a better way to handle
+ // sensitive binder returns.
+ let level_zero_key = ZVec::try_from(level_zero_key)
+ .context("In get_level_zero_key: conversion to ZVec failed")?;
+ Ok(level_zero_key)
+}
+
+/// Holds the key for the current boot level, and a cache of future keys generated as required.
+/// When the boot level advances, keys prior to the current boot level are securely dropped.
+pub struct BootLevelKeyCache {
+ /// Least boot level currently accessible, if any is.
+ current: usize,
+ /// Invariant: cache entry *i*, if it exists, holds the HKDF key for boot level
+ /// *i* + `current`. If the cache is non-empty it can be grown forwards, but it cannot be
+ /// grown backwards, so keys below `current` are inaccessible.
+ /// `cache.clear()` makes all keys inaccessible.
+ cache: VecDeque<ZVec>,
+}
+
+impl BootLevelKeyCache {
+ const HKDF_ADVANCE: &'static [u8] = b"Advance KDF one step";
+ const HKDF_AES: &'static [u8] = b"Generate AES-256-GCM key";
+ const HKDF_KEY_SIZE: usize = 32;
+
+ /// Initialize the cache with the level zero key.
+ pub fn new(level_zero_key: ZVec) -> Self {
+ let mut cache: VecDeque<ZVec> = VecDeque::new();
+ cache.push_back(level_zero_key);
+ Self { current: 0, cache }
+ }
+
+ /// Report whether the key for the given level can be inferred.
+ pub fn level_accessible(&self, boot_level: usize) -> bool {
+ // If the requested boot level is lower than the current boot level
+ // or if we have reached the end (`cache.empty()`) we can't retrieve
+ // the boot key.
+ boot_level >= self.current && !self.cache.is_empty()
+ }
+
+ /// Get the HKDF key for boot level `boot_level`. The key for level *i*+1
+ /// is calculated from the level *i* key using `hkdf_expand`.
+ fn get_hkdf_key(&mut self, boot_level: usize) -> Result<Option<&ZVec>> {
+ if !self.level_accessible(boot_level) {
+ return Ok(None);
+ }
+ // `self.cache.len()` is the offset from `current` of the first entry not in the cache,
+ // so `self.current + self.cache.len()` is the first boot level not in the cache.
+ let first_not_cached = self.current + self.cache.len();
+
+ // Grow the cache forwards until it contains the desired boot level.
+ for _level in first_not_cached..=boot_level {
+ // We check at the start that cache is non-empty and future iterations only push,
+ // so this must unwrap.
+ let highest_key = self.cache.back().unwrap();
+ let next_key = hkdf_expand(Self::HKDF_KEY_SIZE, highest_key, Self::HKDF_ADVANCE)
+ .context("In BootLevelKeyCache::get_hkdf_key: Advancing key one step")?;
+ self.cache.push_back(next_key);
+ }
+
+ // If we reach this point, we should have a key at index boot_level - current.
+ Ok(Some(self.cache.get(boot_level - self.current).unwrap()))
+ }
+
+ /// Drop keys prior to the given boot level, while retaining the ability to generate keys for
+ /// that level and later.
+ pub fn advance_boot_level(&mut self, new_boot_level: usize) -> Result<()> {
+ if !self.level_accessible(new_boot_level) {
+ log::error!(
+ concat!(
+ "In BootLevelKeyCache::advance_boot_level: ",
+ "Failed to advance boot level to {}, current is {}, cache size {}"
+ ),
+ new_boot_level,
+ self.current,
+ self.cache.len()
+ );
+ return Ok(());
+ }
+
+ // We `get` the new boot level for the side effect of advancing the cache to a point
+ // where the new boot level is present.
+ self.get_hkdf_key(new_boot_level)
+ .context("In BootLevelKeyCache::advance_boot_level: Advancing cache")?;
+
+ // Then we split the queue at the index of the new boot level and discard the front,
+ // keeping only the keys with the current boot level or higher.
+ self.cache = self.cache.split_off(new_boot_level - self.current);
+
+ // The new cache has the new boot level at index 0, so we set `current` to
+ // `new_boot_level`.
+ self.current = new_boot_level;
+
+ Ok(())
+ }
+
+ /// Drop all keys, effectively raising the current boot level to infinity; no keys can
+ /// be inferred from this point on.
+ pub fn finish(&mut self) {
+ self.cache.clear();
+ }
+
+ fn expand_key(
+ &mut self,
+ boot_level: usize,
+ out_len: usize,
+ info: &[u8],
+ ) -> Result<Option<ZVec>> {
+ self.get_hkdf_key(boot_level)
+ .context("In BootLevelKeyCache::expand_key: Looking up HKDF key")?
+ .map(|k| hkdf_expand(out_len, k, info))
+ .transpose()
+ .context("In BootLevelKeyCache::expand_key: Calling hkdf_expand")
+ }
+
+ /// Return the AES-256-GCM key for the given boot level.
+ pub fn aes_key(&mut self, boot_level: usize) -> Result<Option<ZVec>> {
+ self.expand_key(boot_level, AES_256_KEY_LENGTH, BootLevelKeyCache::HKDF_AES)
+ .context("In BootLevelKeyCache::aes_key: expand_key failed")
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_output_is_consistent() -> Result<()> {
+ let initial_key = b"initial key";
+ let mut blkc = BootLevelKeyCache::new(ZVec::try_from(initial_key as &[u8])?);
+ assert_eq!(true, blkc.level_accessible(0));
+ assert_eq!(true, blkc.level_accessible(9));
+ assert_eq!(true, blkc.level_accessible(10));
+ assert_eq!(true, blkc.level_accessible(100));
+ let v0 = blkc.aes_key(0).unwrap().unwrap();
+ let v10 = blkc.aes_key(10).unwrap().unwrap();
+ assert_eq!(Some(&v0), blkc.aes_key(0)?.as_ref());
+ assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
+ blkc.advance_boot_level(5)?;
+ assert_eq!(false, blkc.level_accessible(0));
+ assert_eq!(true, blkc.level_accessible(9));
+ assert_eq!(true, blkc.level_accessible(10));
+ assert_eq!(true, blkc.level_accessible(100));
+ assert_eq!(None, blkc.aes_key(0)?);
+ assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
+ blkc.advance_boot_level(10)?;
+ assert_eq!(false, blkc.level_accessible(0));
+ assert_eq!(false, blkc.level_accessible(9));
+ assert_eq!(true, blkc.level_accessible(10));
+ assert_eq!(true, blkc.level_accessible(100));
+ assert_eq!(None, blkc.aes_key(0)?);
+ assert_eq!(Some(&v10), blkc.aes_key(10)?.as_ref());
+ blkc.advance_boot_level(0)?;
+ assert_eq!(false, blkc.level_accessible(0));
+ assert_eq!(false, blkc.level_accessible(9));
+ assert_eq!(true, blkc.level_accessible(10));
+ assert_eq!(true, blkc.level_accessible(100));
+ assert_eq!(None, blkc.aes_key(0)?);
+ assert_eq!(Some(v10), blkc.aes_key(10)?);
+ blkc.finish();
+ assert_eq!(false, blkc.level_accessible(0));
+ assert_eq!(false, blkc.level_accessible(9));
+ assert_eq!(false, blkc.level_accessible(10));
+ assert_eq!(false, blkc.level_accessible(100));
+ assert_eq!(None, blkc.aes_key(0)?);
+ assert_eq!(None, blkc.aes_key(10)?);
+ Ok(())
+ }
+}
diff --git a/keystore2/src/crypto/Android.bp b/keystore2/src/crypto/Android.bp
index 21c9b74..3ba47cd 100644
--- a/keystore2/src/crypto/Android.bp
+++ b/keystore2/src/crypto/Android.bp
@@ -59,27 +59,27 @@
shared_libs: ["libcrypto"],
bindgen_flags: [
"--size_t-is-usize",
- "--whitelist-function", "randomBytes",
- "--whitelist-function", "AES_gcm_encrypt",
- "--whitelist-function", "AES_gcm_decrypt",
- "--whitelist-function", "CreateKeyId",
- "--whitelist-function", "generateKeyFromPassword",
- "--whitelist-function", "HKDFExtract",
- "--whitelist-function", "HKDFExpand",
- "--whitelist-function", "ECDHComputeKey",
- "--whitelist-function", "ECKEYGenerateKey",
- "--whitelist-function", "ECKEYMarshalPrivateKey",
- "--whitelist-function", "ECKEYParsePrivateKey",
- "--whitelist-function", "EC_KEY_get0_public_key",
- "--whitelist-function", "ECPOINTPoint2Oct",
- "--whitelist-function", "ECPOINTOct2Point",
- "--whitelist-function", "EC_KEY_free",
- "--whitelist-function", "EC_POINT_free",
- "--whitelist-function", "extractSubjectFromCertificate",
- "--whitelist-type", "EC_KEY",
- "--whitelist-type", "EC_POINT",
- "--whitelist-var", "EC_MAX_BYTES",
- "--whitelist-var", "EVP_MAX_MD_SIZE",
+ "--allowlist-function", "randomBytes",
+ "--allowlist-function", "AES_gcm_encrypt",
+ "--allowlist-function", "AES_gcm_decrypt",
+ "--allowlist-function", "CreateKeyId",
+ "--allowlist-function", "generateKeyFromPassword",
+ "--allowlist-function", "HKDFExtract",
+ "--allowlist-function", "HKDFExpand",
+ "--allowlist-function", "ECDHComputeKey",
+ "--allowlist-function", "ECKEYGenerateKey",
+ "--allowlist-function", "ECKEYMarshalPrivateKey",
+ "--allowlist-function", "ECKEYParsePrivateKey",
+ "--allowlist-function", "EC_KEY_get0_public_key",
+ "--allowlist-function", "ECPOINTPoint2Oct",
+ "--allowlist-function", "ECPOINTOct2Point",
+ "--allowlist-function", "EC_KEY_free",
+ "--allowlist-function", "EC_POINT_free",
+ "--allowlist-function", "extractSubjectFromCertificate",
+ "--allowlist-type", "EC_KEY",
+ "--allowlist-type", "EC_POINT",
+ "--allowlist-var", "EC_MAX_BYTES",
+ "--allowlist-var", "EVP_MAX_MD_SIZE",
],
cflags: ["-DBORINGSSL_NO_CXX"],
}
diff --git a/keystore2/src/crypto/certificate_utils.cpp b/keystore2/src/crypto/certificate_utils.cpp
index 31c7fb4..64bf1d0 100644
--- a/keystore2/src/crypto/certificate_utils.cpp
+++ b/keystore2/src/crypto/certificate_utils.cpp
@@ -19,14 +19,18 @@
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/mem.h>
+#include <openssl/ossl_typ.h>
#include <openssl/x509v3.h>
#include <functional>
#include <limits>
-#include <string>
#include <variant>
#include <vector>
+#ifndef __LP64__
+#include <time64.h>
+#endif
+
namespace keystore {
namespace {
@@ -167,45 +171,42 @@
return key_usage;
}
-template <typename Out, typename In> static Out saturate(In in) {
- if constexpr (std::is_signed_v<Out> == std::is_signed_v<In>) {
- if constexpr (sizeof(Out) >= sizeof(In)) {
- // Same sign, and In fits into Out. Cast is lossless.
- return static_cast<Out>(in);
- } else {
- // Out is smaller than In we may need to truncate.
- // We pick the smaller of `out::max()` and the greater of `out::min()` and `in`.
- return static_cast<Out>(
- std::min(static_cast<In>(std::numeric_limits<Out>::max()),
- std::max(static_cast<In>(std::numeric_limits<Out>::min()), in)));
- }
- } else {
- // So we have different signs. This puts the lower bound at 0 because either input or output
- // is unsigned. The upper bound is max of the smaller type or, if they are equal the max of
- // the signed type.
- if constexpr (std::is_signed_v<Out>) {
- if constexpr (sizeof(Out) > sizeof(In)) {
- return static_cast<Out>(in);
- } else {
- // Because `out` is the signed one, the lower bound of `in` is 0 and fits into
- // `out`. We just have to compare the maximum and we do it in type In because it has
- // a greater range than Out, so Out::max() is guaranteed to fit.
- return static_cast<Out>(
- std::min(static_cast<In>(std::numeric_limits<Out>::max()), in));
- }
- } else {
- // Out is unsigned. So we can return 0 if in is negative.
- if (in < 0) return 0;
- if constexpr (sizeof(Out) >= sizeof(In)) {
- // If Out is wider or equal we can assign lossless.
- return static_cast<Out>(in);
- } else {
- // Otherwise we have to take the minimum of Out::max() and `in`.
- return static_cast<Out>(
- std::min(static_cast<In>(std::numeric_limits<Out>::max()), in));
- }
- }
+// TODO: Once BoringSSL can take int64_t instead of time_t, we can go back to using
+// ASN1_TIME_set: https://bugs.chromium.org/p/boringssl/issues/detail?id=416
+std::optional<std::array<char, 16>> toTimeString(int64_t timeMillis) {
+ struct tm time;
+ // If timeMillis is negative the rounding direction should still be to the nearest previous
+ // second.
+ if (timeMillis < 0 && __builtin_add_overflow(timeMillis, -999, &timeMillis)) {
+ return std::nullopt;
}
+#if defined(__LP64__)
+ time_t timeSeconds = timeMillis / 1000;
+ if (gmtime_r(&timeSeconds, &time) == nullptr) {
+ return std::nullopt;
+ }
+#else
+ time64_t timeSeconds = timeMillis / 1000;
+ if (gmtime64_r(&timeSeconds, &time) == nullptr) {
+ return std::nullopt;
+ }
+#endif
+ std::array<char, 16> buffer;
+ if (__builtin_add_overflow(time.tm_year, 1900, &time.tm_year)) {
+ return std::nullopt;
+ }
+ if (time.tm_year >= 1950 && time.tm_year < 2050) {
+ // UTCTime according to RFC5280 4.1.2.5.1.
+ snprintf(buffer.data(), buffer.size(), "%02d%02d%02d%02d%02d%02dZ", time.tm_year % 100,
+ time.tm_mon + 1, time.tm_mday, time.tm_hour, time.tm_min, time.tm_sec);
+ } else if (time.tm_year >= 0 && time.tm_year < 10000) {
+ // GeneralizedTime according to RFC5280 4.1.2.5.2.
+ snprintf(buffer.data(), buffer.size(), "%04d%02d%02d%02d%02d%02dZ", time.tm_year,
+ time.tm_mon + 1, time.tm_mday, time.tm_hour, time.tm_min, time.tm_sec);
+ } else {
+ return std::nullopt;
+ }
+ return buffer;
}
// Creates a rump certificate structure with serial, subject and issuer names, as well as
@@ -259,19 +260,24 @@
return std::get<CertUtilsError>(subjectName);
}
- time_t notBeforeTime = saturate<time_t>(activeDateTimeMilliSeconds / 1000);
+ auto notBeforeTime = toTimeString(activeDateTimeMilliSeconds);
+ if (!notBeforeTime) {
+ return CertUtilsError::TimeError;
+ }
// Set activation date.
ASN1_TIME_Ptr notBefore(ASN1_TIME_new());
- if (!notBefore || !ASN1_TIME_set(notBefore.get(), notBeforeTime) ||
+ if (!notBefore || !ASN1_TIME_set_string(notBefore.get(), notBeforeTime->data()) ||
!X509_set_notBefore(certificate.get(), notBefore.get() /* Don't release; copied */))
return CertUtilsError::BoringSsl;
// Set expiration date.
- time_t notAfterTime;
- notAfterTime = saturate<time_t>(usageExpireDateTimeMilliSeconds / 1000);
+ auto notAfterTime = toTimeString(usageExpireDateTimeMilliSeconds);
+ if (!notAfterTime) {
+ return CertUtilsError::TimeError;
+ }
ASN1_TIME_Ptr notAfter(ASN1_TIME_new());
- if (!notAfter || !ASN1_TIME_set(notAfter.get(), notAfterTime) ||
+ if (!notAfter || !ASN1_TIME_set_string(notAfter.get(), notAfterTime->data()) ||
!X509_set_notAfter(certificate.get(), notAfter.get() /* Don't release; copied */)) {
return CertUtilsError::BoringSsl;
}
@@ -512,10 +518,7 @@
return ASN1_STRING_Ptr(algo_str);
}
-CertUtilsError makeAndSetAlgo(X509_ALGOR* algo_field, Algo algo, Padding padding, Digest digest) {
- if (algo_field == nullptr) {
- return CertUtilsError::UnexpectedNullPointer;
- }
+std::variant<CertUtilsError, X509_ALGOR_Ptr> makeAlgo(Algo algo, Padding padding, Digest digest) {
ASN1_STRING_Ptr param;
int param_type = V_ASN1_UNDEF;
int nid = 0;
@@ -584,23 +587,29 @@
return CertUtilsError::InvalidArgument;
}
- if (!X509_ALGOR_set0(algo_field, OBJ_nid2obj(nid), param_type, param.get())) {
+ X509_ALGOR_Ptr result(X509_ALGOR_new());
+ if (!result) {
+ return CertUtilsError::MemoryAllocation;
+ }
+ if (!X509_ALGOR_set0(result.get(), OBJ_nid2obj(nid), param_type, param.get())) {
return CertUtilsError::Encoding;
}
// The X509 struct took ownership.
param.release();
- return CertUtilsError::Ok;
+ return result;
}
// This function allows for signing a
CertUtilsError signCertWith(X509* certificate,
std::function<std::vector<uint8_t>(const uint8_t*, size_t)> sign,
Algo algo, Padding padding, Digest digest) {
- if (auto error = makeAndSetAlgo(certificate->sig_alg, algo, padding, digest)) {
- return error;
+ auto algo_objV = makeAlgo(algo, padding, digest);
+ if (auto error = std::get_if<CertUtilsError>(&algo_objV)) {
+ return *error;
}
- if (auto error = makeAndSetAlgo(certificate->cert_info->signature, algo, padding, digest)) {
- return error;
+ auto& algo_obj = std::get<X509_ALGOR_Ptr>(algo_objV);
+ if (!X509_set1_signature_algo(certificate, algo_obj.get())) {
+ return CertUtilsError::BoringSsl;
}
uint8_t* cert_buf = nullptr;
@@ -615,13 +624,10 @@
return CertUtilsError::SignatureFailed;
}
- if (!ASN1_STRING_set(certificate->signature, signature.data(), signature.size())) {
+ if (!X509_set1_signature_value(certificate, signature.data(), signature.size())) {
return CertUtilsError::BoringSsl;
}
- certificate->signature->flags &= ~(0x07);
- certificate->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT;
-
return CertUtilsError::Ok;
}
diff --git a/keystore2/src/crypto/include/certificate_utils.h b/keystore2/src/crypto/include/certificate_utils.h
index 6c25b9a..cad82b6 100644
--- a/keystore2/src/crypto/include/certificate_utils.h
+++ b/keystore2/src/crypto/include/certificate_utils.h
@@ -39,6 +39,7 @@
DEFINE_OPENSSL_OBJECT_POINTER(ASN1_TIME);
DEFINE_OPENSSL_OBJECT_POINTER(EVP_PKEY);
DEFINE_OPENSSL_OBJECT_POINTER(X509);
+DEFINE_OPENSSL_OBJECT_POINTER(X509_ALGOR);
DEFINE_OPENSSL_OBJECT_POINTER(X509_EXTENSION);
DEFINE_OPENSSL_OBJECT_POINTER(X509_NAME);
DEFINE_OPENSSL_OBJECT_POINTER(EVP_PKEY_CTX);
@@ -53,6 +54,7 @@
InvalidArgument,
UnexpectedNullPointer,
SignatureFailed,
+ TimeError,
};
private:
@@ -137,6 +139,16 @@
};
/**
+ * Takes an int64_t representing UNIX epoch time in milliseconds and turns it into a UTCTime
+ * or GeneralizedTime string depending on whether the year is in the interval [1950 .. 2050).
+ * Note: The string returned in the array buffer is NUL terminated and of length 13 (UTCTime)
+ * or 15 (GeneralizedTime).
+ * @param timeMillis UNIX epoch time in milliseconds (may be negative).
+ * @return UTCTime or GeneralizedTime string.
+ */
+std::optional<std::array<char, 16>> toTimeString(int64_t timeMillis);
+
+/**
* Sets the signature specifier of the certificate and the signature according to the parameters
* c. Then it signs the certificate with the `sign` callback.
* IMPORTANT: The parameters `algo`, `padding`, and `digest` do not control the actual signing
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index 3523a9d..db23d1f 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -30,7 +30,7 @@
pub use zvec::ZVec;
/// Length of the expected initialization vector.
-pub const IV_LENGTH: usize = 16;
+pub const GCM_IV_LENGTH: usize = 12;
/// Length of the expected AEAD TAG.
pub const TAG_LENGTH: usize = 16;
/// Length of an AES 256 key in bytes.
@@ -40,9 +40,9 @@
/// Length of the expected salt for key from password generation.
pub const SALT_LENGTH: usize = 16;
-// This is the number of bytes of the GCM IV that is expected to be initialized
-// with random bytes.
-const GCM_IV_LENGTH: usize = 12;
+/// Older versions of keystore produced IVs with four extra
+/// ignored zero bytes at the end; recognize and trim those.
+pub const LEGACY_IV_LENGTH: usize = 16;
/// Generate an AES256 key, essentially 32 random bytes from the underlying
/// boringssl library discretely stuffed into a ZVec.
@@ -80,10 +80,13 @@
/// freed. Input key is taken as a slice for flexibility, but it is recommended that it is held
/// in a ZVec as well.
pub fn aes_gcm_decrypt(data: &[u8], iv: &[u8], tag: &[u8], key: &[u8]) -> Result<ZVec, Error> {
- if iv.len() != IV_LENGTH {
- return Err(Error::InvalidIvLength);
- }
-
+ // Old versions of aes_gcm_encrypt produced 16 byte IVs, but the last four bytes were ignored
+ // so trim these to the correct size.
+ let iv = match iv.len() {
+ GCM_IV_LENGTH => iv,
+ LEGACY_IV_LENGTH => &iv[..GCM_IV_LENGTH],
+ _ => return Err(Error::InvalidIvLength),
+ };
if tag.len() != TAG_LENGTH {
return Err(Error::InvalidAeadTagLength);
}
@@ -96,8 +99,8 @@
let mut result = ZVec::new(data.len())?;
// Safety: The first two arguments must point to buffers with a size given by the third
- // argument. The key must have a size of 16 or 32 bytes which we check above.
- // The iv and tag arguments must be 16 bytes, which we also check above.
+ // argument. We pass the length of the key buffer along with the key.
+ // The `iv` buffer must be 12 bytes and the `tag` buffer 16, which we check above.
match unsafe {
AES_gcm_decrypt(
data.as_ptr(),
@@ -118,10 +121,9 @@
/// This function accepts 128 and 256-bit keys and uses AES128 and AES256 respectively based on
/// the key length. The function generates an initialization vector. The return value is a tuple
/// of `(ciphertext, iv, tag)`.
-pub fn aes_gcm_encrypt(data: &[u8], key: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>), Error> {
- let mut iv = vec![0; IV_LENGTH];
- // Safety: iv is longer than GCM_IV_LENGTH, which is 12 while IV_LENGTH is 16.
- // The iv needs to be 16 bytes long, but the last 4 bytes remain zeroed.
+pub fn aes_gcm_encrypt(plaintext: &[u8], key: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>), Error> {
+ let mut iv = vec![0; GCM_IV_LENGTH];
+ // Safety: iv is GCM_IV_LENGTH bytes long.
if !unsafe { randomBytes(iv.as_mut_ptr(), GCM_IV_LENGTH) } {
return Err(Error::RandomNumberGenerationFailed);
}
@@ -131,21 +133,25 @@
_ => return Err(Error::InvalidKeyLength),
}
- let mut result: Vec<u8> = vec![0; data.len()];
+ let mut ciphertext: Vec<u8> = vec![0; plaintext.len()];
let mut tag: Vec<u8> = vec![0; TAG_LENGTH];
- match unsafe {
+ // Safety: The first two arguments must point to buffers with a size given by the third
+ // argument. We pass the length of the key buffer along with the key.
+ // The `iv` buffer must be 12 bytes and the `tag` buffer 16, which we check above.
+ if unsafe {
AES_gcm_encrypt(
- data.as_ptr(),
- result.as_mut_ptr(),
- data.len(),
+ plaintext.as_ptr(),
+ ciphertext.as_mut_ptr(),
+ plaintext.len(),
key.as_ptr(),
key.len(),
iv.as_ptr(),
tag.as_mut_ptr(),
)
} {
- true => Ok((result, iv, tag)),
- false => Err(Error::EncryptionFailed),
+ Ok((ciphertext, iv, tag))
+ } else {
+ Err(Error::EncryptionFailed)
}
}
diff --git a/keystore2/src/crypto/tests/certificate_utils_test.cpp b/keystore2/src/crypto/tests/certificate_utils_test.cpp
index 119c3fa..bd94928 100644
--- a/keystore2/src/crypto/tests/certificate_utils_test.cpp
+++ b/keystore2/src/crypto/tests/certificate_utils_test.cpp
@@ -315,3 +315,23 @@
EVP_PKEY_Ptr decoded_pkey(X509_get_pubkey(decoded_cert.get()));
ASSERT_TRUE(X509_verify(decoded_cert.get(), decoded_pkey.get()));
}
+
+TEST(TimeStringTests, toTimeStringTest) {
+ // Two test vectors that need to result in UTCTime
+ ASSERT_EQ(std::string(toTimeString(1622758591000)->data()), std::string("210603221631Z"));
+ ASSERT_EQ(std::string(toTimeString(0)->data()), std::string("700101000000Z"));
+ // Two test vectors that need to result in GeneralizedTime.
+ ASSERT_EQ(std::string(toTimeString(16227585910000)->data()), std::string("24840325064510Z"));
+ ASSERT_EQ(std::string(toTimeString(-1622758591000)->data()), std::string("19180731014329Z"));
+
+ // Highest possible UTCTime
+ ASSERT_EQ(std::string(toTimeString(2524607999999)->data()), "491231235959Z");
+ // And one millisecond later must be GeneralizedTime.
+ ASSERT_EQ(std::string(toTimeString(2524608000000)->data()), "20500101000000Z");
+
+ // Earliest possible UTCTime
+ ASSERT_EQ(std::string(toTimeString(-631152000000)->data()), "500101000000Z");
+ // And one millisecond earlier must be GeneralizedTime.
+ // This also checks that the rounding direction does not flip when the input is negative.
+ ASSERT_EQ(std::string(toTimeString(-631152000001)->data()), "19491231235959Z");
+}
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index da482f1..b4122bb 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -41,37 +41,37 @@
//! from the database module these functions take permission check
//! callbacks.
-#![allow(clippy::needless_question_mark)]
+mod perboot;
+pub(crate) mod utils;
+mod versioning;
use crate::impl_metadata; // This is in db_utils.rs
use crate::key_parameter::{KeyParameter, Tag};
use crate::permission::KeyPermSet;
-use crate::utils::{get_current_time_in_seconds, AID_USER_OFFSET};
-use crate::{
- db_utils::{self, SqlField},
- gc::Gc,
- super_key::USER_SUPER_KEY,
-};
+use crate::utils::{get_current_time_in_milliseconds, watchdog as wd, AID_USER_OFFSET};
use crate::{
error::{Error as KsError, ErrorCode, ResponseCode},
super_key::SuperKeyType,
};
+use crate::{gc::Gc, super_key::USER_SUPER_KEY};
use anyhow::{anyhow, Context, Result};
use std::{convert::TryFrom, convert::TryInto, ops::Deref, time::SystemTimeError};
+use utils as db_utils;
+use utils::SqlField;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
HardwareAuthToken::HardwareAuthToken,
HardwareAuthenticatorType::HardwareAuthenticatorType, SecurityLevel::SecurityLevel,
};
-use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
- Timestamp::Timestamp,
-};
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor,
};
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
AttestationPoolStatus::AttestationPoolStatus,
};
+use statslog_rust::keystore2_storage_stats::{
+ Keystore2StorageStats, StorageType as StatsdStorageType,
+};
use keystore2_crypto::ZVec;
use lazy_static::lazy_static;
@@ -79,7 +79,7 @@
#[cfg(not(test))]
use rand::prelude::random;
use rusqlite::{
- params,
+ params, params_from_iter,
types::FromSql,
types::FromSqlResult,
types::ToSqlOutput,
@@ -90,7 +90,7 @@
use std::{
collections::{HashMap, HashSet},
path::Path,
- sync::{Condvar, Mutex},
+ sync::{Arc, Condvar, Mutex},
time::{Duration, SystemTime},
};
@@ -188,6 +188,9 @@
KmUuid(Uuid) with accessor km_uuid,
/// If the key is ECDH encrypted, this is the ephemeral public key
PublicKey(Vec<u8>) with accessor public_key,
+ /// If the key is encrypted with a MaxBootLevel key, this is the boot level
+ /// of that key
+ MaxBootLevel(i32) with accessor max_boot_level,
// --- ADD NEW META DATA FIELDS HERE ---
// For backwards compatibility add new entries only to
// end of this list and above this comment.
@@ -729,33 +732,29 @@
/// ownership. It also implements all of Keystore 2.0's database functionality.
pub struct KeystoreDB {
conn: Connection,
- gc: Option<Gc>,
+ gc: Option<Arc<Gc>>,
+ perboot: Arc<perboot::PerbootDB>,
}
/// Database representation of the monotonic time retrieved from the system call clock_gettime with
-/// CLOCK_MONOTONIC_RAW. Stores monotonic time as i64 in seconds.
+/// CLOCK_MONOTONIC_RAW. Stores monotonic time as i64 in milliseconds.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct MonotonicRawTime(i64);
impl MonotonicRawTime {
/// Constructs a new MonotonicRawTime
pub fn now() -> Self {
- Self(get_current_time_in_seconds())
+ Self(get_current_time_in_milliseconds())
}
- /// Constructs a new MonotonicRawTime from a given number of seconds.
- pub fn from_secs(val: i64) -> Self {
- Self(val)
+ /// Returns the value of MonotonicRawTime in milliseconds as i64
+ pub fn milliseconds(&self) -> i64 {
+ self.0
}
/// Returns the integer value of MonotonicRawTime as i64
pub fn seconds(&self) -> i64 {
- self.0
- }
-
- /// Returns the value of MonotonicRawTime in milli seconds as i64
- pub fn milli_seconds(&self) -> i64 {
- self.0 * 1000
+ self.0 / 1000
}
/// Like i64::checked_sub.
@@ -778,8 +777,10 @@
/// This struct encapsulates the information to be stored in the database about the auth tokens
/// received by keystore.
+#[derive(Clone)]
pub struct AuthTokenEntry {
auth_token: HardwareAuthToken,
+ // Time received in milliseconds
time_received: MonotonicRawTime,
}
@@ -824,16 +825,24 @@
impl KeystoreDB {
const UNASSIGNED_KEY_ID: i64 = -1i64;
- const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
+ const CURRENT_DB_VERSION: u32 = 1;
+ const UPGRADERS: &'static [fn(&Transaction) -> Result<u32>] = &[Self::from_0_to_1];
- /// This creates a PerBootDbKeepAlive object to keep the per boot database alive.
- pub fn keep_perboot_db_alive() -> Result<PerBootDbKeepAlive> {
- let conn = Connection::open_in_memory()
- .context("In keep_perboot_db_alive: Failed to initialize SQLite connection.")?;
+ /// Name of the file that holds the cross-boot persistent database.
+ pub const PERSISTENT_DB_FILENAME: &'static str = &"persistent.sqlite";
- conn.execute("ATTACH DATABASE ? as perboot;", params![Self::PERBOOT_DB_FILE_NAME])
- .context("In keep_perboot_db_alive: Failed to attach database perboot.")?;
- Ok(PerBootDbKeepAlive(conn))
+ /// Set write-ahead logging mode on the persistent database found in `db_root`.
+ pub fn set_wal_mode(db_root: &Path) -> Result<()> {
+ let path = Self::make_persistent_path(&db_root)?;
+ let conn =
+ Connection::open(path).context("In KeystoreDB::set_wal_mode: Failed to open DB")?;
+ let mode: String = conn
+ .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
+ .context("In KeystoreDB::set_wal_mode: Failed to set journal_mode")?;
+ match mode.as_str() {
+ "wal" => Ok(()),
+ _ => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", mode)),
+ }
}
/// This will create a new database connection connecting the two
@@ -841,27 +850,41 @@
/// It also attempts to initialize all of the tables.
/// KeystoreDB cannot be used by multiple threads.
/// Each thread should open their own connection using `thread_local!`.
- pub fn new(db_root: &Path, gc: Option<Gc>) -> Result<Self> {
- // Build the path to the sqlite file.
- let mut persistent_path = db_root.to_path_buf();
- persistent_path.push("persistent.sqlite");
+ pub fn new(db_root: &Path, gc: Option<Arc<Gc>>) -> Result<Self> {
+ let _wp = wd::watch_millis("KeystoreDB::new", 500);
- // Now convert them to strings prefixed with "file:"
- let mut persistent_path_str = "file:".to_owned();
- persistent_path_str.push_str(&persistent_path.to_string_lossy());
+ let persistent_path = Self::make_persistent_path(&db_root)?;
+ let conn = Self::make_connection(&persistent_path)?;
- let conn = Self::make_connection(&persistent_path_str, &Self::PERBOOT_DB_FILE_NAME)?;
-
- // On busy fail Immediately. It is unlikely to succeed given a bug in sqlite.
- conn.busy_handler(None).context("In KeystoreDB::new: Failed to set busy handler.")?;
-
- let mut db = Self { conn, gc };
+ let mut db = Self { conn, gc, perboot: perboot::PERBOOT_DB.clone() };
db.with_transaction(TransactionBehavior::Immediate, |tx| {
+ versioning::upgrade_database(tx, Self::CURRENT_DB_VERSION, Self::UPGRADERS)
+ .context("In KeystoreDB::new: trying to upgrade database.")?;
Self::init_tables(tx).context("Trying to initialize tables.").no_gc()
})?;
Ok(db)
}
+    // This upgrade function deletes all MAX_BOOT_LEVEL keys that were generated before
+ // cryptographic binding to the boot level keys was implemented.
+ fn from_0_to_1(tx: &Transaction) -> Result<u32> {
+ tx.execute(
+ "UPDATE persistent.keyentry SET state = ?
+ WHERE
+ id IN (SELECT keyentryid FROM persistent.keyparameter WHERE tag = ?)
+ AND
+ id NOT IN (
+ SELECT keyentryid FROM persistent.blobentry
+ WHERE id IN (
+ SELECT blobentryid FROM persistent.blobmetadata WHERE tag = ?
+ )
+ );",
+ params![KeyLifeCycle::Unreferenced, Tag::MAX_BOOT_LEVEL.0, BlobMetaData::MaxBootLevel],
+ )
+ .context("In from_0_to_1: Failed to delete logical boot level keys.")?;
+ Ok(1)
+ }
+
fn init_tables(tx: &Transaction) -> Result<()> {
tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.keyentry (
@@ -969,41 +992,22 @@
)
.context("Failed to initialize \"grant\" table.")?;
- //TODO: only drop the following two perboot tables if this is the first start up
- //during the boot (b/175716626).
- // tx.execute("DROP TABLE IF EXISTS perboot.authtoken;", NO_PARAMS)
- // .context("Failed to drop perboot.authtoken table")?;
- tx.execute(
- "CREATE TABLE IF NOT EXISTS perboot.authtoken (
- id INTEGER PRIMARY KEY,
- challenge INTEGER,
- user_id INTEGER,
- auth_id INTEGER,
- authenticator_type INTEGER,
- timestamp INTEGER,
- mac BLOB,
- time_received INTEGER,
- UNIQUE(user_id, auth_id, authenticator_type));",
- NO_PARAMS,
- )
- .context("Failed to initialize \"authtoken\" table.")?;
-
- // tx.execute("DROP TABLE IF EXISTS perboot.metadata;", NO_PARAMS)
- // .context("Failed to drop perboot.metadata table")?;
- // metadata table stores certain miscellaneous information required for keystore functioning
- // during a boot cycle, as key-value pairs.
- tx.execute(
- "CREATE TABLE IF NOT EXISTS perboot.metadata (
- key TEXT,
- value BLOB,
- UNIQUE(key));",
- NO_PARAMS,
- )
- .context("Failed to initialize \"metadata\" table.")?;
Ok(())
}
- fn make_connection(persistent_file: &str, perboot_file: &str) -> Result<Connection> {
+ fn make_persistent_path(db_root: &Path) -> Result<String> {
+ // Build the path to the sqlite file.
+ let mut persistent_path = db_root.to_path_buf();
+ persistent_path.push(Self::PERSISTENT_DB_FILENAME);
+
+ // Now convert them to strings prefixed with "file:"
+ let mut persistent_path_str = "file:".to_owned();
+ persistent_path_str.push_str(&persistent_path.to_string_lossy());
+
+ Ok(persistent_path_str)
+ }
+
+ fn make_connection(persistent_file: &str) -> Result<Connection> {
let conn =
Connection::open_in_memory().context("Failed to initialize SQLite connection.")?;
@@ -1021,70 +1025,184 @@
}
break;
}
- loop {
- if let Err(e) = conn
- .execute("ATTACH DATABASE ? as perboot;", params![perboot_file])
- .context("Failed to attach database perboot.")
- {
- if Self::is_locked_error(&e) {
- std::thread::sleep(std::time::Duration::from_micros(500));
- continue;
- } else {
- return Err(e);
- }
- }
- break;
- }
+
+ // Drop the cache size from default (2M) to 0.5M
+ conn.execute("PRAGMA persistent.cache_size = -500;", params![])
+ .context("Failed to decrease cache size for persistent db")?;
Ok(conn)
}
+ fn do_table_size_query(
+ &mut self,
+ storage_type: StatsdStorageType,
+ query: &str,
+ params: &[&str],
+ ) -> Result<Keystore2StorageStats> {
+ let (total, unused) = self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ tx.query_row(query, params_from_iter(params), |row| Ok((row.get(0)?, row.get(1)?)))
+ .with_context(|| {
+ format!("get_storage_stat: Error size of storage type {}", storage_type as i32)
+ })
+ .no_gc()
+ })?;
+ Ok(Keystore2StorageStats { storage_type, size: total, unused_size: unused })
+ }
+
+ fn get_total_size(&mut self) -> Result<Keystore2StorageStats> {
+ self.do_table_size_query(
+ StatsdStorageType::Database,
+ "SELECT page_count * page_size, freelist_count * page_size
+ FROM pragma_page_count('persistent'),
+ pragma_page_size('persistent'),
+ persistent.pragma_freelist_count();",
+ &[],
+ )
+ }
+
+ fn get_table_size(
+ &mut self,
+ storage_type: StatsdStorageType,
+ schema: &str,
+ table: &str,
+ ) -> Result<Keystore2StorageStats> {
+ self.do_table_size_query(
+ storage_type,
+ "SELECT pgsize,unused FROM dbstat(?1)
+ WHERE name=?2 AND aggregate=TRUE;",
+ &[schema, table],
+ )
+ }
+
+    /// Fetches a storage statistics atom for a given storage type. For storage
+ /// types that map to a table, information about the table's storage is
+ /// returned. Requests for storage types that are not DB tables return None.
+ pub fn get_storage_stat(
+ &mut self,
+ storage_type: StatsdStorageType,
+ ) -> Result<Keystore2StorageStats> {
+ let _wp = wd::watch_millis("KeystoreDB::get_storage_stat", 500);
+
+ match storage_type {
+ StatsdStorageType::Database => self.get_total_size(),
+ StatsdStorageType::KeyEntry => {
+ self.get_table_size(storage_type, "persistent", "keyentry")
+ }
+ StatsdStorageType::KeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "keyentry_id_index")
+ }
+ StatsdStorageType::KeyEntryDomainNamespaceIndex => {
+ self.get_table_size(storage_type, "persistent", "keyentry_domain_namespace_index")
+ }
+ StatsdStorageType::BlobEntry => {
+ self.get_table_size(storage_type, "persistent", "blobentry")
+ }
+ StatsdStorageType::BlobEntryKeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "blobentry_keyentryid_index")
+ }
+ StatsdStorageType::KeyParameter => {
+ self.get_table_size(storage_type, "persistent", "keyparameter")
+ }
+ StatsdStorageType::KeyParameterKeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "keyparameter_keyentryid_index")
+ }
+ StatsdStorageType::KeyMetadata => {
+ self.get_table_size(storage_type, "persistent", "keymetadata")
+ }
+ StatsdStorageType::KeyMetadataKeyEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "keymetadata_keyentryid_index")
+ }
+ StatsdStorageType::Grant => self.get_table_size(storage_type, "persistent", "grant"),
+ StatsdStorageType::AuthToken => {
+ // Since the table is actually a BTreeMap now, unused_size is not meaningfully
+ // reportable
+ // Size provided is only an approximation
+ Ok(Keystore2StorageStats {
+ storage_type,
+ size: (self.perboot.auth_tokens_len() * std::mem::size_of::<AuthTokenEntry>())
+ as i64,
+ unused_size: 0,
+ })
+ }
+ StatsdStorageType::BlobMetadata => {
+ self.get_table_size(storage_type, "persistent", "blobmetadata")
+ }
+ StatsdStorageType::BlobMetadataBlobEntryIdIndex => {
+ self.get_table_size(storage_type, "persistent", "blobmetadata_blobentryid_index")
+ }
+ _ => Err(anyhow::Error::msg(format!(
+ "Unsupported storage type: {}",
+ storage_type as i32
+ ))),
+ }
+ }
+
/// This function is intended to be used by the garbage collector.
- /// It deletes the blob given by `blob_id_to_delete`. It then tries to find a superseded
- /// key blob that might need special handling by the garbage collector.
+ /// It deletes the blobs given by `blob_ids_to_delete`. It then tries to find up to `max_blobs`
+ /// superseded key blobs that might need special handling by the garbage collector.
/// If no further superseded blobs can be found it deletes all other superseded blobs that don't
/// need special handling and returns None.
- pub fn handle_next_superseded_blob(
+ pub fn handle_next_superseded_blobs(
&mut self,
- blob_id_to_delete: Option<i64>,
- ) -> Result<Option<(i64, Vec<u8>, BlobMetaData)>> {
+ blob_ids_to_delete: &[i64],
+ max_blobs: usize,
+ ) -> Result<Vec<(i64, Vec<u8>, BlobMetaData)>> {
+ let _wp = wd::watch_millis("KeystoreDB::handle_next_superseded_blob", 500);
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- // Delete the given blob if one was given.
- if let Some(blob_id_to_delete) = blob_id_to_delete {
+ // Delete the given blobs.
+ for blob_id in blob_ids_to_delete {
tx.execute(
"DELETE FROM persistent.blobmetadata WHERE blobentryid = ?;",
- params![blob_id_to_delete],
+ params![blob_id],
)
.context("Trying to delete blob metadata.")?;
- tx.execute(
- "DELETE FROM persistent.blobentry WHERE id = ?;",
- params![blob_id_to_delete],
- )
- .context("Trying to blob.")?;
+ tx.execute("DELETE FROM persistent.blobentry WHERE id = ?;", params![blob_id])
+ .context("Trying to blob.")?;
}
- // Find another superseded keyblob load its metadata and return it.
- if let Some((blob_id, blob)) = tx
- .query_row(
- "SELECT id, blob FROM persistent.blobentry
- WHERE subcomponent_type = ?
- AND (
- id NOT IN (
- SELECT MAX(id) FROM persistent.blobentry
- WHERE subcomponent_type = ?
- GROUP BY keyentryid, subcomponent_type
- )
- OR keyentryid NOT IN (SELECT id FROM persistent.keyentry)
- );",
- params![SubComponentType::KEY_BLOB, SubComponentType::KEY_BLOB],
- |row| Ok((row.get(0)?, row.get(1)?)),
- )
- .optional()
- .context("Trying to query superseded blob.")?
- {
- let blob_metadata = BlobMetaData::load_from_db(blob_id, tx)
- .context("Trying to load blob metadata.")?;
- return Ok(Some((blob_id, blob, blob_metadata))).no_gc();
+ Self::cleanup_unreferenced(tx).context("Trying to cleanup unreferenced.")?;
+
+            // Find up to max_blobs more superseded key blobs, load their metadata and return it.
+ let result: Vec<(i64, Vec<u8>)> = {
+ let mut stmt = tx
+ .prepare(
+ "SELECT id, blob FROM persistent.blobentry
+ WHERE subcomponent_type = ?
+ AND (
+ id NOT IN (
+ SELECT MAX(id) FROM persistent.blobentry
+ WHERE subcomponent_type = ?
+ GROUP BY keyentryid, subcomponent_type
+ )
+ OR keyentryid NOT IN (SELECT id FROM persistent.keyentry)
+ ) LIMIT ?;",
+ )
+ .context("Trying to prepare query for superseded blobs.")?;
+
+ let rows = stmt
+ .query_map(
+ params![
+ SubComponentType::KEY_BLOB,
+ SubComponentType::KEY_BLOB,
+ max_blobs as i64,
+ ],
+ |row| Ok((row.get(0)?, row.get(1)?)),
+ )
+ .context("Trying to query superseded blob.")?;
+
+ rows.collect::<Result<Vec<(i64, Vec<u8>)>, rusqlite::Error>>()
+ .context("Trying to extract superseded blobs.")?
+ };
+
+ let result = result
+ .into_iter()
+ .map(|(blob_id, blob)| {
+ Ok((blob_id, blob, BlobMetaData::load_from_db(blob_id, tx)?))
+ })
+ .collect::<Result<Vec<(i64, Vec<u8>, BlobMetaData)>>>()
+ .context("Trying to load blob metadata.")?;
+ if !result.is_empty() {
+ return Ok(result).no_gc();
}
// We did not find any superseded key blob, so let's remove other superseded blob in
@@ -1103,9 +1221,9 @@
)
.context("Trying to purge superseded blobs.")?;
- Ok(None).no_gc()
+ Ok(vec![]).no_gc()
})
- .context("In handle_next_superseded_blob.")
+ .context("In handle_next_superseded_blobs.")
}
/// This maintenance function should be called only once before the database is used for the
@@ -1117,6 +1235,8 @@
/// Unlike with `mark_unreferenced`, we don't need to purge grants, because only keys that made
/// it to `KeyLifeCycle::Live` may have grants.
pub fn cleanup_leftovers(&mut self) -> Result<usize> {
+ let _wp = wd::watch_millis("KeystoreDB::cleanup_leftovers", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
tx.execute(
"UPDATE persistent.keyentry SET state = ? WHERE state = ?;",
@@ -1136,6 +1256,8 @@
alias: &str,
key_type: KeyType,
) -> Result<bool> {
+ let _wp = wd::watch_millis("KeystoreDB::key_exists", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_descriptor =
KeyDescriptor { domain, nspace, alias: Some(alias.to_string()), blob: None };
@@ -1161,6 +1283,8 @@
blob_metadata: &BlobMetaData,
key_metadata: &KeyMetaData,
) -> Result<KeyEntry> {
+ let _wp = wd::watch_millis("KeystoreDB::store_super_key", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_id = Self::insert_with_retry(|id| {
tx.execute(
@@ -1204,6 +1328,8 @@
key_type: &SuperKeyType,
user_id: u32,
) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
+ let _wp = wd::watch_millis("KeystoreDB::load_super_key", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_descriptor = KeyDescriptor {
domain: Domain::APP,
@@ -1243,6 +1369,8 @@
where
F: Fn() -> Result<(Vec<u8>, BlobMetaData)>,
{
+ let _wp = wd::watch_millis("KeystoreDB::get_or_create_key_with", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let id = {
let mut stmt = tx
@@ -1321,18 +1449,6 @@
.context("In get_or_create_key_with.")
}
- /// SQLite3 seems to hold a shared mutex while running the busy handler when
- /// waiting for the database file to become available. This makes it
- /// impossible to successfully recover from a locked database when the
- /// transaction holding the device busy is in the same process on a
- /// different connection. As a result the busy handler has to time out and
- /// fail in order to make progress.
- ///
- /// Instead, we set the busy handler to None (return immediately). And catch
- /// Busy and Locked errors (the latter occur on in memory databases with
- /// shared cache, e.g., the per-boot database.) and restart the transaction
- /// after a grace period of half a millisecond.
- ///
/// Creates a transaction with the given behavior and executes f with the new transaction.
/// The transaction is committed only if f returns Ok and retried if DatabaseBusy
/// or DatabaseLocked is encountered.
@@ -1389,10 +1505,13 @@
&mut self,
domain: &Domain,
namespace: &i64,
+ key_type: KeyType,
km_uuid: &Uuid,
) -> Result<KeyIdGuard> {
+ let _wp = wd::watch_millis("KeystoreDB::create_key_entry", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::create_key_entry_internal(tx, domain, namespace, km_uuid).no_gc()
+ Self::create_key_entry_internal(tx, domain, namespace, key_type, km_uuid).no_gc()
})
.context("In create_key_entry.")
}
@@ -1401,6 +1520,7 @@
tx: &Transaction,
domain: &Domain,
namespace: &i64,
+ key_type: KeyType,
km_uuid: &Uuid,
) -> Result<KeyIdGuard> {
match *domain {
@@ -1418,7 +1538,7 @@
VALUES(?, ?, ?, ?, NULL, ?, ?);",
params![
id,
- KeyType::Client,
+ key_type,
domain.0 as u32,
*namespace,
KeyLifeCycle::Existing,
@@ -1442,6 +1562,8 @@
private_key: &[u8],
km_uuid: &Uuid,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::create_attestation_key_entry", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_id = KEY_ID_LOCK.get(
Self::insert_with_retry(|id| {
@@ -1484,6 +1606,8 @@
blob: Option<&[u8]>,
blob_metadata: Option<&BlobMetaData>,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::set_blob", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
Self::set_blob_internal(&tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
})
@@ -1495,6 +1619,8 @@
/// We use this to insert key blobs into the database which can then be garbage collected
/// lazily by the key garbage collector.
pub fn set_deleted_blob(&mut self, blob: &[u8], blob_metadata: &BlobMetaData) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::set_deleted_blob", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
Self::set_blob_internal(
&tx,
@@ -1605,6 +1731,8 @@
expiration_date: i64,
km_uuid: &Uuid,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::store_signed_attestation_certificate_chain", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let mut stmt = tx
.prepare(
@@ -1674,6 +1802,8 @@
namespace: i64,
km_uuid: &Uuid,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::assign_attestation_key", 500);
+
match domain {
Domain::APP | Domain::SELINUX => {}
_ => {
@@ -1736,6 +1866,8 @@
num_keys: i32,
km_uuid: &Uuid,
) -> Result<Vec<Vec<u8>>> {
+ let _wp = wd::watch_millis("KeystoreDB::fetch_unsigned_attestation_keys", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let mut stmt = tx
.prepare(
@@ -1761,7 +1893,7 @@
km_uuid,
num_keys
],
- |row| Ok(row.get(0)?),
+ |row| row.get(0),
)?
.collect::<rusqlite::Result<Vec<Vec<u8>>>>()
.context("Failed to execute statement")?;
@@ -1773,6 +1905,8 @@
/// Removes any keys that have expired as of the current time. Returns the number of keys
/// marked unreferenced that are bound to be garbage collected.
pub fn delete_expired_attestation_keys(&mut self) -> Result<i32> {
+ let _wp = wd::watch_millis("KeystoreDB::delete_expired_attestation_keys", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let mut stmt = tx
.prepare(
@@ -1808,6 +1942,8 @@
/// Deletes all remotely provisioned attestation keys in the system, regardless of the state
/// they are in. This is useful primarily as a testing mechanism.
pub fn delete_all_attestation_keys(&mut self) -> Result<i64> {
+ let _wp = wd::watch_millis("KeystoreDB::delete_all_attestation_keys", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let mut stmt = tx
.prepare(
@@ -1816,7 +1952,7 @@
)
.context("Failed to prepare statement")?;
let keys_to_delete = stmt
- .query_map(params![KeyType::Attestation], |row| Ok(row.get(0)?))?
+ .query_map(params![KeyType::Attestation], |row| row.get(0))?
.collect::<rusqlite::Result<Vec<i64>>>()
.context("Failed to execute statement")?;
let num_deleted = keys_to_delete
@@ -1839,6 +1975,8 @@
date: i64,
km_uuid: &Uuid,
) -> Result<AttestationPoolStatus> {
+ let _wp = wd::watch_millis("KeystoreDB::get_attestation_pool_status", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let mut stmt = tx.prepare(
"SELECT data
@@ -1859,7 +1997,7 @@
km_uuid,
KeyLifeCycle::Live
],
- |row| Ok(row.get(0)?),
+ |row| row.get(0),
)?
.collect::<rusqlite::Result<Vec<DateTime>>>()
.context("Failed to execute metadata statement")?;
@@ -1906,6 +2044,8 @@
namespace: i64,
km_uuid: &Uuid,
) -> Result<Option<CertificateChain>> {
+ let _wp = wd::watch_millis("KeystoreDB::retrieve_attestation_key_and_cert_chain", 500);
+
match domain {
Domain::APP | Domain::SELINUX => {}
_ => {
@@ -1989,6 +2129,7 @@
alias: &str,
domain: &Domain,
namespace: &i64,
+ key_type: KeyType,
) -> Result<bool> {
match *domain {
Domain::APP | Domain::SELINUX => {}
@@ -2003,15 +2144,15 @@
.execute(
"UPDATE persistent.keyentry
SET alias = NULL, domain = NULL, namespace = NULL, state = ?
- WHERE alias = ? AND domain = ? AND namespace = ?;",
- params![KeyLifeCycle::Unreferenced, alias, domain.0 as u32, namespace],
+ WHERE alias = ? AND domain = ? AND namespace = ? AND key_type = ?;",
+ params![KeyLifeCycle::Unreferenced, alias, domain.0 as u32, namespace, key_type],
)
.context("In rebind_alias: Failed to rebind existing entry.")?;
let result = tx
.execute(
"UPDATE persistent.keyentry
SET alias = ?, state = ?
- WHERE id = ? AND domain = ? AND namespace = ? AND state = ?;",
+ WHERE id = ? AND domain = ? AND namespace = ? AND state = ? AND key_type = ?;",
params![
alias,
KeyLifeCycle::Live,
@@ -2019,6 +2160,7 @@
domain.0 as u32,
*namespace,
KeyLifeCycle::Existing,
+ key_type,
],
)
.context("In rebind_alias: Failed to set alias.")?;
@@ -2031,20 +2173,89 @@
Ok(updated != 0)
}
+ /// Moves the key given by KeyIdGuard to the new location at `destination`. If the destination
+ /// is already occupied by a key, this function fails with `ResponseCode::INVALID_ARGUMENT`.
+ pub fn migrate_key_namespace(
+ &mut self,
+ key_id_guard: KeyIdGuard,
+ destination: &KeyDescriptor,
+ caller_uid: u32,
+ check_permission: impl Fn(&KeyDescriptor) -> Result<()>,
+ ) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::migrate_key_namespace", 500);
+
+ let destination = match destination.domain {
+ Domain::APP => KeyDescriptor { nspace: caller_uid as i64, ..(*destination).clone() },
+ Domain::SELINUX => (*destination).clone(),
+ domain => {
+ return Err(KsError::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context(format!("Domain {:?} must be either APP or SELINUX.", domain));
+ }
+ };
+
+ // Security critical: Must return immediately on failure. Do not remove the '?';
+ check_permission(&destination)
+ .context("In migrate_key_namespace: Trying to check permission.")?;
+
+ let alias = destination
+ .alias
+ .as_ref()
+ .ok_or(KsError::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context("In migrate_key_namespace: Alias must be specified.")?;
+
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ // Query the destination location. If there is a key, the migration request fails.
+ if tx
+ .query_row(
+ "SELECT id FROM persistent.keyentry
+ WHERE alias = ? AND domain = ? AND namespace = ?;",
+ params![alias, destination.domain.0, destination.nspace],
+ |_| Ok(()),
+ )
+ .optional()
+ .context("Failed to query destination.")?
+ .is_some()
+ {
+ return Err(KsError::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context("Target already exists.");
+ }
+
+ let updated = tx
+ .execute(
+ "UPDATE persistent.keyentry
+ SET alias = ?, domain = ?, namespace = ?
+ WHERE id = ?;",
+ params![alias, destination.domain.0, destination.nspace, key_id_guard.id()],
+ )
+ .context("Failed to update key entry.")?;
+
+ if updated != 1 {
+ return Err(KsError::sys())
+ .context(format!("Update succeeded, but {} rows were updated.", updated));
+ }
+ Ok(()).no_gc()
+ })
+ .context("In migrate_key_namespace:")
+ }
+
/// Store a new key in a single transaction.
/// The function creates a new key entry, populates the blob, key parameter, and metadata
/// fields, and rebinds the given alias to the new key.
/// The boolean returned is a hint for the garbage collector. If true, a key was replaced,
/// is now unreferenced and needs to be collected.
+ #[allow(clippy::clippy::too_many_arguments)]
pub fn store_new_key(
&mut self,
key: &KeyDescriptor,
+ key_type: KeyType,
params: &[KeyParameter],
blob_info: &(&[u8], &BlobMetaData),
cert_info: &CertificateInfo,
metadata: &KeyMetaData,
km_uuid: &Uuid,
) -> Result<KeyIdGuard> {
+ let _wp = wd::watch_millis("KeystoreDB::store_new_key", 500);
+
let (alias, domain, namespace) = match key {
KeyDescriptor { alias: Some(alias), domain: Domain::APP, nspace, blob: None }
| KeyDescriptor { alias: Some(alias), domain: Domain::SELINUX, nspace, blob: None } => {
@@ -2056,7 +2267,7 @@
}
};
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- let key_id = Self::create_key_entry_internal(tx, &domain, namespace, km_uuid)
+ let key_id = Self::create_key_entry_internal(tx, &domain, namespace, key_type, km_uuid)
.context("Trying to create new key entry.")?;
let (blob, blob_metadata) = *blob_info;
Self::set_blob_internal(
@@ -2084,7 +2295,7 @@
Self::insert_keyparameter_internal(tx, &key_id, params)
.context("Trying to insert key parameters.")?;
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
+ let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
.context("Trying to rebind alias.")?;
Ok(key_id).do_gc(need_gc)
})
@@ -2097,9 +2308,12 @@
pub fn store_new_certificate(
&mut self,
key: &KeyDescriptor,
+ key_type: KeyType,
cert: &[u8],
km_uuid: &Uuid,
) -> Result<KeyIdGuard> {
+ let _wp = wd::watch_millis("KeystoreDB::store_new_certificate", 500);
+
let (alias, domain, namespace) = match key {
KeyDescriptor { alias: Some(alias), domain: Domain::APP, nspace, blob: None }
| KeyDescriptor { alias: Some(alias), domain: Domain::SELINUX, nspace, blob: None } => {
@@ -2112,7 +2326,7 @@
}
};
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- let key_id = Self::create_key_entry_internal(tx, &domain, namespace, km_uuid)
+ let key_id = Self::create_key_entry_internal(tx, &domain, namespace, key_type, km_uuid)
.context("Trying to create new key entry.")?;
Self::set_blob_internal(
@@ -2131,7 +2345,7 @@
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
+ let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
.context("Trying to rebind alias.")?;
Ok(key_id).do_gc(need_gc)
})
@@ -2212,11 +2426,12 @@
let mut stmt = tx
.prepare(
"SELECT keyentryid, access_vector FROM persistent.grant
- WHERE grantee = ? AND id = ?;",
+ WHERE grantee = ? AND id = ? AND
+ (SELECT state FROM persistent.keyentry WHERE id = keyentryid) = ?;",
)
.context("Domain::GRANT prepare statement failed")?;
let mut rows = stmt
- .query(params![caller_uid as i64, key.nspace])
+ .query(params![caller_uid as i64, key.nspace, KeyLifeCycle::Live])
.context("Domain:Grant: query failed.")?;
let (key_id, access_vector): (i64, i32) =
db_utils::with_rows_extract_one(&mut rows, |row| {
@@ -2379,6 +2594,8 @@
/// zero, the key also gets marked unreferenced and scheduled for deletion.
/// Returns Ok(true) if the key was marked unreferenced as a hint to the garbage collector.
pub fn check_and_update_key_usage_count(&mut self, key_id: i64) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::check_and_update_key_usage_count", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let limit: Option<i32> = tx
.query_row(
@@ -2425,6 +2642,8 @@
caller_uid: u32,
check_permission: impl Fn(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
) -> Result<(KeyIdGuard, KeyEntry)> {
+ let _wp = wd::watch_millis("KeystoreDB::load_key_entry", 500);
+
loop {
match self.load_key_entry_internal(
key,
@@ -2552,6 +2771,8 @@
caller_uid: u32,
check_permission: impl Fn(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::unbind_key", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let (key_id, access_key_descriptor, access_vector) =
Self::load_access_tuple(tx, key, key_type, caller_uid)
@@ -2581,6 +2802,8 @@
/// Delete all artifacts belonging to the namespace given by the domain-namespace tuple.
/// This leaves all of the blob entries orphaned for subsequent garbage collection.
pub fn unbind_keys_for_namespace(&mut self, domain: Domain, namespace: i64) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::unbind_keys_for_namespace", 500);
+
if !(domain == Domain::APP || domain == Domain::SELINUX) {
return Err(KsError::Rc(ResponseCode::INVALID_ARGUMENT))
.context("In unbind_keys_for_namespace.");
@@ -2590,32 +2813,33 @@
"DELETE FROM persistent.keymetadata
WHERE keyentryid IN (
SELECT id FROM persistent.keyentry
- WHERE domain = ? AND namespace = ?
+ WHERE domain = ? AND namespace = ? AND key_type = ?
);",
- params![domain.0, namespace],
+ params![domain.0, namespace, KeyType::Client],
)
.context("Trying to delete keymetadata.")?;
tx.execute(
"DELETE FROM persistent.keyparameter
WHERE keyentryid IN (
SELECT id FROM persistent.keyentry
- WHERE domain = ? AND namespace = ?
+ WHERE domain = ? AND namespace = ? AND key_type = ?
);",
- params![domain.0, namespace],
+ params![domain.0, namespace, KeyType::Client],
)
.context("Trying to delete keyparameters.")?;
tx.execute(
"DELETE FROM persistent.grant
WHERE keyentryid IN (
SELECT id FROM persistent.keyentry
- WHERE domain = ? AND namespace = ?
+ WHERE domain = ? AND namespace = ? AND key_type = ?
);",
- params![domain.0, namespace],
+ params![domain.0, namespace, KeyType::Client],
)
.context("Trying to delete grants.")?;
tx.execute(
- "DELETE FROM persistent.keyentry WHERE domain = ? AND namespace = ?;",
- params![domain.0, namespace],
+ "DELETE FROM persistent.keyentry
+ WHERE domain = ? AND namespace = ? AND key_type = ?;",
+ params![domain.0, namespace, KeyType::Client],
)
.context("Trying to delete keyentry.")?;
Ok(()).need_gc()
@@ -2623,6 +2847,47 @@
.context("In unbind_keys_for_namespace")
}
+ fn cleanup_unreferenced(tx: &Transaction) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::cleanup_unreferenced", 500);
+ {
+ tx.execute(
+ "DELETE FROM persistent.keymetadata
+ WHERE keyentryid IN (
+ SELECT id FROM persistent.keyentry
+ WHERE state = ?
+ );",
+ params![KeyLifeCycle::Unreferenced],
+ )
+ .context("Trying to delete keymetadata.")?;
+ tx.execute(
+ "DELETE FROM persistent.keyparameter
+ WHERE keyentryid IN (
+ SELECT id FROM persistent.keyentry
+ WHERE state = ?
+ );",
+ params![KeyLifeCycle::Unreferenced],
+ )
+ .context("Trying to delete keyparameters.")?;
+ tx.execute(
+ "DELETE FROM persistent.grant
+ WHERE keyentryid IN (
+ SELECT id FROM persistent.keyentry
+ WHERE state = ?
+ );",
+ params![KeyLifeCycle::Unreferenced],
+ )
+ .context("Trying to delete grants.")?;
+ tx.execute(
+ "DELETE FROM persistent.keyentry
+ WHERE state = ?;",
+ params![KeyLifeCycle::Unreferenced],
+ )
+ .context("Trying to delete keyentry.")?;
+ Result::<()>::Ok(())
+ }
+ .context("In cleanup_unreferenced")
+ }
+
/// Delete the keys created on behalf of the user, denoted by the user id.
/// Delete all the keys unless 'keep_non_super_encrypted_keys' set to true.
/// Returned boolean is to hint the garbage collector to delete the unbound keys.
@@ -2632,6 +2897,8 @@
user_id: u32,
keep_non_super_encrypted_keys: bool,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::unbind_keys_for_user", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let mut stmt = tx
.prepare(&format!(
@@ -2731,17 +2998,28 @@
/// Returns a list of KeyDescriptors in the selected domain/namespace.
/// The key descriptors will have the domain, nspace, and alias field set.
/// Domain must be APP or SELINUX, the caller must make sure of that.
- pub fn list(&mut self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+ pub fn list(
+ &mut self,
+ domain: Domain,
+ namespace: i64,
+ key_type: KeyType,
+ ) -> Result<Vec<KeyDescriptor>> {
+ let _wp = wd::watch_millis("KeystoreDB::list", 500);
+
self.with_transaction(TransactionBehavior::Deferred, |tx| {
let mut stmt = tx
.prepare(
"SELECT alias FROM persistent.keyentry
- WHERE domain = ? AND namespace = ? AND alias IS NOT NULL AND state = ?;",
+ WHERE domain = ?
+ AND namespace = ?
+ AND alias IS NOT NULL
+ AND state = ?
+ AND key_type = ?;",
)
.context("In list: Failed to prepare.")?;
let mut rows = stmt
- .query(params![domain.0 as u32, namespace, KeyLifeCycle::Live])
+ .query(params![domain.0 as u32, namespace, KeyLifeCycle::Live, key_type])
.context("In list: Failed to query.")?;
let mut descriptors: Vec<KeyDescriptor> = Vec::new();
@@ -2773,6 +3051,8 @@
access_vector: KeyPermSet,
check_permission: impl Fn(&KeyDescriptor, &KeyPermSet) -> Result<()>,
) -> Result<KeyDescriptor> {
+ let _wp = wd::watch_millis("KeystoreDB::grant", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
// Load the key_id and complete the access control tuple.
// We ignore the access vector here because grants cannot be granted.
@@ -2838,6 +3118,8 @@
grantee_uid: u32,
check_permission: impl Fn(&KeyDescriptor) -> Result<()>,
) -> Result<()> {
+ let _wp = wd::watch_millis("KeystoreDB::ungrant", 500);
+
self.with_transaction(TransactionBehavior::Immediate, |tx| {
// Load the key_id and complete the access control tuple.
// We ignore the access vector here because grants cannot be granted.
@@ -2887,100 +3169,59 @@
}
}
- /// Insert or replace the auth token based on the UNIQUE constraint of the auth token table
- pub fn insert_auth_token(&mut self, auth_token: &HardwareAuthToken) -> Result<()> {
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "INSERT OR REPLACE INTO perboot.authtoken (challenge, user_id, auth_id,
- authenticator_type, timestamp, mac, time_received) VALUES(?, ?, ?, ?, ?, ?, ?);",
- params![
- auth_token.challenge,
- auth_token.userId,
- auth_token.authenticatorId,
- auth_token.authenticatorType.0 as i32,
- auth_token.timestamp.milliSeconds as i64,
- auth_token.mac,
- MonotonicRawTime::now(),
- ],
- )
- .context("In insert_auth_token: failed to insert auth token into the database")?;
- Ok(()).no_gc()
- })
+ /// Insert or replace the auth token based on (user_id, auth_id, auth_type)
+ pub fn insert_auth_token(&mut self, auth_token: &HardwareAuthToken) {
+ self.perboot.insert_auth_token_entry(AuthTokenEntry::new(
+ auth_token.clone(),
+ MonotonicRawTime::now(),
+ ))
}
/// Find the newest auth token matching the given predicate.
- pub fn find_auth_token_entry<F>(
- &mut self,
- p: F,
- ) -> Result<Option<(AuthTokenEntry, MonotonicRawTime)>>
+ pub fn find_auth_token_entry<F>(&self, p: F) -> Option<(AuthTokenEntry, MonotonicRawTime)>
where
F: Fn(&AuthTokenEntry) -> bool,
{
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let mut stmt = tx
- .prepare("SELECT * from perboot.authtoken ORDER BY time_received DESC;")
- .context("Prepare statement failed.")?;
-
- let mut rows = stmt.query(NO_PARAMS).context("Failed to query.")?;
-
- while let Some(row) = rows.next().context("Failed to get next row.")? {
- let entry = AuthTokenEntry::new(
- HardwareAuthToken {
- challenge: row.get(1)?,
- userId: row.get(2)?,
- authenticatorId: row.get(3)?,
- authenticatorType: HardwareAuthenticatorType(row.get(4)?),
- timestamp: Timestamp { milliSeconds: row.get(5)? },
- mac: row.get(6)?,
- },
- row.get(7)?,
- );
- if p(&entry) {
- return Ok(Some((
- entry,
- Self::get_last_off_body(tx)
- .context("In find_auth_token_entry: Trying to get last off body")?,
- )))
- .no_gc();
- }
- }
- Ok(None).no_gc()
- })
- .context("In find_auth_token_entry.")
+ self.perboot.find_auth_token_entry(p).map(|entry| (entry, self.get_last_off_body()))
}
/// Insert last_off_body into the metadata table at the initialization of auth token table
- pub fn insert_last_off_body(&mut self, last_off_body: MonotonicRawTime) -> Result<()> {
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "INSERT OR REPLACE INTO perboot.metadata (key, value) VALUES (?, ?);",
- params!["last_off_body", last_off_body],
- )
- .context("In insert_last_off_body: failed to insert.")?;
- Ok(()).no_gc()
- })
+ pub fn insert_last_off_body(&self, last_off_body: MonotonicRawTime) {
+ self.perboot.set_last_off_body(last_off_body)
}
/// Update last_off_body when on_device_off_body is called
- pub fn update_last_off_body(&mut self, last_off_body: MonotonicRawTime) -> Result<()> {
- self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute(
- "UPDATE perboot.metadata SET value = ? WHERE key = ?;",
- params![last_off_body, "last_off_body"],
- )
- .context("In update_last_off_body: failed to update.")?;
- Ok(()).no_gc()
- })
+ pub fn update_last_off_body(&self, last_off_body: MonotonicRawTime) {
+ self.perboot.set_last_off_body(last_off_body)
}
/// Get last_off_body time when finding auth tokens
- fn get_last_off_body(tx: &Transaction) -> Result<MonotonicRawTime> {
- tx.query_row(
- "SELECT value from perboot.metadata WHERE key = ?;",
- params!["last_off_body"],
- |row| Ok(row.get(0)?),
- )
- .context("In get_last_off_body: query_row failed.")
+ fn get_last_off_body(&self) -> MonotonicRawTime {
+ self.perboot.get_last_off_body()
+ }
+
+ /// Load descriptor of a key by key id
+ pub fn load_key_descriptor(&mut self, key_id: i64) -> Result<Option<KeyDescriptor>> {
+ let _wp = wd::watch_millis("KeystoreDB::load_key_descriptor", 500);
+
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ tx.query_row(
+ "SELECT domain, namespace, alias FROM persistent.keyentry WHERE id = ?;",
+ params![key_id],
+ |row| {
+ Ok(KeyDescriptor {
+ domain: Domain(row.get(0)?),
+ nspace: row.get(1)?,
+ alias: row.get(2)?,
+ blob: None,
+ })
+ },
+ )
+ .optional()
+ .context("Trying to load key descriptor")
+ .no_gc()
+ })
+ .context("In load_key_descriptor.")
}
}
@@ -3003,9 +3244,12 @@
use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
Timestamp::Timestamp,
};
+ use rusqlite::DatabaseName::Attached;
use rusqlite::NO_PARAMS;
- use rusqlite::{Error, TransactionBehavior};
+ use rusqlite::TransactionBehavior;
use std::cell::RefCell;
+ use std::collections::BTreeMap;
+ use std::fmt::Write;
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Arc;
use std::thread;
@@ -3014,9 +3258,9 @@
use std::time::Instant;
fn new_test_db() -> Result<KeystoreDB> {
- let conn = KeystoreDB::make_connection("file::memory:", "file::memory:")?;
+ let conn = KeystoreDB::make_connection("file::memory:")?;
- let mut db = KeystoreDB { conn, gc: None };
+ let mut db = KeystoreDB { conn, gc: None, perboot: Arc::new(perboot::PerbootDB::new()) };
db.with_transaction(TransactionBehavior::Immediate, |tx| {
KeystoreDB::init_tables(tx).context("Failed to initialize tables.").no_gc()
})?;
@@ -3032,7 +3276,7 @@
let gc_db = KeystoreDB::new(path, None).expect("Failed to open test gc db_connection.");
let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db, super_key));
- KeystoreDB::new(path, Some(gc))
+ KeystoreDB::new(path, Some(Arc::new(gc)))
}
fn rebind_alias(
@@ -3043,7 +3287,7 @@
namespace: i64,
) -> Result<bool> {
db.with_transaction(TransactionBehavior::Immediate, |tx| {
- KeystoreDB::rebind_alias(tx, newid, alias, &domain, &namespace).no_gc()
+ KeystoreDB::rebind_alias(tx, newid, alias, &domain, &namespace, KeyType::Client).no_gc()
})
.context("In rebind_alias.")
}
@@ -3102,15 +3346,6 @@
assert_eq!(tables[3], "keyentry");
assert_eq!(tables[4], "keymetadata");
assert_eq!(tables[5], "keyparameter");
- let tables = db
- .conn
- .prepare("SELECT name from perboot.sqlite_master WHERE type='table' ORDER BY name;")?
- .query_map(params![], |row| row.get(0))?
- .collect::<rusqlite::Result<Vec<String>>>()?;
-
- assert_eq!(tables.len(), 2);
- assert_eq!(tables[0], "authtoken");
- assert_eq!(tables[1], "metadata");
Ok(())
}
@@ -3125,8 +3360,8 @@
timestamp: Timestamp { milliSeconds: 500 },
mac: String::from("mac").into_bytes(),
};
- db.insert_auth_token(&auth_token1)?;
- let auth_tokens_returned = get_auth_tokens(&mut db)?;
+ db.insert_auth_token(&auth_token1);
+ let auth_tokens_returned = get_auth_tokens(&db);
assert_eq!(auth_tokens_returned.len(), 1);
// insert another auth token with the same values for the columns in the UNIQUE constraint
@@ -3140,8 +3375,8 @@
mac: String::from("mac").into_bytes(),
};
- db.insert_auth_token(&auth_token2)?;
- let mut auth_tokens_returned = get_auth_tokens(&mut db)?;
+ db.insert_auth_token(&auth_token2);
+ let mut auth_tokens_returned = get_auth_tokens(&db);
assert_eq!(auth_tokens_returned.len(), 1);
if let Some(auth_token) = auth_tokens_returned.pop() {
@@ -3159,33 +3394,16 @@
mac: String::from("mac").into_bytes(),
};
- db.insert_auth_token(&auth_token3)?;
- let auth_tokens_returned = get_auth_tokens(&mut db)?;
+ db.insert_auth_token(&auth_token3);
+ let auth_tokens_returned = get_auth_tokens(&db);
assert_eq!(auth_tokens_returned.len(), 2);
Ok(())
}
// utility function for test_auth_token_table_invariant()
- fn get_auth_tokens(db: &mut KeystoreDB) -> Result<Vec<AuthTokenEntry>> {
- let mut stmt = db.conn.prepare("SELECT * from perboot.authtoken;")?;
-
- let auth_token_entries: Vec<AuthTokenEntry> = stmt
- .query_map(NO_PARAMS, |row| {
- Ok(AuthTokenEntry::new(
- HardwareAuthToken {
- challenge: row.get(1)?,
- userId: row.get(2)?,
- authenticatorId: row.get(3)?,
- authenticatorType: HardwareAuthenticatorType(row.get(4)?),
- timestamp: Timestamp { milliSeconds: row.get(5)? },
- mac: row.get(6)?,
- },
- row.get(7)?,
- ))
- })?
- .collect::<Result<Vec<AuthTokenEntry>, Error>>()?;
- Ok(auth_token_entries)
+ fn get_auth_tokens(db: &KeystoreDB) -> Vec<AuthTokenEntry> {
+ db.perboot.get_all_auth_token_entries()
}
#[test]
@@ -3193,7 +3411,7 @@
let temp_dir = TempDir::new("persistent_db_test")?;
let mut db = KeystoreDB::new(temp_dir.path(), None)?;
- db.create_key_entry(&Domain::APP, &100, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::APP, &100, KeyType::Client, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 1);
@@ -3212,8 +3430,8 @@
let mut db = new_test_db()?;
- db.create_key_entry(&Domain::APP, &100, &KEYSTORE_UUID)?;
- db.create_key_entry(&Domain::SELINUX, &101, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::APP, &100, KeyType::Client, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::SELINUX, &101, KeyType::Client, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
@@ -3222,15 +3440,15 @@
// Test that we must pass in a valid Domain.
check_result_is_error_containing_string(
- db.create_key_entry(&Domain::GRANT, &102, &KEYSTORE_UUID),
+ db.create_key_entry(&Domain::GRANT, &102, KeyType::Client, &KEYSTORE_UUID),
"Domain Domain(1) must be either App or SELinux.",
);
check_result_is_error_containing_string(
- db.create_key_entry(&Domain::BLOB, &103, &KEYSTORE_UUID),
+ db.create_key_entry(&Domain::BLOB, &103, KeyType::Client, &KEYSTORE_UUID),
"Domain Domain(3) must be either App or SELinux.",
);
check_result_is_error_containing_string(
- db.create_key_entry(&Domain::KEY_ID, &104, &KEYSTORE_UUID),
+ db.create_key_entry(&Domain::KEY_ID, &104, KeyType::Client, &KEYSTORE_UUID),
"Domain Domain(4) must be either App or SELinux.",
);
@@ -3398,7 +3616,7 @@
let mut db = new_test_db()?;
load_attestation_key_pool(&mut db, 45 /* expiration */, 1 /* namespace */, 0x02)?;
load_attestation_key_pool(&mut db, 80 /* expiration */, 2 /* namespace */, 0x03)?;
- db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::APP, &42, KeyType::Client, &KEYSTORE_UUID)?;
let result = db.delete_all_attestation_keys()?;
// Give the garbage collector half a second to catch up.
@@ -3419,8 +3637,8 @@
}
let mut db = new_test_db()?;
- db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
- db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::APP, &42, KeyType::Client, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::APP, &42, KeyType::Client, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
assert_eq!(
@@ -3756,6 +3974,7 @@
alias: Some(TEST_ALIAS.to_string()),
blob: None,
},
+ KeyType::Client,
TEST_CERT_BLOB,
&KEYSTORE_UUID,
)
@@ -4100,6 +4319,327 @@
Ok(())
}
+ // Creates a key, migrates it to a different location, and then tries to access it
+ // by both the old and the new location.
+ #[test]
+ fn test_migrate_key_app_to_app() -> Result<()> {
+ let mut db = new_test_db()?;
+ const SOURCE_UID: u32 = 1u32;
+ const DESTINATION_UID: u32 = 2u32;
+ static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ let key_id_guard =
+ make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
+ .context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
+
+ let source_descriptor: KeyDescriptor = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(SOURCE_ALIAS.to_string()),
+ blob: None,
+ };
+
+ let destination_descriptor: KeyDescriptor = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(DESTINATION_ALIAS.to_string()),
+ blob: None,
+ };
+
+ let key_id = key_id_guard.id();
+
+ db.migrate_key_namespace(key_id_guard, &destination_descriptor, DESTINATION_UID, |_k| {
+ Ok(())
+ })
+ .unwrap();
+
+ let (_, key_entry) = db
+ .load_key_entry(
+ &destination_descriptor,
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ DESTINATION_UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(DESTINATION_UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+
+ assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
+
+ assert_eq!(
+ Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
+ db.load_key_entry(
+ &source_descriptor,
+ KeyType::Client,
+ KeyEntryLoadBits::NONE,
+ SOURCE_UID,
+ |_k, _av| Ok(()),
+ )
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<KsError>()
+ );
+
+ Ok(())
+ }
+
+ // Creates a key, migrates it to a different location, and then tries to access it
+ // by both the old and the new location.
+ #[test]
+ fn test_migrate_key_app_to_selinux() -> Result<()> {
+ let mut db = new_test_db()?;
+ const SOURCE_UID: u32 = 1u32;
+ const DESTINATION_UID: u32 = 2u32;
+ const DESTINATION_NAMESPACE: i64 = 1000i64;
+ static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ let key_id_guard =
+ make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
+ .context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
+
+ let source_descriptor: KeyDescriptor = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(SOURCE_ALIAS.to_string()),
+ blob: None,
+ };
+
+ let destination_descriptor: KeyDescriptor = KeyDescriptor {
+ domain: Domain::SELINUX,
+ nspace: DESTINATION_NAMESPACE,
+ alias: Some(DESTINATION_ALIAS.to_string()),
+ blob: None,
+ };
+
+ let key_id = key_id_guard.id();
+
+ db.migrate_key_namespace(key_id_guard, &destination_descriptor, DESTINATION_UID, |_k| {
+ Ok(())
+ })
+ .unwrap();
+
+ let (_, key_entry) = db
+ .load_key_entry(
+ &destination_descriptor,
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ DESTINATION_UID,
+ |k, av| {
+ assert_eq!(Domain::SELINUX, k.domain);
+ assert_eq!(DESTINATION_NAMESPACE as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+
+ assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
+
+ assert_eq!(
+ Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
+ db.load_key_entry(
+ &source_descriptor,
+ KeyType::Client,
+ KeyEntryLoadBits::NONE,
+ SOURCE_UID,
+ |_k, _av| Ok(()),
+ )
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<KsError>()
+ );
+
+ Ok(())
+ }
+
+ // Creates two keys and tries to migrate the first to the location of the second,
+ // which is expected to fail.
+ #[test]
+ fn test_migrate_key_destination_occupied() -> Result<()> {
+ let mut db = new_test_db()?;
+ const SOURCE_UID: u32 = 1u32;
+ const DESTINATION_UID: u32 = 2u32;
+ static SOURCE_ALIAS: &str = &"SOURCE_ALIAS";
+ static DESTINATION_ALIAS: &str = &"DESTINATION_ALIAS";
+ let key_id_guard =
+ make_test_key_entry(&mut db, Domain::APP, SOURCE_UID as i64, SOURCE_ALIAS, None)
+ .context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
+ make_test_key_entry(&mut db, Domain::APP, DESTINATION_UID as i64, DESTINATION_ALIAS, None)
+ .context("test_insert_and_load_full_keyentry_from_grant_by_key_id")?;
+
+ let destination_descriptor: KeyDescriptor = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(DESTINATION_ALIAS.to_string()),
+ blob: None,
+ };
+
+ assert_eq!(
+ Some(&KsError::Rc(ResponseCode::INVALID_ARGUMENT)),
+ db.migrate_key_namespace(
+ key_id_guard,
+ &destination_descriptor,
+ DESTINATION_UID,
+ |_k| Ok(())
+ )
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<KsError>()
+ );
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_upgrade_0_to_1() {
+ const ALIAS1: &str = &"test_upgrade_0_to_1_1";
+ const ALIAS2: &str = &"test_upgrade_0_to_1_2";
+ const ALIAS3: &str = &"test_upgrade_0_to_1_3";
+ const UID: u32 = 33;
+ let temp_dir = Arc::new(TempDir::new("test_upgrade_0_to_1").unwrap());
+ let mut db = KeystoreDB::new(temp_dir.path(), None).unwrap();
+ let key_id_untouched1 =
+ make_test_key_entry(&mut db, Domain::APP, UID as i64, ALIAS1, None).unwrap().id();
+ let key_id_untouched2 =
+ make_bootlevel_key_entry(&mut db, Domain::APP, UID as i64, ALIAS2, false).unwrap().id();
+ let key_id_deleted =
+ make_bootlevel_key_entry(&mut db, Domain::APP, UID as i64, ALIAS3, true).unwrap().id();
+
+ let (_, key_entry) = db
+ .load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(ALIAS1.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+ assert_eq!(key_entry, make_test_key_entry_test_vector(key_id_untouched1, None));
+ let (_, key_entry) = db
+ .load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(ALIAS2.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+ assert_eq!(key_entry, make_bootlevel_test_key_entry_test_vector(key_id_untouched2, false));
+ let (_, key_entry) = db
+ .load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(ALIAS3.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+ assert_eq!(key_entry, make_bootlevel_test_key_entry_test_vector(key_id_deleted, true));
+
+ db.with_transaction(TransactionBehavior::Immediate, |tx| {
+ KeystoreDB::from_0_to_1(tx).no_gc()
+ })
+ .unwrap();
+
+ let (_, key_entry) = db
+ .load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(ALIAS1.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+ assert_eq!(key_entry, make_test_key_entry_test_vector(key_id_untouched1, None));
+ let (_, key_entry) = db
+ .load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(ALIAS2.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap();
+ assert_eq!(key_entry, make_bootlevel_test_key_entry_test_vector(key_id_untouched2, false));
+ assert_eq!(
+ Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
+ db.load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(ALIAS3.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ UID,
+ |k, av| {
+ assert_eq!(Domain::APP, k.domain);
+ assert_eq!(UID as i64, k.nspace);
+ assert!(av.is_none());
+ Ok(())
+ },
+ )
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<KsError>()
+ );
+ }
+
static KEY_LOCK_TEST_ALIAS: &str = "my super duper locked key";
#[test]
@@ -4177,7 +4717,7 @@
}
#[test]
- fn teset_database_busy_error_code() {
+ fn test_database_busy_error_code() {
let temp_dir =
TempDir::new("test_database_busy_error_code_").expect("Failed to create temp dir.");
@@ -4394,7 +4934,7 @@
})
.collect();
list_o_descriptors.sort();
- let mut list_result = db.list(*domain, *namespace)?;
+ let mut list_result = db.list(*domain, *namespace, KeyType::Client)?;
list_result.sort();
assert_eq!(list_o_descriptors, list_result);
@@ -4424,7 +4964,7 @@
loaded_entries.sort_unstable();
assert_eq!(list_o_ids, loaded_entries);
}
- assert_eq!(Vec::<KeyDescriptor>::new(), db.list(Domain::SELINUX, 101)?);
+ assert_eq!(Vec::<KeyDescriptor>::new(), db.list(Domain::SELINUX, 101, KeyType::Client)?);
Ok(())
}
@@ -4742,7 +5282,7 @@
alias: &str,
max_usage_count: Option<i32>,
) -> Result<KeyIdGuard> {
- let key_id = db.create_key_entry(&domain, &namespace, &KEYSTORE_UUID)?;
+ let key_id = db.create_key_entry(&domain, &namespace, KeyType::Client, &KEYSTORE_UUID)?;
let mut blob_metadata = BlobMetaData::new();
blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
blob_metadata.add(BlobMetaEntry::Salt(vec![1, 2, 3]));
@@ -4794,6 +5334,66 @@
}
}
+ fn make_bootlevel_key_entry(
+ db: &mut KeystoreDB,
+ domain: Domain,
+ namespace: i64,
+ alias: &str,
+ logical_only: bool,
+ ) -> Result<KeyIdGuard> {
+ let key_id = db.create_key_entry(&domain, &namespace, KeyType::Client, &KEYSTORE_UUID)?;
+ let mut blob_metadata = BlobMetaData::new();
+ if !logical_only {
+ blob_metadata.add(BlobMetaEntry::MaxBootLevel(3));
+ }
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+
+ db.set_blob(
+ &key_id,
+ SubComponentType::KEY_BLOB,
+ Some(TEST_KEY_BLOB),
+ Some(&blob_metadata),
+ )?;
+ db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB), None)?;
+ db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB), None)?;
+
+ let mut params = make_test_params(None);
+ params.push(KeyParameter::new(KeyParameterValue::MaxBootLevel(3), SecurityLevel::KEYSTORE));
+
+ db.insert_keyparameter(&key_id, ¶ms)?;
+
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
+ db.insert_key_metadata(&key_id, &metadata)?;
+ rebind_alias(db, &key_id, alias, domain, namespace)?;
+ Ok(key_id)
+ }
+
+ fn make_bootlevel_test_key_entry_test_vector(key_id: i64, logical_only: bool) -> KeyEntry {
+ let mut params = make_test_params(None);
+ params.push(KeyParameter::new(KeyParameterValue::MaxBootLevel(3), SecurityLevel::KEYSTORE));
+
+ let mut blob_metadata = BlobMetaData::new();
+ if !logical_only {
+ blob_metadata.add(BlobMetaEntry::MaxBootLevel(3));
+ }
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
+
+ KeyEntry {
+ id: key_id,
+ key_blob_info: Some((TEST_KEY_BLOB.to_vec(), blob_metadata)),
+ cert: Some(TEST_CERT_BLOB.to_vec()),
+ cert_chain: Some(TEST_CERT_CHAIN_BLOB.to_vec()),
+ km_uuid: KEYSTORE_UUID,
+ parameters: params,
+ metadata,
+ pure_cert: false,
+ }
+ }
+
fn debug_dump_keyentry_table(db: &mut KeystoreDB) -> Result<()> {
let mut stmt = db.conn.prepare(
"SELECT id, key_type, domain, namespace, alias, state, km_uuid FROM persistent.keyentry;",
@@ -4864,17 +5464,17 @@
#[test]
fn test_last_off_body() -> Result<()> {
let mut db = new_test_db()?;
- db.insert_last_off_body(MonotonicRawTime::now())?;
+ db.insert_last_off_body(MonotonicRawTime::now());
let tx = db.conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
- let last_off_body_1 = KeystoreDB::get_last_off_body(&tx)?;
tx.commit()?;
+ let last_off_body_1 = db.get_last_off_body();
let one_second = Duration::from_secs(1);
thread::sleep(one_second);
- db.update_last_off_body(MonotonicRawTime::now())?;
+ db.update_last_off_body(MonotonicRawTime::now());
let tx2 = db.conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
- let last_off_body_2 = KeystoreDB::get_last_off_body(&tx2)?;
tx2.commit()?;
- assert!(last_off_body_1.seconds() < last_off_body_2.seconds());
+ let last_off_body_2 = db.get_last_off_body();
+ assert!(last_off_body_1 < last_off_body_2);
Ok(())
}
@@ -4887,11 +5487,11 @@
make_test_key_entry(&mut db, Domain::APP, 110000, TEST_ALIAS, None)?;
db.unbind_keys_for_user(2, false)?;
- assert_eq!(1, db.list(Domain::APP, 110000)?.len());
- assert_eq!(0, db.list(Domain::APP, 210000)?.len());
+ assert_eq!(1, db.list(Domain::APP, 110000, KeyType::Client)?.len());
+ assert_eq!(0, db.list(Domain::APP, 210000, KeyType::Client)?.len());
db.unbind_keys_for_user(1, true)?;
- assert_eq!(0, db.list(Domain::APP, 110000)?.len());
+ assert_eq!(0, db.list(Domain::APP, 110000, KeyType::Client)?.len());
Ok(())
}
@@ -4931,4 +5531,271 @@
assert_eq!(secret_bytes, &*decrypted_secret_bytes);
Ok(())
}
+
+ fn get_valid_statsd_storage_types() -> Vec<StatsdStorageType> {
+ vec![
+ StatsdStorageType::KeyEntry,
+ StatsdStorageType::KeyEntryIdIndex,
+ StatsdStorageType::KeyEntryDomainNamespaceIndex,
+ StatsdStorageType::BlobEntry,
+ StatsdStorageType::BlobEntryKeyEntryIdIndex,
+ StatsdStorageType::KeyParameter,
+ StatsdStorageType::KeyParameterKeyEntryIdIndex,
+ StatsdStorageType::KeyMetadata,
+ StatsdStorageType::KeyMetadataKeyEntryIdIndex,
+ StatsdStorageType::Grant,
+ StatsdStorageType::AuthToken,
+ StatsdStorageType::BlobMetadata,
+ StatsdStorageType::BlobMetadataBlobEntryIdIndex,
+ ]
+ }
+
+ /// Perform a simple check to ensure that we can query all the storage types
+ /// that are supported by the DB. Check for reasonable values.
+ #[test]
+ fn test_query_all_valid_table_sizes() -> Result<()> {
+ const PAGE_SIZE: i64 = 4096;
+
+ let mut db = new_test_db()?;
+
+ for t in get_valid_statsd_storage_types() {
+ let stat = db.get_storage_stat(t)?;
+ // AuthToken can be less than a page since it's stored in memory (perboot), not sqlite
+ // TODO(b/187474736) stop using if-let here
+ if let StatsdStorageType::AuthToken = t {
+ } else {
+ assert!(stat.size >= PAGE_SIZE);
+ }
+ assert!(stat.size >= stat.unused_size);
+ }
+
+ Ok(())
+ }
+
+ fn get_storage_stats_map(db: &mut KeystoreDB) -> BTreeMap<i32, Keystore2StorageStats> {
+ get_valid_statsd_storage_types()
+ .into_iter()
+ .map(|t| (t as i32, db.get_storage_stat(t).unwrap()))
+ .collect()
+ }
+
+ fn assert_storage_increased(
+ db: &mut KeystoreDB,
+ increased_storage_types: Vec<StatsdStorageType>,
+ baseline: &mut BTreeMap<i32, Keystore2StorageStats>,
+ ) {
+ for storage in increased_storage_types {
+ // Verify the expected storage increased.
+ let new = db.get_storage_stat(storage).unwrap();
+ let storage = storage as i32;
+ let old = &baseline[&storage];
+ assert!(new.size >= old.size, "{}: {} >= {}", storage, new.size, old.size);
+ assert!(
+ new.unused_size <= old.unused_size,
+ "{}: {} <= {}",
+ storage,
+ new.unused_size,
+ old.unused_size
+ );
+
+ // Update the baseline with the new value so that it succeeds in the
+ // later comparison.
+ baseline.insert(storage, new);
+ }
+
+ // Get an updated map of the storage and verify there were no unexpected changes.
+ let updated_stats = get_storage_stats_map(db);
+ assert_eq!(updated_stats.len(), baseline.len());
+
+ for &k in baseline.keys() {
+ let stringify = |map: &BTreeMap<i32, Keystore2StorageStats>| -> String {
+ let mut s = String::new();
+ for &k in map.keys() {
+ writeln!(&mut s, " {}: {}, {}", &k, map[&k].size, map[&k].unused_size)
+ .expect("string concat failed");
+ }
+ s
+ };
+
+ assert!(
+ updated_stats[&k].size == baseline[&k].size
+ && updated_stats[&k].unused_size == baseline[&k].unused_size,
+ "updated_stats:\n{}\nbaseline:\n{}",
+ stringify(&updated_stats),
+ stringify(&baseline)
+ );
+ }
+ }
+
+ #[test]
+ fn test_verify_key_table_size_reporting() -> Result<()> {
+ let mut db = new_test_db()?;
+ let mut working_stats = get_storage_stats_map(&mut db);
+
+ let key_id = db.create_key_entry(&Domain::APP, &42, KeyType::Client, &KEYSTORE_UUID)?;
+ assert_storage_increased(
+ &mut db,
+ vec![
+ StatsdStorageType::KeyEntry,
+ StatsdStorageType::KeyEntryIdIndex,
+ StatsdStorageType::KeyEntryDomainNamespaceIndex,
+ ],
+ &mut working_stats,
+ );
+
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ db.set_blob(&key_id, SubComponentType::KEY_BLOB, Some(TEST_KEY_BLOB), None)?;
+ assert_storage_increased(
+ &mut db,
+ vec![
+ StatsdStorageType::BlobEntry,
+ StatsdStorageType::BlobEntryKeyEntryIdIndex,
+ StatsdStorageType::BlobMetadata,
+ StatsdStorageType::BlobMetadataBlobEntryIdIndex,
+ ],
+ &mut working_stats,
+ );
+
+ let params = make_test_params(None);
+ db.insert_keyparameter(&key_id, ¶ms)?;
+ assert_storage_increased(
+ &mut db,
+ vec![StatsdStorageType::KeyParameter, StatsdStorageType::KeyParameterKeyEntryIdIndex],
+ &mut working_stats,
+ );
+
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
+ db.insert_key_metadata(&key_id, &metadata)?;
+ assert_storage_increased(
+ &mut db,
+ vec![StatsdStorageType::KeyMetadata, StatsdStorageType::KeyMetadataKeyEntryIdIndex],
+ &mut working_stats,
+ );
+
+ let mut sum = 0;
+ for stat in working_stats.values() {
+ sum += stat.size;
+ }
+ let total = db.get_storage_stat(StatsdStorageType::Database)?.size;
+ assert!(sum <= total, "Expected sum <= total. sum: {}, total: {}", sum, total);
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_verify_auth_table_size_reporting() -> Result<()> {
+ let mut db = new_test_db()?;
+ let mut working_stats = get_storage_stats_map(&mut db);
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 456,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 10 },
+ mac: b"mac".to_vec(),
+ });
+ assert_storage_increased(&mut db, vec![StatsdStorageType::AuthToken], &mut working_stats);
+ Ok(())
+ }
+
+ #[test]
+ fn test_verify_grant_table_size_reporting() -> Result<()> {
+ const OWNER: i64 = 1;
+ let mut db = new_test_db()?;
+ make_test_key_entry(&mut db, Domain::APP, OWNER, TEST_ALIAS, None)?;
+
+ let mut working_stats = get_storage_stats_map(&mut db);
+ db.grant(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: 0,
+ alias: Some(TEST_ALIAS.to_string()),
+ blob: None,
+ },
+ OWNER as u32,
+ 123,
+ key_perm_set![KeyPerm::use_()],
+ |_, _| Ok(()),
+ )?;
+
+ assert_storage_increased(&mut db, vec![StatsdStorageType::Grant], &mut working_stats);
+
+ Ok(())
+ }
+
+ #[test]
+ fn find_auth_token_entry_returns_latest() -> Result<()> {
+ let mut db = new_test_db()?;
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 456,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 10 },
+ mac: b"mac0".to_vec(),
+ });
+ std::thread::sleep(std::time::Duration::from_millis(1));
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 457,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 12 },
+ mac: b"mac1".to_vec(),
+ });
+ std::thread::sleep(std::time::Duration::from_millis(1));
+ db.insert_auth_token(&HardwareAuthToken {
+ challenge: 123,
+ userId: 458,
+ authenticatorId: 789,
+ authenticatorType: kmhw_authenticator_type::ANY,
+ timestamp: Timestamp { milliSeconds: 3 },
+ mac: b"mac2".to_vec(),
+ });
+ // All three entries are in the database
+ assert_eq!(db.perboot.auth_tokens_len(), 3);
+ // It selected the most recent timestamp
+ assert_eq!(db.find_auth_token_entry(|_| true).unwrap().0.auth_token.mac, b"mac2".to_vec());
+ Ok(())
+ }
+
+ #[test]
+ fn test_set_wal_mode() -> Result<()> {
+ let temp_dir = TempDir::new("test_set_wal_mode")?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
+ let mode: String =
+ db.conn.pragma_query_value(Some(Attached("persistent")), "journal_mode", |row| {
+ row.get(0)
+ })?;
+ assert_eq!(mode, "delete");
+ db.conn.close().expect("Close didn't work");
+
+ KeystoreDB::set_wal_mode(temp_dir.path())?;
+
+ db = KeystoreDB::new(temp_dir.path(), None)?;
+ let mode: String =
+ db.conn.pragma_query_value(Some(Attached("persistent")), "journal_mode", |row| {
+ row.get(0)
+ })?;
+ assert_eq!(mode, "wal");
+ Ok(())
+ }
+
+ #[test]
+ fn test_load_key_descriptor() -> Result<()> {
+ let mut db = new_test_db()?;
+ let key_id = make_test_key_entry(&mut db, Domain::APP, 1, TEST_ALIAS, None)?.0;
+
+ let key = db.load_key_descriptor(key_id)?.unwrap();
+
+ assert_eq!(key.domain, Domain::APP);
+ assert_eq!(key.nspace, 1);
+ assert_eq!(key.alias, Some(TEST_ALIAS.to_string()));
+
+ // No such id
+ assert_eq!(db.load_key_descriptor(key_id + 1)?, None);
+ Ok(())
+ }
}
diff --git a/keystore2/src/database/perboot.rs b/keystore2/src/database/perboot.rs
new file mode 100644
index 0000000..7ff35fa
--- /dev/null
+++ b/keystore2/src/database/perboot.rs
@@ -0,0 +1,122 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements a per-boot, shared, in-memory storage of auth tokens
+//! and the last-off-body timestamp for the main Keystore 2.0 database module.
+
+use super::{AuthTokenEntry, MonotonicRawTime};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ HardwareAuthToken::HardwareAuthToken, HardwareAuthenticatorType::HardwareAuthenticatorType,
+};
+use lazy_static::lazy_static;
+use std::collections::HashSet;
+use std::sync::atomic::{AtomicI64, Ordering};
+use std::sync::Arc;
+use std::sync::RwLock;
+
+/// Identity of the authenticator instance that minted an auth token:
+/// (user id, authenticator id, authenticator type). Tokens sharing this
+/// identity supersede one another, so this triple serves as the
+/// deduplication key for the per-boot auth token set below.
+#[derive(PartialEq, PartialOrd, Ord, Eq, Hash)]
+struct AuthTokenId {
+    user_id: i64,
+    auth_id: i64,
+    authenticator_type: HardwareAuthenticatorType,
+}
+
+impl AuthTokenId {
+    /// Extracts the identifying fields out of a HardwareAuthToken.
+    fn from_auth_token(tok: &HardwareAuthToken) -> Self {
+        AuthTokenId {
+            user_id: tok.userId,
+            auth_id: tok.authenticatorId,
+            authenticator_type: tok.authenticatorType,
+        }
+    }
+}
+
+// Implements Eq/Hash to only operate on the AuthTokenId portion
+// of the AuthTokenEntry. This allows a HashSet to do the right thing:
+// HashSet::replace evicts the old entry whose identity triple matches,
+// so at most one token per authenticator instance is retained.
+#[derive(Clone)]
+struct AuthTokenEntryWrap(AuthTokenEntry);
+
+impl std::hash::Hash for AuthTokenEntryWrap {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        // Hash only the identity triple, not the token payload or timestamps.
+        AuthTokenId::from_auth_token(&self.0.auth_token).hash(state)
+    }
+}
+
+impl PartialEq<AuthTokenEntryWrap> for AuthTokenEntryWrap {
+    fn eq(&self, other: &AuthTokenEntryWrap) -> bool {
+        // Equality likewise compares identities only, consistent with Hash.
+        AuthTokenId::from_auth_token(&self.0.auth_token)
+            == AuthTokenId::from_auth_token(&other.0.auth_token)
+    }
+}
+
+impl Eq for AuthTokenEntryWrap {}
+
+/// Per-boot state structure. Currently only used to track auth tokens and
+/// last-off-body.
+#[derive(Default)]
+pub struct PerbootDB {
+    // We can use a .unwrap() discipline on this lock, because only panicking
+    // while holding a .write() lock will poison it. The only write usage is
+    // an insert call which inserts a pre-constructed pair.
+    // The set is keyed by AuthTokenId via AuthTokenEntryWrap's Hash/Eq impls,
+    // so it holds at most one token per authenticator instance.
+    auth_tokens: RwLock<HashSet<AuthTokenEntryWrap>>,
+    // Ordering::Relaxed is appropriate for accessing this atomic, since it
+    // does not currently need to be synchronized with anything else.
+    last_off_body: AtomicI64,
+}
+
+lazy_static! {
+    /// The global instance of the perboot DB. Located here rather than in globals
+    /// in order to restrict access to the database module.
+    pub static ref PERBOOT_DB: Arc<PerbootDB> = Arc::new(PerbootDB::new());
+}
+
+impl PerbootDB {
+    /// Construct a new perboot database. Currently just uses default values.
+    pub fn new() -> Self {
+        Default::default()
+    }
+    /// Add a new auth token + timestamp to the database, replacing any which
+    /// match all of user_id, auth_id, and auth_type.
+    pub fn insert_auth_token_entry(&self, entry: AuthTokenEntry) {
+        // replace() (rather than insert()) evicts an existing entry with the
+        // same AuthTokenId, enforcing one-token-per-authenticator.
+        self.auth_tokens.write().unwrap().replace(AuthTokenEntryWrap(entry));
+    }
+    /// Locate an auth token entry which matches the predicate with the most
+    /// recent update time.
+    pub fn find_auth_token_entry<P: Fn(&AuthTokenEntry) -> bool>(
+        &self,
+        p: P,
+    ) -> Option<AuthTokenEntry> {
+        let reader = self.auth_tokens.read().unwrap();
+        let mut matches: Vec<_> = reader.iter().filter(|x| p(&x.0)).collect();
+        // Ascending sort by receipt time; last() is therefore the newest match.
+        matches.sort_by_key(|x| x.0.time_received);
+        matches.last().map(|x| x.0.clone())
+    }
+    /// Get the last time the device was off the user's body
+    pub fn get_last_off_body(&self) -> MonotonicRawTime {
+        MonotonicRawTime(self.last_off_body.load(Ordering::Relaxed))
+    }
+    /// Set the last time the device was off the user's body
+    pub fn set_last_off_body(&self, last_off_body: MonotonicRawTime) {
+        self.last_off_body.store(last_off_body.0, Ordering::Relaxed)
+    }
+    /// Return how many auth tokens are currently tracked.
+    pub fn auth_tokens_len(&self) -> usize {
+        self.auth_tokens.read().unwrap().len()
+    }
+    #[cfg(test)]
+    /// For testing, return all auth tokens currently tracked.
+    pub fn get_all_auth_token_entries(&self) -> Vec<AuthTokenEntry> {
+        self.auth_tokens.read().unwrap().iter().cloned().map(|x| x.0).collect()
+    }
+}
diff --git a/keystore2/src/db_utils.rs b/keystore2/src/database/utils.rs
similarity index 100%
rename from keystore2/src/db_utils.rs
rename to keystore2/src/database/utils.rs
diff --git a/keystore2/src/database/versioning.rs b/keystore2/src/database/versioning.rs
new file mode 100644
index 0000000..e3a95c8
--- /dev/null
+++ b/keystore2/src/database/versioning.rs
@@ -0,0 +1,379 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use anyhow::{anyhow, Context, Result};
+use rusqlite::{params, OptionalExtension, Transaction, NO_PARAMS};
+
+/// Creates the `persistent.version` table if necessary and returns the version
+/// stored in it.
+///
+/// If no version row exists yet, the database is classified and stamped:
+/// * a brand new database (no `keyentry` table present) is stamped with
+///   `current_version`, which is also returned — no upgraders need to run;
+/// * a database that predates versioning (a `keyentry` table exists but no
+///   version table) is stamped with 0, so that all upgraders will run.
+pub fn create_or_get_version(tx: &Transaction, current_version: u32) -> Result<u32> {
+    tx.execute(
+        "CREATE TABLE IF NOT EXISTS persistent.version (
+                id INTEGER PRIMARY KEY,
+                version INTEGER);",
+        NO_PARAMS,
+    )
+    .context("In create_or_get_version: Failed to create version table.")?;
+
+    let version = tx
+        .query_row("SELECT version FROM persistent.version WHERE id = 0;", NO_PARAMS, |row| {
+            row.get(0)
+        })
+        .optional()
+        .context("In create_or_get_version: Failed to read version.")?;
+
+    let version = if let Some(version) = version {
+        version
+    } else {
+        // If no version table existed it could mean one of two things:
+        // 1) This database is completely new. In this case the version has to be set
+        //    to the current version and the current version which also needs to be
+        //    returned.
+        // 2) The database predates db versioning. In this case the version needs to be
+        //    set to 0, and 0 needs to be returned.
+        let version = if tx
+            .query_row(
+                "SELECT name FROM persistent.sqlite_master
+                 WHERE type = 'table' AND name = 'keyentry';",
+                NO_PARAMS,
+                |_| Ok(()),
+            )
+            .optional()
+            .context("In create_or_get_version: Failed to check for keyentry table.")?
+            .is_none()
+        {
+            current_version
+        } else {
+            0
+        };
+
+        tx.execute("INSERT INTO persistent.version (id, version) VALUES(0, ?);", params![version])
+            .context("In create_or_get_version: Failed to insert initial version.")?;
+        version
+    };
+    Ok(version)
+}
+
+/// Sets the stored database version to `new_version`.
+/// Fails if the singleton version row (id = 0) does not exist, i.e. if
+/// `create_or_get_version` was never run on this database.
+pub fn update_version(tx: &Transaction, new_version: u32) -> Result<()> {
+    let updated = tx
+        .execute("UPDATE persistent.version SET version = ? WHERE id = 0;", params![new_version])
+        .context("In update_version: Failed to update row.")?;
+    if updated == 1 {
+        Ok(())
+    } else {
+        Err(anyhow!("In update_version: No rows were updated."))
+    }
+}
+
+/// Brings the database up to `current_version` by running the supplied
+/// upgraders in sequence.
+///
+/// Contract: `upgraders[i]` upgrades the schema from version `i` and returns
+/// the version it upgraded to (normally `i + 1`; returning a larger value
+/// skips intermediate steps). At least `current_version` upgraders must be
+/// provided so that a legacy (version 0) database can be walked all the way
+/// up. The final version is persisted via `update_version`.
+pub fn upgrade_database<F>(tx: &Transaction, current_version: u32, upgraders: &[F]) -> Result<()>
+where
+    F: Fn(&Transaction) -> Result<u32> + 'static,
+{
+    if upgraders.len() < current_version as usize {
+        return Err(anyhow!("In upgrade_database: Insufficient upgraders provided."));
+    }
+    let mut db_version = create_or_get_version(tx, current_version)
+        .context("In upgrade_database: Failed to get database version.")?;
+    while db_version < current_version {
+        db_version = upgraders[db_version as usize](tx).with_context(|| {
+            format!("In upgrade_database: Trying to upgrade from db version {}.", db_version)
+        })?;
+    }
+    update_version(tx, db_version).context("In upgrade_database.")
+}
+
+/// Tests covering version stamping (`create_or_get_version`/`update_version`)
+/// and the upgrader dispatch loop (`upgrade_database`) for new and legacy
+/// (pre-versioning) databases.
+#[cfg(test)]
+mod test {
+    use super::*;
+    use rusqlite::{Connection, TransactionBehavior, NO_PARAMS};
+
+    #[test]
+    fn upgrade_database_test() {
+        let mut conn = Connection::open_in_memory().unwrap();
+        conn.execute("ATTACH DATABASE 'file::memory:' as persistent;", NO_PARAMS).unwrap();
+
+        // upgraders[i] records its run by inserting i + 1 into the test table
+        // and reports the new version i + 1.
+        let upgraders: Vec<_> = (0..30_u32)
+            .map(move |i| {
+                move |tx: &Transaction| {
+                    tx.execute(
+                        "INSERT INTO persistent.test (test_field) VALUES(?);",
+                        params![i + 1],
+                    )
+                    .with_context(|| format!("In upgrade_from_{}_to_{}.", i, i + 1))?;
+                    Ok(i + 1)
+                }
+            })
+            .collect();
+
+        for legacy in &[false, true] {
+            if *legacy {
+                conn.execute(
+                    "CREATE TABLE IF NOT EXISTS persistent.keyentry (
+                     id INTEGER UNIQUE,
+                     key_type INTEGER,
+                     domain INTEGER,
+                     namespace INTEGER,
+                     alias BLOB,
+                     state INTEGER,
+                     km_uuid BLOB);",
+                    NO_PARAMS,
+                )
+                .unwrap();
+            }
+            for from in 1..29 {
+                for to in from..30 {
+                    conn.execute("DROP TABLE IF EXISTS persistent.version;", NO_PARAMS).unwrap();
+                    conn.execute("DROP TABLE IF EXISTS persistent.test;", NO_PARAMS).unwrap();
+                    conn.execute(
+                        "CREATE TABLE IF NOT EXISTS persistent.test (
+                         id INTEGER PRIMARY KEY,
+                         test_field INTEGER);",
+                        NO_PARAMS,
+                    )
+                    .unwrap();
+
+                    {
+                        let tx =
+                            conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+                        create_or_get_version(&tx, from).unwrap();
+                        tx.commit().unwrap();
+                    }
+                    {
+                        let tx =
+                            conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+                        upgrade_database(&tx, to, &upgraders).unwrap();
+                        tx.commit().unwrap();
+                    }
+
+                    // In the legacy database case all upgraders starting from 0 have to run. So
+                    // after the upgrade step, the expectations need to be adjusted.
+                    let from = if *legacy { 0 } else { from };
+
+                    // There must be exactly to - from rows.
+                    assert_eq!(
+                        to - from,
+                        conn.query_row(
+                            "SELECT COUNT(test_field) FROM persistent.test;",
+                            NO_PARAMS,
+                            |row| row.get(0)
+                        )
+                        .unwrap()
+                    );
+                    // Each row must have the correct relation between id and test_field. If this
+                    // is not the case, the upgraders were not executed in the correct order.
+                    assert_eq!(
+                        to - from,
+                        conn.query_row(
+                            "SELECT COUNT(test_field) FROM persistent.test
+                             WHERE id = test_field - ?;",
+                            params![from],
+                            |row| row.get(0)
+                        )
+                        .unwrap()
+                    );
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn create_or_get_version_new_database() {
+        let mut conn = Connection::open_in_memory().unwrap();
+        conn.execute("ATTACH DATABASE 'file::memory:' as persistent;", NO_PARAMS).unwrap();
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            let version = create_or_get_version(&tx, 3).unwrap();
+            tx.commit().unwrap();
+            assert_eq!(version, 3);
+        }
+
+        // Was the version table created as expected?
+        assert_eq!(
+            Ok("version".to_owned()),
+            conn.query_row(
+                "SELECT name FROM persistent.sqlite_master
+                 WHERE type = 'table' AND name = 'version';",
+                NO_PARAMS,
+                |row| row.get(0),
+            )
+        );
+
+        // There is exactly one row in the version table.
+        assert_eq!(
+            Ok(1),
+            conn.query_row("SELECT COUNT(id) from persistent.version;", NO_PARAMS, |row| row
+                .get(0))
+        );
+
+        // The version must be set to 3
+        assert_eq!(
+            Ok(3),
+            conn.query_row(
+                "SELECT version from persistent.version WHERE id = 0;",
+                NO_PARAMS,
+                |row| row.get(0)
+            )
+        );
+
+        // Will subsequent calls to create_or_get_version still return the same version even
+        // if the current version changes.
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            let version = create_or_get_version(&tx, 5).unwrap();
+            tx.commit().unwrap();
+            assert_eq!(version, 3);
+        }
+
+        // There is still exactly one row in the version table.
+        assert_eq!(
+            Ok(1),
+            conn.query_row("SELECT COUNT(id) from persistent.version;", NO_PARAMS, |row| row
+                .get(0))
+        );
+
+        // Bump the version.
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            update_version(&tx, 5).unwrap();
+            tx.commit().unwrap();
+        }
+
+        // Now the version should have changed.
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            let version = create_or_get_version(&tx, 7).unwrap();
+            tx.commit().unwrap();
+            assert_eq!(version, 5);
+        }
+
+        // There is still exactly one row in the version table.
+        assert_eq!(
+            Ok(1),
+            conn.query_row("SELECT COUNT(id) from persistent.version;", NO_PARAMS, |row| row
+                .get(0))
+        );
+
+        // The version must be set to 5
+        assert_eq!(
+            Ok(5),
+            conn.query_row(
+                "SELECT version from persistent.version WHERE id = 0;",
+                NO_PARAMS,
+                |row| row.get(0)
+            )
+        );
+    }
+
+    #[test]
+    fn create_or_get_version_legacy_database() {
+        let mut conn = Connection::open_in_memory().unwrap();
+        conn.execute("ATTACH DATABASE 'file::memory:' as persistent;", NO_PARAMS).unwrap();
+        // A legacy (version 0) database is detected if the keyentry table exists but no
+        // version table.
+        conn.execute(
+            "CREATE TABLE IF NOT EXISTS persistent.keyentry (
+             id INTEGER UNIQUE,
+             key_type INTEGER,
+             domain INTEGER,
+             namespace INTEGER,
+             alias BLOB,
+             state INTEGER,
+             km_uuid BLOB);",
+            NO_PARAMS,
+        )
+        .unwrap();
+
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            let version = create_or_get_version(&tx, 3).unwrap();
+            tx.commit().unwrap();
+            // In the legacy case, version 0 must be returned.
+            assert_eq!(version, 0);
+        }
+
+        // Was the version table created as expected?
+        assert_eq!(
+            Ok("version".to_owned()),
+            conn.query_row(
+                "SELECT name FROM persistent.sqlite_master
+                 WHERE type = 'table' AND name = 'version';",
+                NO_PARAMS,
+                |row| row.get(0),
+            )
+        );
+
+        // There is exactly one row in the version table.
+        assert_eq!(
+            Ok(1),
+            conn.query_row("SELECT COUNT(id) from persistent.version;", NO_PARAMS, |row| row
+                .get(0))
+        );
+
+        // The version must be set to 0
+        assert_eq!(
+            Ok(0),
+            conn.query_row(
+                "SELECT version from persistent.version WHERE id = 0;",
+                NO_PARAMS,
+                |row| row.get(0)
+            )
+        );
+
+        // Will subsequent calls to create_or_get_version still return the same version even
+        // if the current version changes.
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            let version = create_or_get_version(&tx, 5).unwrap();
+            tx.commit().unwrap();
+            assert_eq!(version, 0);
+        }
+
+        // There is still exactly one row in the version table.
+        assert_eq!(
+            Ok(1),
+            conn.query_row("SELECT COUNT(id) from persistent.version;", NO_PARAMS, |row| row
+                .get(0))
+        );
+
+        // Bump the version.
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            update_version(&tx, 5).unwrap();
+            tx.commit().unwrap();
+        }
+
+        // Now the version should have changed.
+        {
+            let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate).unwrap();
+            let version = create_or_get_version(&tx, 7).unwrap();
+            tx.commit().unwrap();
+            assert_eq!(version, 5);
+        }
+
+        // There is still exactly one row in the version table.
+        assert_eq!(
+            Ok(1),
+            conn.query_row("SELECT COUNT(id) from persistent.version;", NO_PARAMS, |row| row
+                .get(0))
+        );
+
+        // The version must be set to 5
+        assert_eq!(
+            Ok(5),
+            conn.query_row(
+                "SELECT version from persistent.version WHERE id = 0;",
+                NO_PARAMS,
+                |row| row.get(0)
+            )
+        );
+    }
+}
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 3f003be..29a3f0b 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -14,11 +14,14 @@
//! This is the Keystore 2.0 Enforcements module.
// TODO: more description to follow.
-use crate::database::{AuthTokenEntry, MonotonicRawTime};
use crate::error::{map_binder_status, Error, ErrorCode};
use crate::globals::{get_timestamp_service, ASYNC_TASK, DB, ENFORCEMENTS};
use crate::key_parameter::{KeyParameter, KeyParameterValue};
use crate::{authorization::Error as AuthzError, super_key::SuperEncryptionType};
+use crate::{
+ database::{AuthTokenEntry, MonotonicRawTime},
+ globals::SUPER_KEY,
+};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
Algorithm::Algorithm, ErrorCode::ErrorCode as Ec, HardwareAuthToken::HardwareAuthToken,
HardwareAuthenticatorType::HardwareAuthenticatorType,
@@ -34,11 +37,9 @@
};
use android_system_keystore2::binder::Strong;
use anyhow::{Context, Result};
-use keystore2_system_property::PropertyWatcher;
use std::{
collections::{HashMap, HashSet},
sync::{
- atomic::{AtomicI32, Ordering},
mpsc::{channel, Receiver, Sender, TryRecvError},
Arc, Mutex, Weak,
},
@@ -60,47 +61,54 @@
state: AuthRequestState,
/// This need to be set to Some to fulfill a AuthRequestState::OpAuth or
/// AuthRequestState::TimeStampedOpAuth.
- hat: Option<HardwareAuthToken>,
+ hat: Mutex<Option<HardwareAuthToken>>,
}
+unsafe impl Sync for AuthRequest {}
+
impl AuthRequest {
- fn op_auth() -> Arc<Mutex<Self>> {
- Arc::new(Mutex::new(Self { state: AuthRequestState::OpAuth, hat: None }))
+ fn op_auth() -> Arc<Self> {
+ Arc::new(Self { state: AuthRequestState::OpAuth, hat: Mutex::new(None) })
}
- fn timestamped_op_auth(receiver: Receiver<Result<TimeStampToken, Error>>) -> Arc<Mutex<Self>> {
- Arc::new(Mutex::new(Self {
+ fn timestamped_op_auth(receiver: Receiver<Result<TimeStampToken, Error>>) -> Arc<Self> {
+ Arc::new(Self {
state: AuthRequestState::TimeStampedOpAuth(receiver),
- hat: None,
- }))
+ hat: Mutex::new(None),
+ })
}
fn timestamp(
hat: HardwareAuthToken,
receiver: Receiver<Result<TimeStampToken, Error>>,
- ) -> Arc<Mutex<Self>> {
- Arc::new(Mutex::new(Self { state: AuthRequestState::TimeStamp(receiver), hat: Some(hat) }))
+ ) -> Arc<Self> {
+ Arc::new(Self { state: AuthRequestState::TimeStamp(receiver), hat: Mutex::new(Some(hat)) })
}
- fn add_auth_token(&mut self, hat: HardwareAuthToken) {
- self.hat = Some(hat)
+ fn add_auth_token(&self, hat: HardwareAuthToken) {
+ *self.hat.lock().unwrap() = Some(hat)
}
- fn get_auth_tokens(&mut self) -> Result<(HardwareAuthToken, Option<TimeStampToken>)> {
- match (&self.state, self.hat.is_some()) {
- (AuthRequestState::OpAuth, true) => Ok((self.hat.take().unwrap(), None)),
- (AuthRequestState::TimeStampedOpAuth(recv), true)
- | (AuthRequestState::TimeStamp(recv), true) => {
+ fn get_auth_tokens(&self) -> Result<(HardwareAuthToken, Option<TimeStampToken>)> {
+ let hat = self
+ .hat
+ .lock()
+ .unwrap()
+ .take()
+ .ok_or(Error::Km(ErrorCode::KEY_USER_NOT_AUTHENTICATED))
+ .context("In get_auth_tokens: No operation auth token received.")?;
+
+ let tst = match &self.state {
+ AuthRequestState::TimeStampedOpAuth(recv) | AuthRequestState::TimeStamp(recv) => {
let result = recv.recv().context("In get_auth_tokens: Sender disconnected.")?;
- let tst = result.context(concat!(
+ Some(result.context(concat!(
"In get_auth_tokens: Worker responded with error ",
"from generating timestamp token."
- ))?;
- Ok((self.hat.take().unwrap(), Some(tst)))
+ ))?)
}
- (_, false) => Err(Error::Km(ErrorCode::KEY_USER_NOT_AUTHENTICATED))
- .context("In get_auth_tokens: No operation auth token received."),
- }
+ AuthRequestState::OpAuth => None,
+ };
+ Ok((hat, tst))
}
}
@@ -126,7 +134,7 @@
/// We block on timestamp tokens, because we can always make progress on these requests.
/// The per-op auth tokens might never come, which means we fail if the client calls
/// update or finish before we got a per-op auth token.
- Waiting(Arc<Mutex<AuthRequest>>),
+ Waiting(Arc<AuthRequest>),
/// In this state we have gotten all of the required tokens, we just cache them to
/// be used when the operation progresses.
Token(HardwareAuthToken, Option<TimeStampToken>),
@@ -168,9 +176,15 @@
const CLEANUP_PERIOD: u8 = 25;
pub fn add_auth_token(&self, hat: HardwareAuthToken) {
- let mut map = self.map_and_cleanup_counter.lock().unwrap();
- let (ref mut map, _) = *map;
- if let Some((_, recv)) = map.remove_entry(&hat.challenge) {
+ let recv = {
+ // Limit the scope of the mutex guard, so that it is not held while the auth token is
+ // added.
+ let mut map = self.map_and_cleanup_counter.lock().unwrap();
+ let (ref mut map, _) = *map;
+ map.remove_entry(&hat.challenge)
+ };
+
+ if let Some((_, recv)) = recv {
recv.add_auth_token(hat);
}
}
@@ -190,7 +204,7 @@
}
#[derive(Debug)]
-struct TokenReceiver(Weak<Mutex<AuthRequest>>);
+struct TokenReceiver(Weak<AuthRequest>);
impl TokenReceiver {
fn is_obsolete(&self) -> bool {
@@ -199,8 +213,7 @@
fn add_auth_token(&self, hat: HardwareAuthToken) {
if let Some(state_arc) = self.0.upgrade() {
- let mut state = state_arc.lock().unwrap();
- state.add_auth_token(hat);
+ state_arc.add_auth_token(hat);
}
}
}
@@ -325,8 +338,7 @@
/// tokens into the DeferredAuthState::Token state for future use.
fn get_auth_tokens(&mut self) -> Result<(Option<HardwareAuthToken>, Option<TimeStampToken>)> {
let deferred_tokens = if let DeferredAuthState::Waiting(ref auth_request) = self.state {
- let mut state = auth_request.lock().unwrap();
- Some(state.get_auth_tokens().context("In AuthInfo::get_auth_tokens.")?)
+ Some(auth_request.get_auth_tokens().context("In AuthInfo::get_auth_tokens.")?)
} else {
None
};
@@ -369,8 +381,6 @@
/// The enforcement module will try to get a confirmation token from this channel whenever
/// an operation that requires confirmation finishes.
confirmation_token_receiver: Arc<Mutex<Option<Receiver<Vec<u8>>>>>,
- /// Highest boot level seen in keystore.boot_level; used to enforce MAX_BOOT_LEVEL tag.
- boot_level: AtomicI32,
}
impl Enforcements {
@@ -596,7 +606,7 @@
}
if let Some(level) = max_boot_level {
- if level < self.boot_level.load(Ordering::SeqCst) {
+ if !SUPER_KEY.level_accessible(level) {
return Err(Error::Km(Ec::BOOT_LEVEL_EXCEEDED))
.context("In authorize_create: boot level is too late.");
}
@@ -628,8 +638,7 @@
} else {
unlocked_device_required
}
- })
- .context("In authorize_create: Trying to get required auth token.")?;
+ });
Some(
hat_and_last_off_body
.ok_or(Error::Km(Ec::KEY_USER_NOT_AUTHENTICATED))
@@ -672,9 +681,10 @@
// So the HAT cannot be presented on create. So on update/finish we present both
// an per-op-bound auth token and a timestamp token.
(Some(_), true, true) => (None, DeferredAuthState::TimeStampedOpAuthRequired),
- (Some(hat), true, false) => {
- (None, DeferredAuthState::TimeStampRequired(hat.take_auth_token()))
- }
+ (Some(hat), true, false) => (
+ Some(hat.auth_token().clone()),
+ DeferredAuthState::TimeStampRequired(hat.take_auth_token()),
+ ),
(Some(hat), false, true) => {
(Some(hat.take_auth_token()), DeferredAuthState::OpAuthRequired)
}
@@ -689,15 +699,11 @@
})
}
- fn find_auth_token<F>(p: F) -> Result<Option<(AuthTokenEntry, MonotonicRawTime)>>
+ fn find_auth_token<F>(p: F) -> Option<(AuthTokenEntry, MonotonicRawTime)>
where
F: Fn(&AuthTokenEntry) -> bool,
{
- DB.with(|db| {
- let mut db = db.borrow_mut();
- db.find_auth_token_entry(p).context("Trying to find auth token.")
- })
- .context("In find_auth_token.")
+ DB.with(|db| db.borrow().find_auth_token_entry(p))
}
/// Checks if the time now since epoch is greater than (or equal, if is_given_time_inclusive is
@@ -741,11 +747,9 @@
/// Add this auth token to the database.
/// Then present the auth token to the op auth map. If an operation is waiting for this
/// auth token this fulfills the request and removes the receiver from the map.
- pub fn add_auth_token(&self, hat: HardwareAuthToken) -> Result<()> {
- DB.with(|db| db.borrow_mut().insert_auth_token(&hat)).context("In add_auth_token.")?;
-
+ pub fn add_auth_token(&self, hat: HardwareAuthToken) {
+ DB.with(|db| db.borrow_mut().insert_auth_token(&hat));
self.op_auth_map.add_auth_token(hat);
- Ok(())
}
/// This allows adding an entry to the op_auth_map, indexed by the operation challenge.
@@ -762,27 +766,35 @@
key_parameters: &[KeyParameter],
flags: Option<i32>,
) -> SuperEncryptionType {
- if *domain != Domain::APP {
- return SuperEncryptionType::None;
- }
if let Some(flags) = flags {
if (flags & KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING) != 0 {
return SuperEncryptionType::None;
}
}
- if key_parameters
- .iter()
- .any(|kp| matches!(kp.key_parameter_value(), KeyParameterValue::UnlockedDeviceRequired))
- {
- return SuperEncryptionType::ScreenLockBound;
+ // Each answer has a priority, numerically largest priority wins.
+ struct Candidate {
+ priority: u32,
+ enc_type: SuperEncryptionType,
}
- if key_parameters
- .iter()
- .any(|kp| matches!(kp.key_parameter_value(), KeyParameterValue::UserSecureID(_)))
- {
- return SuperEncryptionType::LskfBound;
+ let mut result = Candidate { priority: 0, enc_type: SuperEncryptionType::None };
+ for kp in key_parameters {
+ let t = match kp.key_parameter_value() {
+ KeyParameterValue::MaxBootLevel(level) => {
+ Candidate { priority: 3, enc_type: SuperEncryptionType::BootLevel(*level) }
+ }
+ KeyParameterValue::UnlockedDeviceRequired if *domain == Domain::APP => {
+ Candidate { priority: 2, enc_type: SuperEncryptionType::ScreenLockBound }
+ }
+ KeyParameterValue::UserSecureID(_) if *domain == Domain::APP => {
+ Candidate { priority: 1, enc_type: SuperEncryptionType::LskfBound }
+ }
+ _ => Candidate { priority: 0, enc_type: SuperEncryptionType::None },
+ };
+ if t.priority > result.priority {
+ result = t;
+ }
}
- SuperEncryptionType::None
+ result.enc_type
}
/// Finds a matching auth token along with a timestamp token.
@@ -805,28 +817,22 @@
// Filter the matching auth tokens by challenge
let result = Self::find_auth_token(|hat: &AuthTokenEntry| {
(challenge == hat.challenge()) && hat.satisfies(&sids, auth_type)
- })
- .context(
- "In get_auth_tokens: Failed to get a matching auth token filtered by challenge.",
- )?;
+ });
let auth_token = if let Some((auth_token_entry, _)) = result {
auth_token_entry.take_auth_token()
} else {
// Filter the matching auth tokens by age.
if auth_token_max_age_millis != 0 {
- let now_in_millis = MonotonicRawTime::now().milli_seconds();
+ let now_in_millis = MonotonicRawTime::now();
let result = Self::find_auth_token(|auth_token_entry: &AuthTokenEntry| {
let token_valid = now_in_millis
- .checked_sub(auth_token_entry.time_received().milli_seconds())
+ .checked_sub(&auth_token_entry.time_received())
.map_or(false, |token_age_in_millis| {
- auth_token_max_age_millis > token_age_in_millis
+ auth_token_max_age_millis > token_age_in_millis.milliseconds()
});
token_valid && auth_token_entry.satisfies(&sids, auth_type)
- })
- .context(
- "In get_auth_tokens: Failed to get a matching auth token filtered by age.",
- )?;
+ });
if let Some((auth_token_entry, _)) = result {
auth_token_entry.take_auth_token()
@@ -844,35 +850,6 @@
.context("In get_auth_tokens. Error in getting timestamp token.")?;
Ok((auth_token, tst))
}
-
- /// Watch the `keystore.boot_level` system property, and keep self.boot_level up to date.
- /// Blocks waiting for system property changes, so must be run in its own thread.
- pub fn watch_boot_level(&self) -> Result<()> {
- let mut w = PropertyWatcher::new("keystore.boot_level")?;
- loop {
- fn parse_value(_name: &str, value: &str) -> Result<Option<i32>> {
- Ok(if value == "end" { None } else { Some(value.parse::<i32>()?) })
- }
- match w.read(parse_value)? {
- Some(level) => {
- let old = self.boot_level.fetch_max(level, Ordering::SeqCst);
- log::info!(
- "Read keystore.boot_level: {}; boot level {} -> {}",
- level,
- old,
- std::cmp::max(old, level)
- );
- }
- None => {
- log::info!("keystore.boot_level is `end`, finishing.");
- self.boot_level.fetch_max(i32::MAX, Ordering::SeqCst);
- break;
- }
- }
- w.wait()?;
- }
- Ok(())
- }
}
// TODO: Add tests to enforcement module (b/175578618).
diff --git a/keystore2/src/error.rs b/keystore2/src/error.rs
index 465dcfa..f969cb6 100644
--- a/keystore2/src/error.rs
+++ b/keystore2/src/error.rs
@@ -30,16 +30,13 @@
//! Keystore functions should use `anyhow::Result` to return error conditions, and
//! context should be added every time an error is forwarded.
-use std::cmp::PartialEq;
-
pub use android_hardware_security_keymint::aidl::android::hardware::security::keymint::ErrorCode::ErrorCode;
pub use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
-
-use keystore2_selinux as selinux;
-
use android_system_keystore2::binder::{
ExceptionCode, Result as BinderResult, Status as BinderStatus, StatusCode,
};
+use keystore2_selinux as selinux;
+use std::cmp::PartialEq;
/// This is the main Keystore error type. It wraps the Keystore `ResponseCode` generated
/// from AIDL in the `Rc` variant and Keymint `ErrorCode` in the Km variant.
@@ -140,7 +137,7 @@
/// This function should be used by Keystore service calls to translate error conditions
/// into service specific exceptions.
///
-/// All error conditions get logged by this function.
+/// All error conditions get logged by this function, except for KEY_NOT_FOUND error.
///
/// All `Error::Rc(x)` and `Error::Km(x)` variants get mapped onto a service specific error
/// code of x. This is possible because KeyMint `ErrorCode` errors are always negative and
@@ -174,7 +171,13 @@
map_err_with(
result,
|e| {
- log::error!("{:?}", e);
+ // Make the key not found errors silent.
+ if !matches!(
+ e.root_cause().downcast_ref::<Error>(),
+ Some(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ ) {
+ log::error!("{:?}", e);
+ }
e
},
handle_ok,
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index 6cc0f27..2010c79 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -20,22 +20,28 @@
use crate::{
async_task,
- database::{KeystoreDB, Uuid},
+ database::{BlobMetaData, KeystoreDB, Uuid},
super_key::SuperKeyManager,
};
use anyhow::{Context, Result};
use async_task::AsyncTask;
-use std::sync::Arc;
+use std::sync::{
+ atomic::{AtomicU8, Ordering},
+ Arc,
+};
pub struct Gc {
async_task: Arc<AsyncTask>,
+ notified: Arc<AtomicU8>,
}
impl Gc {
/// Creates a garbage collector using the given async_task.
- /// The garbage collector needs a function to invalidate key blobs and a database connection.
- /// Both are obtained from the init function. The function is only called if this is first
- /// time a garbage collector was initialized with the given AsyncTask instance.
+ /// The garbage collector needs a function to invalidate key blobs, a database connection,
+ /// and a reference to the `SuperKeyManager`. They are obtained from the init function.
+ /// The function is only called if this is first time a garbage collector was initialized
+ /// with the given AsyncTask instance.
+ /// Note: It is a logical error to initialize different Gc instances with the same `AsyncTask`.
pub fn new_init_with<F>(async_task: Arc<AsyncTask>, init: F) -> Self
where
F: FnOnce() -> (
@@ -46,34 +52,43 @@
+ 'static,
{
let weak_at = Arc::downgrade(&async_task);
+ let notified = Arc::new(AtomicU8::new(0));
+ let notified_clone = notified.clone();
// Initialize the task's shelf.
async_task.queue_hi(move |shelf| {
let (invalidate_key, db, super_key) = init();
+ let notified = notified_clone;
shelf.get_or_put_with(|| GcInternal {
- blob_id_to_delete: None,
+ deleted_blob_ids: vec![],
+ superseded_blobs: vec![],
invalidate_key,
db,
async_task: weak_at,
super_key,
+ notified,
});
});
- Self { async_task }
+ Self { async_task, notified }
}
/// Notifies the key garbage collector to iterate through orphaned and superseded blobs and
/// attempts their deletion. We only process one key at a time and then schedule another
/// attempt by queueing it in the async_task (low priority) queue.
pub fn notify_gc(&self) {
- self.async_task.queue_lo(|shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step())
+ if let Ok(0) = self.notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed) {
+ self.async_task.queue_lo(|shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step())
+ }
}
}
struct GcInternal {
- blob_id_to_delete: Option<i64>,
+ deleted_blob_ids: Vec<i64>,
+ superseded_blobs: Vec<(i64, Vec<u8>, BlobMetaData)>,
invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
db: KeystoreDB,
async_task: std::sync::Weak<AsyncTask>,
super_key: Arc<SuperKeyManager>,
+ notified: Arc<AtomicU8>,
}
impl GcInternal {
@@ -81,16 +96,23 @@
/// We process one key at a time, because deleting a key is a time consuming process which
/// may involve calling into the KeyMint backend and we don't want to hog neither the backend
/// nor the database for extended periods of time.
+ /// To limit the number of database transactions, which are also expensive and competing
+ /// with threads on the critical path, deleted blobs are loaded in batches.
fn process_one_key(&mut self) -> Result<()> {
- if let Some((blob_id, blob, blob_metadata)) = self
- .db
- .handle_next_superseded_blob(self.blob_id_to_delete.take())
- .context("In process_one_key: Trying to handle superseded blob.")?
- {
- // Set the blob_id as the next to be deleted blob. So it will be
+ if self.superseded_blobs.is_empty() {
+ let blobs = self
+ .db
+ .handle_next_superseded_blobs(&self.deleted_blob_ids, 20)
+ .context("In process_one_key: Trying to handle superseded blob.")?;
+ self.deleted_blob_ids = vec![];
+ self.superseded_blobs = blobs;
+ }
+
+ if let Some((blob_id, blob, blob_metadata)) = self.superseded_blobs.pop() {
+ // Add the next blob_id to the deleted blob ids list. So it will be
// removed from the database regardless of whether the following
// succeeds or not.
- self.blob_id_to_delete = Some(blob_id);
+ self.deleted_blob_ids.push(blob_id);
// If the key has a km_uuid we try to get the corresponding device
// and delete the key, unwrapping if necessary and possible.
@@ -110,13 +132,20 @@
/// Processes one key and then schedules another attempt until it runs out of blobs to delete.
fn step(&mut self) {
+ self.notified.store(0, Ordering::Relaxed);
if let Err(e) = self.process_one_key() {
log::error!("Error trying to delete blob entry. {:?}", e);
}
// Schedule the next step. This gives high priority requests a chance to interleave.
- if self.blob_id_to_delete.is_some() {
+ if !self.deleted_blob_ids.is_empty() {
if let Some(at) = self.async_task.upgrade() {
- at.queue_lo(move |shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step());
+ if let Ok(0) =
+ self.notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
+ {
+ at.queue_lo(move |shelf| {
+ shelf.get_downcast_mut::<GcInternal>().unwrap().step()
+ });
+ }
}
}
}
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 58142a4..89114a6 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -20,6 +20,7 @@
use crate::legacy_blob::LegacyBlobLoader;
use crate::legacy_migrator::LegacyMigrator;
use crate::super_key::SuperKeyManager;
+use crate::utils::watchdog as wd;
use crate::utils::Asp;
use crate::{async_task::AsyncTask, database::MonotonicRawTime};
use crate::{
@@ -38,11 +39,12 @@
use binder::FromIBinder;
use keystore2_vintf::get_aidl_instances;
use lazy_static::lazy_static;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex, RwLock};
use std::{cell::RefCell, sync::Once};
use std::{collections::HashMap, path::Path, path::PathBuf};
static DB_INIT: Once = Once::new();
+static DB_SET_WAL_MODE: Once = Once::new();
/// Open a connection to the Keystore 2.0 database. This is called during the initialization of
/// the thread local DB field. It should never be called directly. The first time this is called
@@ -53,27 +55,19 @@
/// is run only once, as long as the ASYNC_TASK instance is the same. So only one additional
/// database connection is created for the garbage collector worker.
pub fn create_thread_local_db() -> KeystoreDB {
- let gc = Gc::new_init_with(ASYNC_TASK.clone(), || {
- (
- Box::new(|uuid, blob| {
- let km_dev: Strong<dyn IKeyMintDevice> =
- get_keymint_dev_by_uuid(uuid).map(|(dev, _)| dev)?.get_interface()?;
- map_km_error(km_dev.deleteKey(&*blob))
- .context("In invalidate key closure: Trying to invalidate key blob.")
- }),
- KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), None)
- .expect("Failed to open database."),
- SUPER_KEY.clone(),
- )
+ let db_path = DB_PATH.read().expect("Could not get the database directory.");
+
+ DB_SET_WAL_MODE.call_once(|| {
+ log::info!("Setting Keystore 2.0 database to WAL mode first time since boot.");
+ KeystoreDB::set_wal_mode(&db_path)
+ .expect("In create_thread_local_db: Could not set WAL mode.");
});
- let mut db =
- KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), Some(gc))
- .expect("Failed to open database.");
+ let mut db = KeystoreDB::new(&db_path, Some(GC.clone())).expect("Failed to open database.");
+
DB_INIT.call_once(|| {
log::info!("Touching Keystore 2.0 database for this first time since boot.");
- db.insert_last_off_body(MonotonicRawTime::now())
- .expect("Could not initialize database with last off body.");
+ db.insert_last_off_body(MonotonicRawTime::now());
log::info!("Calling cleanup leftovers.");
let n = db.cleanup_leftovers().expect("Failed to cleanup database on startup.");
if n != 0 {
@@ -151,7 +145,7 @@
lazy_static! {
/// The path where keystore stores all its keys.
- pub static ref DB_PATH: Mutex<PathBuf> = Mutex::new(
+ pub static ref DB_PATH: RwLock<PathBuf> = RwLock::new(
Path::new("/data/misc/keystore").to_path_buf());
/// Runtime database of unwrapped super keys.
pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
@@ -169,10 +163,27 @@
/// LegacyBlobLoader is initialized and exists globally.
/// The same directory used by the database is used by the LegacyBlobLoader as well.
pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
- &DB_PATH.lock().expect("Could not get the database path for legacy blob loader.")));
+ &DB_PATH.read().expect("Could not get the database path for legacy blob loader.")));
/// Legacy migrator. Atomically migrates legacy blobs to the database.
pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
- Arc::new(LegacyMigrator::new(ASYNC_TASK.clone()));
+ Arc::new(LegacyMigrator::new(Arc::new(Default::default())));
+ /// Background thread which handles logging via statsd and logd
+ pub static ref LOGS_HANDLER: Arc<AsyncTask> = Default::default();
+
+ static ref GC: Arc<Gc> = Arc::new(Gc::new_init_with(ASYNC_TASK.clone(), || {
+ (
+ Box::new(|uuid, blob| {
+ let km_dev: Strong<dyn IKeyMintDevice> =
+ get_keymint_dev_by_uuid(uuid).map(|(dev, _)| dev)?.get_interface()?;
+ let _wp = wd::watch_millis("In invalidate key closure: calling deleteKey", 500);
+ map_km_error(km_dev.deleteKey(&*blob))
+ .context("In invalidate key closure: Trying to invalidate key blob.")
+ }),
+ KeystoreDB::new(&DB_PATH.read().expect("Could not get the database directory."), None)
+ .expect("Failed to open database."),
+ SUPER_KEY.clone(),
+ )
+ }));
}
static KEYMINT_SERVICE_NAME: &str = "android.hardware.security.keymint.IKeyMintDevice";
@@ -225,8 +236,10 @@
.context("In connect_keymint: Trying to get Legacy wrapper.")
}?;
+ let wp = wd::watch_millis("In connect_keymint: calling getHardwareInfo()", 500);
let hw_info = map_km_error(keymint.getHardwareInfo())
.context("In connect_keymint: Failed to get hardware info.")?;
+ drop(wp);
Ok((Asp::new(keymint.as_binder()), hw_info))
}
diff --git a/keystore2/src/key_parameter.rs b/keystore2/src/key_parameter.rs
index 74a9b23..771d609 100644
--- a/keystore2/src/key_parameter.rs
+++ b/keystore2/src/key_parameter.rs
@@ -90,11 +90,9 @@
//! * The termination condition which has an empty in list.
//! * The public interface, which does not have @marker and calls itself with an empty out list.
-#![allow(clippy::from_over_into, clippy::needless_question_mark)]
-
use std::convert::TryInto;
-use crate::db_utils::SqlField;
+use crate::database::utils::SqlField;
use crate::error::Error as KeystoreError;
use crate::error::ResponseCode;
@@ -601,9 +599,9 @@
], [$($in)*]
}};
(@into $enum_name:ident, [$($out:tt)*], []) => {
- impl Into<KmKeyParameter> for $enum_name {
- fn into(self) -> KmKeyParameter {
- match self {
+ impl From<$enum_name> for KmKeyParameter {
+ fn from(x: $enum_name) -> Self {
+ match x {
$($out)*
}
}
@@ -827,6 +825,9 @@
/// When deleted, the key is guaranteed to be permanently deleted and unusable
#[key_param(tag = ROLLBACK_RESISTANCE, field = BoolValue)]
RollbackResistance,
+ /// The Key shall only be used during the early boot stage
+ #[key_param(tag = EARLY_BOOT_ONLY, field = BoolValue)]
+ EarlyBootOnly,
/// The date and time at which the key becomes active
#[key_param(tag = ACTIVE_DATETIME, field = DateTime)]
ActiveDateTime(i64),
@@ -1389,11 +1390,11 @@
db.prepare("SELECT tag, data, security_level FROM persistent.keyparameter")?;
let mut rows = stmt.query(NO_PARAMS)?;
let row = rows.next()?.unwrap();
- Ok(KeyParameter::new_from_sql(
+ KeyParameter::new_from_sql(
Tag(row.get(0)?),
&SqlField::new(1, row),
SecurityLevel(row.get(2)?),
- )?)
+ )
}
}
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index df7ba26..53461da 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -17,6 +17,7 @@
use keystore2::entropy;
use keystore2::globals::ENFORCEMENTS;
use keystore2::maintenance::Maintenance;
+use keystore2::metrics;
use keystore2::remote_provisioning::RemoteProvisioningService;
use keystore2::service::KeystoreService;
use keystore2::{apc::ApcManager, shared_secret_negotiation};
@@ -46,10 +47,6 @@
// Saying hi.
info!("Keystore2 is starting.");
- // Initialize the per boot database.
- let _keep_me_alive = keystore2::database::KeystoreDB::keep_perboot_db_alive()
- .expect("Failed to initialize the perboot database.");
-
let mut args = std::env::args();
args.next().expect("That's odd. How is there not even a first argument?");
@@ -59,7 +56,7 @@
// For the ground truth check the service startup rule for init (typically in keystore2.rc).
let id_rotation_state = if let Some(dir) = args.next() {
let db_path = Path::new(&dir);
- *keystore2::globals::DB_PATH.lock().expect("Could not lock DB_PATH.") =
+ *keystore2::globals::DB_PATH.write().expect("Could not lock DB_PATH.") =
db_path.to_path_buf();
IdRotationState::new(&db_path)
} else {
@@ -70,13 +67,6 @@
ENFORCEMENTS.install_confirmation_token_receiver(confirmation_token_receiver);
- info!("Starting boot level watcher.");
- std::thread::spawn(|| {
- keystore2::globals::ENFORCEMENTS
- .watch_boot_level()
- .unwrap_or_else(|e| error!("watch_boot_level failed: {}", e));
- });
-
entropy::register_feeder();
shared_secret_negotiation::perform_shared_secret_negotiation();
@@ -131,7 +121,7 @@
}
let vpnprofilestore = VpnProfileStore::new_native_binder(
- &keystore2::globals::DB_PATH.lock().expect("Could not get DB_PATH."),
+ &keystore2::globals::DB_PATH.read().expect("Could not get DB_PATH."),
);
binder::add_service(VPNPROFILESTORE_SERVICE_NAME, vpnprofilestore.as_binder()).unwrap_or_else(
|e| {
@@ -142,6 +132,13 @@
},
);
+ std::thread::spawn(|| {
+ match metrics::register_pull_metrics_callbacks() {
+ Err(e) => error!("register_pull_metrics_callbacks failed: {:?}.", e),
+ _ => info!("Pull metrics callbacks successfully registered."),
+ };
+ });
+
info!("Successfully registered Keystore 2.0 service.");
info!("Joining thread pool now.");
diff --git a/keystore2/src/km_compat/km_compat.cpp b/keystore2/src/km_compat/km_compat.cpp
index b824aa8..f6f8bfe 100644
--- a/keystore2/src/km_compat/km_compat.cpp
+++ b/keystore2/src/km_compat/km_compat.cpp
@@ -108,7 +108,6 @@
case Tag::EC_CURVE:
case Tag::RSA_PUBLIC_EXPONENT:
case Tag::RSA_OAEP_MGF_DIGEST:
- case Tag::BLOB_USAGE_REQUIREMENTS:
case Tag::BOOTLOADER_ONLY:
case Tag::ROLLBACK_RESISTANCE:
case Tag::EARLY_BOOT_ONLY:
@@ -189,7 +188,7 @@
return std::make_pair(true, isSoftKeyMint);
}
-// Returns the prefix from a blob. If there's no prefix, returns the passed-in blob.
+// Removes the prefix from a blob. If there's no prefix, returns the passed-in blob.
//
std::vector<uint8_t> prefixedKeyBlobRemovePrefix(const std::vector<uint8_t>& prefixedBlob) {
auto parsed = prefixedKeyBlobParsePrefix(prefixedBlob);
@@ -209,6 +208,21 @@
return parsed.second;
}
+// Inspects the given blob for prefixes.
+// Returns the blob stripped of the prefix if present. The boolean in the returned pair is true if
+// the blob was a software blob.
+std::pair<std::vector<uint8_t>, bool>
+dissectPrefixedKeyBlob(const std::vector<uint8_t>& prefixedBlob) {
+ auto [hasPrefix, isSoftware] = prefixedKeyBlobParsePrefix(prefixedBlob);
+ if (!hasPrefix) {
+ // Not actually prefixed, blob was probably persisted to disk prior to the
+ // prefixing code being introduced.
+ return {prefixedBlob, false};
+ }
+ return {std::vector<uint8_t>(prefixedBlob.begin() + kKeyBlobPrefixSize, prefixedBlob.end()),
+ isSoftware};
+}
+
/*
* Returns true if the parameter is not understood by KM 4.1 and older but can be enforced by
* Keystore. These parameters need to be included in the returned KeyCharacteristics, but will not
@@ -290,7 +304,14 @@
static std::vector<KeyCharacteristics>
processLegacyCharacteristics(KeyMintSecurityLevel securityLevel,
const std::vector<KeyParameter>& genParams,
- const V4_0_KeyCharacteristics& legacyKc) {
+ const V4_0_KeyCharacteristics& legacyKc, bool hwEnforcedOnly = false) {
+
+ KeyCharacteristics hwEnforced{securityLevel,
+ convertKeyParametersFromLegacy(legacyKc.hardwareEnforced)};
+
+ if (hwEnforcedOnly) {
+ return {hwEnforced};
+ }
KeyCharacteristics keystoreEnforced{KeyMintSecurityLevel::KEYSTORE,
convertKeyParametersFromLegacy(legacyKc.softwareEnforced)};
@@ -309,8 +330,6 @@
return {keystoreEnforced};
}
- KeyCharacteristics hwEnforced{securityLevel,
- convertKeyParametersFromLegacy(legacyKc.hardwareEnforced)};
return {hwEnforced, keystoreEnforced};
}
@@ -589,7 +608,7 @@
ScopedAStatus KeyMintDevice::begin(KeyPurpose in_inPurpose,
const std::vector<uint8_t>& prefixedKeyBlob,
const std::vector<KeyParameter>& in_inParams,
- const HardwareAuthToken& in_inAuthToken,
+ const std::optional<HardwareAuthToken>& in_inAuthToken,
BeginResult* _aidl_return) {
if (!mOperationSlots.claimSlot()) {
return convertErrorCode(V4_0_ErrorCode::TOO_MANY_OPERATIONS);
@@ -688,9 +707,35 @@
return convertErrorCode(km_error);
}
-ScopedAStatus KeyMintDevice::performOperation(const std::vector<uint8_t>& /* request */,
- std::vector<uint8_t>* /* response */) {
- return convertErrorCode(KMV1::ErrorCode::UNIMPLEMENTED);
+ScopedAStatus KeyMintDevice::getKeyCharacteristics(
+ const std::vector<uint8_t>& prefixedKeyBlob, const std::vector<uint8_t>& appId,
+ const std::vector<uint8_t>& appData, std::vector<KeyCharacteristics>* keyCharacteristics) {
+ auto [strippedKeyBlob, isSoftware] = dissectPrefixedKeyBlob(prefixedKeyBlob);
+ if (isSoftware) {
+ return softKeyMintDevice_->getKeyCharacteristics(strippedKeyBlob, appId, appData,
+ keyCharacteristics);
+ } else {
+ KMV1::ErrorCode km_error;
+ auto ret = mDevice->getKeyCharacteristics(
+ strippedKeyBlob, appId, appData,
+ [&](V4_0_ErrorCode errorCode, const V4_0_KeyCharacteristics& v40KeyCharacteristics) {
+ km_error = convert(errorCode);
+ *keyCharacteristics =
+ processLegacyCharacteristics(securityLevel_, {} /* genParams */,
+ v40KeyCharacteristics, true /* hwEnforcedOnly */);
+ });
+
+ if (!ret.isOk()) {
+ LOG(ERROR) << __func__ << " getKeyCharacteristics failed: " << ret.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ }
+ if (km_error != KMV1::ErrorCode::OK) {
+ LOG(ERROR) << __func__
+ << " getKeyCharacteristics failed with code: " << toString(km_error);
+ }
+
+ return convertErrorCode(km_error);
+ }
}
ScopedAStatus KeyMintOperation::updateAad(const std::vector<uint8_t>& input,
@@ -1350,8 +1395,7 @@
if (!device) {
return ScopedAStatus::fromStatus(STATUS_NAME_NOT_FOUND);
}
- bool inserted = false;
- std::tie(i, inserted) = mDeviceCache.insert({in_securityLevel, std::move(device)});
+ i = mDeviceCache.insert(i, {in_securityLevel, std::move(device)});
}
*_aidl_return = i->second;
return ScopedAStatus::ok();
@@ -1359,14 +1403,15 @@
ScopedAStatus KeystoreCompatService::getSharedSecret(KeyMintSecurityLevel in_securityLevel,
std::shared_ptr<ISharedSecret>* _aidl_return) {
- if (!mSharedSecret) {
+ auto i = mSharedSecretCache.find(in_securityLevel);
+ if (i == mSharedSecretCache.end()) {
auto secret = SharedSecret::createSharedSecret(in_securityLevel);
if (!secret) {
return ScopedAStatus::fromStatus(STATUS_NAME_NOT_FOUND);
}
- mSharedSecret = std::move(secret);
+ i = mSharedSecretCache.insert(i, {in_securityLevel, std::move(secret)});
}
- *_aidl_return = mSharedSecret;
+ *_aidl_return = i->second;
return ScopedAStatus::ok();
}
diff --git a/keystore2/src/km_compat/km_compat.h b/keystore2/src/km_compat/km_compat.h
index 69c24b4..2d892da 100644
--- a/keystore2/src/km_compat/km_compat.h
+++ b/keystore2/src/km_compat/km_compat.h
@@ -109,7 +109,7 @@
ScopedAStatus destroyAttestationIds() override;
ScopedAStatus begin(KeyPurpose in_inPurpose, const std::vector<uint8_t>& in_inKeyBlob,
const std::vector<KeyParameter>& in_inParams,
- const HardwareAuthToken& in_inAuthToken,
+ const std::optional<HardwareAuthToken>& in_inAuthToken,
BeginResult* _aidl_return) override;
ScopedAStatus deviceLocked(bool passwordOnly,
const std::optional<TimeStampToken>& timestampToken) override;
@@ -118,8 +118,10 @@
ScopedAStatus convertStorageKeyToEphemeral(const std::vector<uint8_t>& storageKeyBlob,
std::vector<uint8_t>* ephemeralKeyBlob) override;
- ScopedAStatus performOperation(const std::vector<uint8_t>& request,
- std::vector<uint8_t>* response) override;
+ ScopedAStatus
+ getKeyCharacteristics(const std::vector<uint8_t>& storageKeyBlob,
+ const std::vector<uint8_t>& appId, const std::vector<uint8_t>& appData,
+ std::vector<KeyCharacteristics>* keyCharacteristics) override;
// These are public to allow testing code to use them directly.
// This class should not be used publicly anyway.
@@ -195,7 +197,7 @@
class KeystoreCompatService : public BnKeystoreCompatService {
private:
std::unordered_map<KeyMintSecurityLevel, std::shared_ptr<IKeyMintDevice>> mDeviceCache;
- std::shared_ptr<ISharedSecret> mSharedSecret;
+ std::unordered_map<KeyMintSecurityLevel, std::shared_ptr<ISharedSecret>> mSharedSecretCache;
std::shared_ptr<ISecureClock> mSecureClock;
public:
diff --git a/keystore2/src/km_compat/km_compat_type_conversion.h b/keystore2/src/km_compat/km_compat_type_conversion.h
index e3240e9..de09477 100644
--- a/keystore2/src/km_compat/km_compat_type_conversion.h
+++ b/keystore2/src/km_compat/km_compat_type_conversion.h
@@ -503,9 +503,6 @@
return V4_0::makeKeyParameter(V4_0::TAG_INCLUDE_UNIQUE_ID, v->get());
}
break;
- case KMV1::Tag::BLOB_USAGE_REQUIREMENTS:
- // This tag has been removed. Mapped on invalid.
- break;
case KMV1::Tag::BOOTLOADER_ONLY:
if (auto v = KMV1::authorizationValue(KMV1::TAG_BOOTLOADER_ONLY, kp)) {
return V4_0::makeKeyParameter(V4_0::TAG_BOOTLOADER_ONLY, v->get());
diff --git a/keystore2/src/km_compat/lib.rs b/keystore2/src/km_compat/lib.rs
index 5ece8a7..56c35bf 100644
--- a/keystore2/src/km_compat/lib.rs
+++ b/keystore2/src/km_compat/lib.rs
@@ -30,8 +30,8 @@
use super::*;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
Algorithm::Algorithm, BeginResult::BeginResult, BlockMode::BlockMode, Digest::Digest,
- ErrorCode::ErrorCode, HardwareAuthToken::HardwareAuthToken, IKeyMintDevice::IKeyMintDevice,
- KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat, KeyParameter::KeyParameter,
+ ErrorCode::ErrorCode, IKeyMintDevice::IKeyMintDevice, KeyCreationResult::KeyCreationResult,
+ KeyFormat::KeyFormat, KeyOrigin::KeyOrigin, KeyParameter::KeyParameter,
KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
SecurityLevel::SecurityLevel, Tag::Tag,
};
@@ -260,7 +260,7 @@
if let Some(mut extras) = extra_params {
kps.append(&mut extras);
}
- let result = legacy.begin(purpose, &blob, &kps, &HardwareAuthToken::default());
+ let result = legacy.begin(purpose, &blob, &kps, None);
assert!(result.is_ok(), "{:?}", result);
result.unwrap()
}
@@ -376,4 +376,85 @@
assert!(result.is_ok(), "{:?}", result);
assert_ne!(result.unwrap().len(), 0);
}
+
+ #[test]
+ fn test_get_key_characteristics() {
+ let legacy = get_device_or_skip_test!();
+ let hw_info = legacy.getHardwareInfo().expect("GetHardwareInfo");
+
+ let blob = generate_rsa_key(legacy.as_ref(), false, false);
+ let characteristics =
+ legacy.getKeyCharacteristics(&blob, &[], &[]).expect("GetKeyCharacteristics.");
+
+ assert!(characteristics.iter().any(|kc| kc.securityLevel == hw_info.securityLevel));
+ let sec_level_enforced = &characteristics
+ .iter()
+ .find(|kc| kc.securityLevel == hw_info.securityLevel)
+ .expect("There should be characteristics matching the device's security level.")
+ .authorizations;
+
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN)
+ }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::DIGEST, value: KeyParameterValue::Digest(Digest::SHA_2_256) }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter {
+ tag: Tag::PADDING,
+ value: KeyParameterValue::PaddingMode(PaddingMode::RSA_PSS)
+ }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter {
+ tag: Tag::ALGORITHM,
+ value: KeyParameterValue::Algorithm(Algorithm::RSA)
+ }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::KEY_SIZE, value: KeyParameterValue::Integer(2048) }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter {
+ tag: Tag::RSA_PUBLIC_EXPONENT,
+ value: KeyParameterValue::LongInteger(65537)
+ }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::NO_AUTH_REQUIRED, value: KeyParameterValue::BoolValue(true) }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter {
+ tag: Tag::ORIGIN,
+ value: KeyParameterValue::Origin(KeyOrigin::GENERATED)
+ }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::OS_VERSION, value: KeyParameterValue::Integer(_) }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::OS_PATCHLEVEL, value: KeyParameterValue::Integer(_) }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::VENDOR_PATCHLEVEL, value: KeyParameterValue::Integer(_) }
+ )));
+ assert!(sec_level_enforced.iter().any(|kp| matches!(
+ kp,
+ KeyParameter { tag: Tag::BOOT_PATCHLEVEL, value: KeyParameterValue::Integer(_) }
+ )));
+ }
}
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 65e6818..9eebb36 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -14,8 +14,6 @@
//! This module implements methods to load legacy keystore key blob files.
-#![allow(clippy::redundant_slicing)]
-
use crate::{
error::{Error as KsError, ResponseCode},
key_parameter::{KeyParameter, KeyParameterValue},
@@ -206,7 +204,7 @@
}
impl LegacyBlobLoader {
- const IV_SIZE: usize = keystore2_crypto::IV_LENGTH;
+ const IV_SIZE: usize = keystore2_crypto::LEGACY_IV_LENGTH;
const GCM_TAG_LENGTH: usize = keystore2_crypto::TAG_LENGTH;
const SALT_SIZE: usize = keystore2_crypto::SALT_LENGTH;
@@ -484,7 +482,7 @@
let element_size =
read_ne_u32(stream).context("In read_key_parameters: While reading element size.")?;
- let elements_buffer = stream
+ let mut element_stream = stream
.get(0..element_size as usize)
.ok_or(KsError::Rc(ResponseCode::VALUE_CORRUPTED))
.context("In read_key_parameters: While reading elements buffer.")?;
@@ -492,8 +490,6 @@
// update the stream position.
*stream = &stream[element_size as usize..];
- let mut element_stream = &elements_buffer[..];
-
let mut params: Vec<KeyParameterValue> = Vec::new();
for _ in 0..element_count {
let tag = Tag(read_ne_i32(&mut element_stream).context("In read_key_parameters.")?);
@@ -686,10 +682,18 @@
let user_id = uid_to_android_user(uid);
path.push(format!("user_{}", user_id));
let uid_str = uid.to_string();
- let dir =
- Self::with_retry_interrupted(|| fs::read_dir(path.as_path())).with_context(|| {
- format!("In list_vpn_profiles: Failed to open legacy blob database. {:?}", path)
- })?;
+ let dir = match Self::with_retry_interrupted(|| fs::read_dir(path.as_path())) {
+ Ok(dir) => dir,
+ Err(e) => match e.kind() {
+ ErrorKind::NotFound => return Ok(Default::default()),
+ _ => {
+ return Err(e).context(format!(
+ "In list_vpn_profiles: Failed to open legacy blob database. {:?}",
+ path
+ ))
+ }
+ },
+ };
let mut result: Vec<String> = Vec::new();
for entry in dir {
let file_name =
@@ -801,10 +805,18 @@
/// encoded with UID prefix.
fn list_user(&self, user_id: u32) -> Result<Vec<String>> {
let path = self.make_user_path_name(user_id);
- let dir =
- Self::with_retry_interrupted(|| fs::read_dir(path.as_path())).with_context(|| {
- format!("In list_user: Failed to open legacy blob database. {:?}", path)
- })?;
+ let dir = match Self::with_retry_interrupted(|| fs::read_dir(path.as_path())) {
+ Ok(dir) => dir,
+ Err(e) => match e.kind() {
+ ErrorKind::NotFound => return Ok(Default::default()),
+ _ => {
+ return Err(e).context(format!(
+ "In list_user: Failed to open legacy blob database. {:?}",
+ path
+ ))
+ }
+ },
+ };
let mut result: Vec<String> = Vec::new();
for entry in dir {
let file_name = entry.context("In list_user: Trying to access dir entry")?.file_name();
@@ -1352,4 +1364,24 @@
Ok(())
}
+
+ #[test]
+ fn list_non_existing_user() -> Result<()> {
+ let temp_dir = TempDir::new("list_non_existing_user")?;
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert!(legacy_blob_loader.list_user(20)?.is_empty());
+
+ Ok(())
+ }
+
+ #[test]
+ fn list_vpn_profiles_on_non_existing_user() -> Result<()> {
+ let temp_dir = TempDir::new("list_vpn_profiles_on_non_existing_user")?;
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert!(legacy_blob_loader.list_vpn_profiles(20)?.is_empty());
+
+ Ok(())
+ }
}
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
index e5bcae4..f92fd45 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_migrator.rs
@@ -14,11 +14,11 @@
//! This module acts as a bridge between the legacy key database and the keystore2 database.
-use crate::error::Error;
use crate::key_parameter::KeyParameterValue;
use crate::legacy_blob::BlobValue;
-use crate::utils::uid_to_android_user;
+use crate::utils::{uid_to_android_user, watchdog as wd};
use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use crate::{database::KeyType, error::Error};
use crate::{
database::{
BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
@@ -196,6 +196,8 @@
/// List all aliases for uid in the legacy database.
pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+ let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);
+
let uid = match (domain, namespace) {
(Domain::APP, namespace) => namespace as u32,
(Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
@@ -290,6 +292,8 @@
where
F: Fn() -> Result<T>,
{
+ let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);
+
// Access the key and return on success.
match key_accessor() {
Ok(result) => return Ok(result),
@@ -342,6 +346,8 @@
where
F: FnMut() -> Result<Option<T>>,
{
+ let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);
+
match key_accessor() {
Ok(Some(result)) => return Ok(Some(result)),
Ok(None) => {}
@@ -364,6 +370,8 @@
/// Deletes all keys belonging to the given namespace, migrating them into the database
/// for subsequent garbage collection if necessary.
pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
+ let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);
+
let uid = match (domain, nspace) {
(Domain::APP, nspace) => nspace as u32,
(Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
@@ -385,6 +393,8 @@
user_id: u32,
keep_non_super_encrypted_keys: bool,
) -> Result<()> {
+ let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);
+
let result = self.do_serialized(move |migrator_state| {
migrator_state
.bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
@@ -420,7 +430,7 @@
.context("In list_uid: Trying to list legacy entries.")
}
- /// This is a key migration request that can run in the migrator thread. This should
+ /// This is a key migration request that must run in the migrator thread. This must
/// be passed to do_serialized.
fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
let alias = key.alias.clone().ok_or_else(|| {
@@ -513,6 +523,7 @@
self.db
.store_new_key(
&key,
+ KeyType::Client,
¶ms,
&(&blob, &blob_metadata),
&CertificateInfo::new(user_cert, ca_cert),
@@ -525,7 +536,7 @@
None => {
if let Some(ca_cert) = ca_cert {
self.db
- .store_new_certificate(&key, &ca_cert, &KEYSTORE_UUID)
+ .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
.context("In check_and_migrate: Failed to insert new certificate.")?;
Ok(())
} else {
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 154b5b3..51316d7 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -18,6 +18,7 @@
pub mod apc;
pub mod async_task;
pub mod authorization;
+pub mod boot_level_keys;
pub mod database;
pub mod ec_crypto;
pub mod enforcements;
@@ -33,6 +34,7 @@
pub mod metrics;
pub mod operation;
pub mod permission;
+pub mod raw_device;
pub mod remote_provisioning;
pub mod security_level;
pub mod service;
@@ -41,6 +43,9 @@
pub mod utils;
mod attestation_key_utils;
-mod db_utils;
+mod audit_log;
mod gc;
mod super_key;
+
+#[cfg(feature = "watchdog")]
+mod watchdog;
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index e059a0b..0633bc1 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -14,25 +14,29 @@
//! This module implements IKeystoreMaintenance AIDL interface.
+use crate::database::{KeyEntryLoadBits, KeyType, MonotonicRawTime};
use crate::error::map_km_error;
-use crate::error::Error as KeystoreError;
+use crate::error::map_or_log_err;
+use crate::error::Error;
use crate::globals::get_keymint_device;
use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
-use crate::permission::KeystorePerm;
+use crate::permission::{KeyPerm, KeystorePerm};
use crate::super_key::UserState;
-use crate::utils::check_keystore_permission;
-use crate::{database::MonotonicRawTime, error::map_or_log_err};
+use crate::utils::{check_key_permission, check_keystore_permission, watchdog as wd};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::IKeyMintDevice::IKeyMintDevice;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_security_maintenance::aidl::android::security::maintenance::{
IKeystoreMaintenance::{BnKeystoreMaintenance, IKeystoreMaintenance},
UserState::UserState as AidlUserState,
};
-use android_security_maintenance::binder::{Interface, Result as BinderResult};
-use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
+use android_security_maintenance::binder::{
+ BinderFeatures, Interface, Result as BinderResult, Strong, ThreadState,
+};
use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor,
+};
use anyhow::{Context, Result};
-use binder::{IBinderInternal, Strong};
use keystore2_crypto::Password;
/// This struct is defined to implement the aforementioned AIDL interface.
@@ -42,9 +46,10 @@
impl Maintenance {
/// Create a new instance of Keystore User Manager service.
pub fn new_native_binder() -> Result<Strong<dyn IKeystoreMaintenance>> {
- let result = BnKeystoreMaintenance::new_binder(Self);
- result.as_binder().set_requesting_sid(true);
- Ok(result)
+ Ok(BnKeystoreMaintenance::new_binder(
+ Self,
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
}
fn on_user_password_changed(user_id: i32, password: Option<Password>) -> Result<()> {
@@ -74,7 +79,7 @@
{
UserState::LskfLocked => {
// Error - password can not be changed when the device is locked
- Err(KeystoreError::Rc(ResponseCode::LOCKED))
+ Err(Error::Rc(ResponseCode::LOCKED))
.context("In on_user_password_changed. Device is locked.")
}
_ => {
@@ -128,11 +133,17 @@
}
}
- fn early_boot_ended_help(sec_level: &SecurityLevel) -> Result<()> {
- let (dev, _, _) =
- get_keymint_device(sec_level).context("In early_boot_ended: getting keymint device")?;
+ fn early_boot_ended_help(sec_level: SecurityLevel) -> Result<()> {
+ let (dev, _, _) = get_keymint_device(&sec_level)
+ .context("In early_boot_ended: getting keymint device")?;
let km_dev: Strong<dyn IKeyMintDevice> =
dev.get_interface().context("In early_boot_ended: getting keymint device interface")?;
+
+ let _wp = wd::watch_millis_with(
+ "In early_boot_ended_help: calling earlyBootEnded()",
+ 500,
+ move || format!("Seclevel: {:?}", sec_level),
+ );
map_km_error(km_dev.earlyBootEnded())
.context("In keymint device: calling earlyBootEnded")?;
Ok(())
@@ -141,13 +152,18 @@
fn early_boot_ended() -> Result<()> {
check_keystore_permission(KeystorePerm::early_boot_ended())
.context("In early_boot_ended. Checking permission")?;
+ log::info!("In early_boot_ended.");
+
+ if let Err(e) = DB.with(|db| SUPER_KEY.set_up_boot_level_cache(&mut db.borrow_mut())) {
+ log::error!("SUPER_KEY.set_up_boot_level_cache failed:\n{:?}\n:(", e);
+ }
let sec_levels = [
(SecurityLevel::TRUSTED_ENVIRONMENT, "TRUSTED_ENVIRONMENT"),
(SecurityLevel::STRONGBOX, "STRONGBOX"),
];
sec_levels.iter().fold(Ok(()), |result, (sec_level, sec_level_string)| {
- let curr_result = Maintenance::early_boot_ended_help(sec_level);
+ let curr_result = Maintenance::early_boot_ended_help(*sec_level);
if curr_result.is_err() {
log::error!(
"Call to earlyBootEnded failed for security level {}.",
@@ -163,8 +179,45 @@
check_keystore_permission(KeystorePerm::report_off_body())
.context("In on_device_off_body.")?;
- DB.with(|db| db.borrow_mut().update_last_off_body(MonotonicRawTime::now()))
- .context("In on_device_off_body: Trying to update last off body time.")
+ DB.with(|db| db.borrow_mut().update_last_off_body(MonotonicRawTime::now()));
+ Ok(())
+ }
+
+ fn migrate_key_namespace(source: &KeyDescriptor, destination: &KeyDescriptor) -> Result<()> {
+ let caller_uid = ThreadState::get_calling_uid();
+
+ DB.with(|db| {
+ let key_id_guard = match source.domain {
+ Domain::APP | Domain::SELINUX | Domain::KEY_ID => {
+ let (key_id_guard, _) = LEGACY_MIGRATOR
+ .with_try_migrate(&source, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &source,
+ KeyType::Client,
+ KeyEntryLoadBits::NONE,
+ caller_uid,
+ |k, av| {
+ check_key_permission(KeyPerm::use_(), k, &av)?;
+ check_key_permission(KeyPerm::delete(), k, &av)?;
+ check_key_permission(KeyPerm::grant(), k, &av)
+ },
+ )
+ })
+ .context("In migrate_key_namespace: Failed to load key blob.")?;
+ key_id_guard
+ }
+ _ => {
+ return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT)).context(concat!(
+ "In migrate_key_namespace: ",
+ "Source domain must be one of APP, SELINUX, or KEY_ID."
+ ))
+ }
+ };
+
+ db.borrow_mut().migrate_key_namespace(key_id_guard, destination, caller_uid, |k| {
+ check_key_permission(KeyPerm::rebind(), k, &None)
+ })
+ })
}
}
@@ -172,30 +225,46 @@
impl IKeystoreMaintenance for Maintenance {
fn onUserPasswordChanged(&self, user_id: i32, password: Option<&[u8]>) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::onUserPasswordChanged", 500);
map_or_log_err(Self::on_user_password_changed(user_id, password.map(|pw| pw.into())), Ok)
}
fn onUserAdded(&self, user_id: i32) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::onUserAdded", 500);
map_or_log_err(Self::add_or_remove_user(user_id), Ok)
}
fn onUserRemoved(&self, user_id: i32) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::onUserRemoved", 500);
map_or_log_err(Self::add_or_remove_user(user_id), Ok)
}
fn clearNamespace(&self, domain: Domain, nspace: i64) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::clearNamespace", 500);
map_or_log_err(Self::clear_namespace(domain, nspace), Ok)
}
fn getState(&self, user_id: i32) -> BinderResult<AidlUserState> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::getState", 500);
map_or_log_err(Self::get_state(user_id), Ok)
}
fn earlyBootEnded(&self) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::earlyBootEnded", 500);
map_or_log_err(Self::early_boot_ended(), Ok)
}
fn onDeviceOffBody(&self) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::onDeviceOffBody", 500);
map_or_log_err(Self::on_device_off_body(), Ok)
}
+
+ fn migrateKeyNamespace(
+ &self,
+ source: &KeyDescriptor,
+ destination: &KeyDescriptor,
+ ) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IKeystoreMaintenance::migrateKeyNamespace", 500);
+ map_or_log_err(Self::migrate_key_namespace(source, destination), Ok)
+ }
}
diff --git a/keystore2/src/metrics.rs b/keystore2/src/metrics.rs
index c5dd582..07c3d64 100644
--- a/keystore2/src/metrics.rs
+++ b/keystore2/src/metrics.rs
@@ -14,6 +14,7 @@
//! This module provides convenience functions for keystore2 logging.
use crate::error::get_error_code;
+use crate::globals::{DB, LOGS_HANDLER};
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
use crate::operation::Outcome;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
@@ -22,15 +23,22 @@
KeyParameter::KeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
SecurityLevel::SecurityLevel,
};
-use statslog_rust::keystore2_key_creation_event_reported::{
- Algorithm as StatsdAlgorithm, EcCurve as StatsdEcCurve, KeyOrigin as StatsdKeyOrigin,
- Keystore2KeyCreationEventReported, SecurityLevel as StatsdKeyCreationSecurityLevel,
- UserAuthType as StatsdUserAuthType,
+use anyhow::Result;
+use keystore2_system_property::PropertyWatcher;
+use statslog_rust::{
+ keystore2_key_creation_event_reported::{
+ Algorithm as StatsdAlgorithm, EcCurve as StatsdEcCurve, KeyOrigin as StatsdKeyOrigin,
+ Keystore2KeyCreationEventReported, SecurityLevel as StatsdKeyCreationSecurityLevel,
+ UserAuthType as StatsdUserAuthType,
+ },
+ keystore2_key_operation_event_reported::{
+ Keystore2KeyOperationEventReported, Outcome as StatsdOutcome, Purpose as StatsdKeyPurpose,
+ SecurityLevel as StatsdKeyOperationSecurityLevel,
+ },
+ keystore2_storage_stats::StorageType as StatsdStorageType,
};
-use statslog_rust::keystore2_key_operation_event_reported::{
- Keystore2KeyOperationEventReported, Outcome as StatsdOutcome, Purpose as StatsdKeyPurpose,
- SecurityLevel as StatsdKeyOperationSecurityLevel,
-};
+use statslog_rust_header::Atoms;
+use statspull_rust::{set_pull_atom_callback, StatsPullResult};
fn create_default_key_creation_atom() -> Keystore2KeyCreationEventReported {
// If a value is not present, fields represented by bitmaps and i32 fields
@@ -75,19 +83,18 @@
pub fn log_key_creation_event_stats<U>(
sec_level: SecurityLevel,
key_params: &[KeyParameter],
- result: &anyhow::Result<U>,
+ result: &Result<U>,
) {
let key_creation_event_stats =
construct_key_creation_event_stats(sec_level, key_params, result);
- let logging_result = key_creation_event_stats.stats_write();
+ LOGS_HANDLER.queue_lo(move |_| {
+ let logging_result = key_creation_event_stats.stats_write();
- if let Err(e) = logging_result {
- log::error!(
- "In log_key_creation_event_stats. Error in logging key creation event. {:?}",
- e
- );
- }
+ if let Err(e) = logging_result {
+ log::error!("Error in logging key creation event in the async task. {:?}", e);
+ }
+ });
}
/// Log key operation events via statsd API.
@@ -106,20 +113,19 @@
key_upgraded,
);
- let logging_result = key_operation_event_stats.stats_write();
+ LOGS_HANDLER.queue_lo(move |_| {
+ let logging_result = key_operation_event_stats.stats_write();
- if let Err(e) = logging_result {
- log::error!(
- "In log_key_operation_event_stats. Error in logging key operation event. {:?}",
- e
- );
- }
+ if let Err(e) = logging_result {
+ log::error!("Error in logging key operation event in the async task. {:?}", e);
+ }
+ });
}
fn construct_key_creation_event_stats<U>(
sec_level: SecurityLevel,
key_params: &[KeyParameter],
- result: &anyhow::Result<U>,
+ result: &Result<U>,
) -> Keystore2KeyCreationEventReported {
let mut key_creation_event_atom = create_default_key_creation_atom();
@@ -375,6 +381,55 @@
}
bitmap
}
+
+/// Registers pull metrics callbacks
+pub fn register_pull_metrics_callbacks() -> Result<()> {
+ // Before registering the callbacks with statsd, we have to wait for the system to finish
+ // booting up. This avoids possible races that may occur at startup. For example, statsd
+ // depends on a companion service, and if registration happens too soon it will fail since
+ // the companion service isn't up yet.
+ let mut watcher = PropertyWatcher::new("sys.boot_completed")?;
+ loop {
+ watcher.wait()?;
+ let value = watcher.read(|_name, value| Ok(value.trim().to_string()));
+ if value? == "1" {
+ set_pull_atom_callback(Atoms::Keystore2StorageStats, None, pull_metrics_callback);
+ break;
+ }
+ }
+ Ok(())
+}
+
+fn pull_metrics_callback() -> StatsPullResult {
+ let mut result = StatsPullResult::new();
+ let mut append = |stat| {
+ match stat {
+ Ok(s) => result.push(Box::new(s)),
+ Err(error) => {
+ log::error!("pull_metrics_callback: Error getting storage stat: {}", error)
+ }
+ };
+ };
+ DB.with(|db| {
+ let mut db = db.borrow_mut();
+ append(db.get_storage_stat(StatsdStorageType::Database));
+ append(db.get_storage_stat(StatsdStorageType::KeyEntry));
+ append(db.get_storage_stat(StatsdStorageType::KeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::KeyEntryDomainNamespaceIndex));
+ append(db.get_storage_stat(StatsdStorageType::BlobEntry));
+ append(db.get_storage_stat(StatsdStorageType::BlobEntryKeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::KeyParameter));
+ append(db.get_storage_stat(StatsdStorageType::KeyParameterKeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::KeyMetadata));
+ append(db.get_storage_stat(StatsdStorageType::KeyMetadataKeyEntryIdIndex));
+ append(db.get_storage_stat(StatsdStorageType::Grant));
+ append(db.get_storage_stat(StatsdStorageType::AuthToken));
+ append(db.get_storage_stat(StatsdStorageType::BlobMetadata));
+ append(db.get_storage_stat(StatsdStorageType::BlobMetadataBlobEntryIdIndex));
+ });
+ result
+}
+
/// Enum defining the bit position for each padding mode. Since padding mode can be repeatable, it
/// is represented using a bitmap.
#[allow(non_camel_case_types)]
diff --git a/keystore2/src/operation.rs b/keystore2/src/operation.rs
index c2539a7..8d7ad0a 100644
--- a/keystore2/src/operation.rs
+++ b/keystore2/src/operation.rs
@@ -128,16 +128,16 @@
use crate::enforcements::AuthInfo;
use crate::error::{map_err_with, map_km_error, map_or_log_err, Error, ErrorCode, ResponseCode};
use crate::metrics::log_key_operation_event_stats;
-use crate::utils::Asp;
+use crate::utils::{watchdog as wd, Asp};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
IKeyMintOperation::IKeyMintOperation, KeyParameter::KeyParameter, KeyPurpose::KeyPurpose,
SecurityLevel::SecurityLevel,
};
+use android_hardware_security_keymint::binder::BinderFeatures;
use android_system_keystore2::aidl::android::system::keystore2::{
IKeystoreOperation::BnKeystoreOperation, IKeystoreOperation::IKeystoreOperation,
};
use anyhow::{anyhow, Context, Result};
-use binder::IBinderInternal;
use std::{
collections::HashMap,
sync::{Arc, Mutex, MutexGuard, Weak},
@@ -291,6 +291,8 @@
}
};
+ let _wp = wd::watch_millis("In Operation::prune: calling abort()", 500);
+
// We abort the operation. If there was an error we log it but ignore it.
if let Err(e) = map_km_error(km_op.abort()) {
log::error!("In prune: KeyMint::abort failed with {:?}.", e);
@@ -370,10 +372,10 @@
.before_update()
.context("In update_aad: Trying to get auth tokens.")?;
- self.update_outcome(
- &mut *outcome,
- map_km_error(km_op.updateAad(aad_input, hat.as_ref(), tst.as_ref())),
- )
+ self.update_outcome(&mut *outcome, {
+ let _wp = wd::watch_millis("Operation::update_aad: calling updateAad", 500);
+ map_km_error(km_op.updateAad(aad_input, hat.as_ref(), tst.as_ref()))
+ })
.context("In update_aad: KeyMint::update failed.")?;
Ok(())
@@ -397,10 +399,10 @@
.context("In update: Trying to get auth tokens.")?;
let output = self
- .update_outcome(
- &mut *outcome,
- map_km_error(km_op.update(input, hat.as_ref(), tst.as_ref())),
- )
+ .update_outcome(&mut *outcome, {
+ let _wp = wd::watch_millis("Operation::update: calling update", 500);
+ map_km_error(km_op.update(input, hat.as_ref(), tst.as_ref()))
+ })
.context("In update: KeyMint::update failed.")?;
if output.is_empty() {
@@ -430,16 +432,16 @@
.context("In finish: Trying to get auth tokens.")?;
let output = self
- .update_outcome(
- &mut *outcome,
+ .update_outcome(&mut *outcome, {
+ let _wp = wd::watch_millis("Operation::finish: calling finish", 500);
map_km_error(km_op.finish(
input,
signature,
hat.as_ref(),
tst.as_ref(),
confirmation_token.as_deref(),
- )),
- )
+ ))
+ })
.context("In finish: KeyMint::finish failed.")?;
self.auth_info.lock().unwrap().after_finish().context("In finish.")?;
@@ -463,7 +465,10 @@
let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
self.km_op.get_interface().context("In abort: Failed to get KeyMintOperation.")?;
- map_km_error(km_op.abort()).context("In abort: KeyMint::abort failed.")
+ {
+ let _wp = wd::watch_millis("Operation::abort: calling abort", 500);
+ map_km_error(km_op.abort()).context("In abort: KeyMint::abort failed.")
+ }
}
}
@@ -783,16 +788,16 @@
impl KeystoreOperation {
/// Creates a new operation instance wrapped in a
- /// BnKeystoreOperation proxy object. It also
- /// calls `IBinderInternal::set_requesting_sid` on the new interface, because
+ /// BnKeystoreOperation proxy object. It also enables
+ /// `BinderFeatures::set_requesting_sid` on the new interface, because
/// we need it for checking Keystore permissions.
pub fn new_native_binder(
operation: Arc<Operation>,
) -> binder::public_api::Strong<dyn IKeystoreOperation> {
- let result =
- BnKeystoreOperation::new_binder(Self { operation: Mutex::new(Some(operation)) });
- result.as_binder().set_requesting_sid(true);
- result
+ BnKeystoreOperation::new_binder(
+ Self { operation: Mutex::new(Some(operation)) },
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ )
}
/// Grabs the outer operation mutex and calls `f` on the locked operation.
@@ -837,6 +842,7 @@
impl IKeystoreOperation for KeystoreOperation {
fn updateAad(&self, aad_input: &[u8]) -> binder::public_api::Result<()> {
+ let _wp = wd::watch_millis("IKeystoreOperation::updateAad", 500);
map_or_log_err(
self.with_locked_operation(
|op| op.update_aad(aad_input).context("In KeystoreOperation::updateAad"),
@@ -847,6 +853,7 @@
}
fn update(&self, input: &[u8]) -> binder::public_api::Result<Option<Vec<u8>>> {
+ let _wp = wd::watch_millis("IKeystoreOperation::update", 500);
map_or_log_err(
self.with_locked_operation(
|op| op.update(input).context("In KeystoreOperation::update"),
@@ -860,6 +867,7 @@
input: Option<&[u8]>,
signature: Option<&[u8]>,
) -> binder::public_api::Result<Option<Vec<u8>>> {
+ let _wp = wd::watch_millis("IKeystoreOperation::finish", 500);
map_or_log_err(
self.with_locked_operation(
|op| op.finish(input, signature).context("In KeystoreOperation::finish"),
@@ -870,6 +878,7 @@
}
fn abort(&self) -> binder::public_api::Result<()> {
+ let _wp = wd::watch_millis("IKeystoreOperation::abort", 500);
map_err_with(
self.with_locked_operation(
|op| op.abort(Outcome::Abort).context("In KeystoreOperation::abort"),
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index 726c2ec..e7999bc 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -18,8 +18,6 @@
//! It also provides KeystorePerm and KeyPerm as convenience wrappers for the SELinux permission
//! defined by keystore2 and keystore2_key respectively.
-#![allow(clippy::from_over_into)]
-
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor, KeyPermission::KeyPermission,
};
@@ -151,9 +149,9 @@
}
}
- impl Into<$aidl_name> for $name {
- fn into(self) -> $aidl_name {
- self.0
+ impl From<$name> for $aidl_name {
+ fn from(p: $name) -> $aidl_name {
+ p.0
}
}
@@ -259,9 +257,9 @@
}
}
- impl Into<i32> for $name {
- fn into(self) -> i32 {
- self as i32
+ impl From<$name> for i32 {
+ fn from(p: $name) -> i32 {
+ p as i32
}
}
diff --git a/keystore2/src/raw_device.rs b/keystore2/src/raw_device.rs
new file mode 100644
index 0000000..cd54915
--- /dev/null
+++ b/keystore2/src/raw_device.rs
@@ -0,0 +1,333 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provide the [`KeyMintDevice`] wrapper for operating directly on a KeyMint device.
+
+use crate::{
+ database::{
+ BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, KeyEntry, KeyEntryLoadBits,
+ KeyIdGuard, KeyMetaData, KeyMetaEntry, KeyType, KeystoreDB, SubComponentType, Uuid,
+ },
+ error::{map_km_error, Error, ErrorCode},
+ globals::get_keymint_device,
+ super_key::KeyBlob,
+ utils::{key_characteristics_to_internal, watchdog as wd, AID_KEYSTORE},
+};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ HardwareAuthToken::HardwareAuthToken, IKeyMintDevice::IKeyMintDevice,
+ IKeyMintOperation::IKeyMintOperation, KeyCharacteristics::KeyCharacteristics,
+ KeyCreationResult::KeyCreationResult, KeyParameter::KeyParameter, KeyPurpose::KeyPurpose,
+ SecurityLevel::SecurityLevel,
+};
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
+};
+use anyhow::{Context, Result};
+use binder::Strong;
+
+/// Wrapper for operating directly on a KeyMint device.
+/// These methods often mirror methods in [`crate::security_level`]. However
+/// the functions in [`crate::security_level`] make assumptions that hold, and has side effects
+/// that make sense, only if called by an external client through binder.
+/// In addition we are trying to maintain a separation between interface services
+/// so that the architecture is compatible with a future move to multiple thread pools.
+/// So the simplest approach today is to write new implementations of them for internal use.
+/// Because these methods run very early, we don't even try to cooperate with
+/// the operation slot database; we assume there will be plenty of slots.
+pub struct KeyMintDevice {
+ km_dev: Strong<dyn IKeyMintDevice>,
+ km_uuid: Uuid,
+ version: i32,
+ security_level: SecurityLevel,
+}
+
+impl KeyMintDevice {
+ /// Version number of KeyMasterDevice@V4_0
+ pub const KEY_MASTER_V4_0: i32 = 40;
+ /// Version number of KeyMasterDevice@V4_1
+ pub const KEY_MASTER_V4_1: i32 = 41;
+ /// Version number of KeyMintDevice@V1
+ pub const KEY_MINT_V1: i32 = 100;
+
+ /// Get a [`KeyMintDevice`] for the given [`SecurityLevel`]
+ pub fn get(security_level: SecurityLevel) -> Result<KeyMintDevice> {
+ let (asp, hw_info, km_uuid) = get_keymint_device(&security_level)
+ .context("In KeyMintDevice::get: get_keymint_device failed")?;
+
+ Ok(KeyMintDevice {
+ km_dev: asp.get_interface()?,
+ km_uuid,
+ version: hw_info.versionNumber,
+ security_level: hw_info.securityLevel,
+ })
+ }
+
+ /// Get a [`KeyMintDevice`] for the given [`SecurityLevel`], return
+ /// [`None`] if the error `HARDWARE_TYPE_UNAVAILABLE` is returned
+ pub fn get_or_none(security_level: SecurityLevel) -> Result<Option<KeyMintDevice>> {
+ KeyMintDevice::get(security_level).map(Some).or_else(|e| {
+ match e.root_cause().downcast_ref::<Error>() {
+ Some(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)) => Ok(None),
+ _ => Err(e),
+ }
+ })
+ }
+
+ /// Returns the version of the underlying KeyMint/KeyMaster device.
+ pub fn version(&self) -> i32 {
+ self.version
+ }
+
+ /// Returns the self advertised security level of the KeyMint device.
+ /// This may differ from the requested security level if the best security level
+ /// on the device is Software.
+ pub fn security_level(&self) -> SecurityLevel {
+ self.security_level
+ }
+
+ /// Create a KM key and store in the database.
+ pub fn create_and_store_key<F>(
+ &self,
+ db: &mut KeystoreDB,
+ key_desc: &KeyDescriptor,
+ key_type: KeyType,
+ creator: F,
+ ) -> Result<()>
+ where
+ F: FnOnce(&Strong<dyn IKeyMintDevice>) -> Result<KeyCreationResult, binder::Status>,
+ {
+ let creation_result = map_km_error(creator(&self.km_dev))
+ .context("In create_and_store_key: creator failed")?;
+ let key_parameters = key_characteristics_to_internal(creation_result.keyCharacteristics);
+
+ let creation_date =
+ DateTime::now().context("In create_and_store_key: DateTime::now() failed")?;
+
+ let mut key_metadata = KeyMetaData::new();
+ key_metadata.add(KeyMetaEntry::CreationDate(creation_date));
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
+
+ db.store_new_key(
+ &key_desc,
+ key_type,
+ &key_parameters,
+ &(&creation_result.keyBlob, &blob_metadata),
+ &CertificateInfo::new(None, None),
+ &key_metadata,
+ &self.km_uuid,
+ )
+ .context("In create_and_store_key: store_new_key failed")?;
+ Ok(())
+ }
+
+ /// Generate a KeyDescriptor for internal-use keys.
+ pub fn internal_descriptor(alias: String) -> KeyDescriptor {
+ KeyDescriptor {
+ domain: Domain::APP,
+ nspace: AID_KEYSTORE as i64,
+ alias: Some(alias),
+ blob: None,
+ }
+ }
+
+ /// Look up an internal-use key in the database given a key descriptor.
+ fn lookup_from_desc(
+ db: &mut KeystoreDB,
+ key_desc: &KeyDescriptor,
+ key_type: KeyType,
+ ) -> Result<(KeyIdGuard, KeyEntry)> {
+ db.load_key_entry(&key_desc, key_type, KeyEntryLoadBits::KM, AID_KEYSTORE, |_, _| Ok(()))
+ .context("In lookup_from_desc: load_key_entry failed.")
+ }
+
+ /// Look up the key in the database, and return None if it is absent.
+ fn not_found_is_none(
+ lookup: Result<(KeyIdGuard, KeyEntry)>,
+ ) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
+ match lookup {
+ Ok(result) => Ok(Some(result)),
+ Err(e) => match e.root_cause().downcast_ref::<Error>() {
+ Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
+ _ => Err(e),
+ },
+ }
+ }
+
+ /// This does the lookup and store in separate transactions; caller must
+ /// hold a lock before calling.
+ pub fn lookup_or_generate_key<F>(
+ &self,
+ db: &mut KeystoreDB,
+ key_desc: &KeyDescriptor,
+ key_type: KeyType,
+ params: &[KeyParameter],
+ validate_characteristics: F,
+ ) -> Result<(KeyIdGuard, KeyBlob)>
+ where
+ F: FnOnce(&[KeyCharacteristics]) -> bool,
+ {
+ // We use a separate transaction for the lookup than for the store
+ // - to keep the code simple
+ // - because the caller needs to hold a lock in any case
+ // - because it avoids holding database locks during slow
+ // KeyMint operations
+ let lookup = Self::not_found_is_none(Self::lookup_from_desc(db, key_desc, key_type))
+ .context("In lookup_or_generate_key: first lookup failed")?;
+
+ if let Some((key_id_guard, mut key_entry)) = lookup {
+ // If the key is associated with a different km instance
+ // or if there is no blob metadata for some reason the key entry
+ // is considered corrupted and needs to be replaced with a new one.
+ let key_blob = key_entry.take_key_blob_info().and_then(|(key_blob, blob_metadata)| {
+ if Some(&self.km_uuid) == blob_metadata.km_uuid() {
+ Some(key_blob)
+ } else {
+ None
+ }
+ });
+
+ if let Some(key_blob_vec) = key_blob {
+ let (key_characteristics, key_blob) = self
+ .upgrade_keyblob_if_required_with(
+ db,
+ &key_id_guard,
+ KeyBlob::NonSensitive(key_blob_vec),
+ |key_blob| {
+ map_km_error({
+ let _wp = wd::watch_millis(
+ concat!(
+ "In KeyMintDevice::lookup_or_generate_key: ",
+ "calling getKeyCharacteristics."
+ ),
+ 500,
+ );
+ self.km_dev.getKeyCharacteristics(key_blob, &[], &[])
+ })
+ },
+ )
+ .context("In lookup_or_generate_key: calling getKeyCharacteristics")?;
+
+ if validate_characteristics(&key_characteristics) {
+ return Ok((key_id_guard, key_blob));
+ }
+
+ // If this point is reached the existing key is considered outdated or corrupted
+ // in some way. It will be replaced with a new key below.
+ };
+ }
+
+ self.create_and_store_key(db, &key_desc, key_type, |km_dev| {
+ km_dev.generateKey(¶ms, None)
+ })
+ .context("In lookup_or_generate_key: generate_and_store_key failed")?;
+ Self::lookup_from_desc(db, key_desc, key_type)
+ .and_then(|(key_id_guard, mut key_entry)| {
+ Ok((
+ key_id_guard,
+ key_entry
+ .take_key_blob_info()
+ .ok_or(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .map(|(key_blob, _)| KeyBlob::NonSensitive(key_blob))
+ .context("Missing key blob info.")?,
+ ))
+ })
+ .context("In lookup_or_generate_key: second lookup failed")
+ }
+
+ /// Call the passed closure; if it returns `KEY_REQUIRES_UPGRADE`, call upgradeKey, and
+ /// write the upgraded key to the database.
+ fn upgrade_keyblob_if_required_with<'a, T, F>(
+ &self,
+ db: &mut KeystoreDB,
+ key_id_guard: &KeyIdGuard,
+ key_blob: KeyBlob<'a>,
+ f: F,
+ ) -> Result<(T, KeyBlob<'a>)>
+ where
+ F: Fn(&[u8]) -> Result<T, Error>,
+ {
+ match f(&key_blob) {
+ Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
+ let upgraded_blob = map_km_error({
+ let _wp = wd::watch_millis(
+ "In KeyMintDevice::upgrade_keyblob_if_required_with: calling upgradeKey.",
+ 500,
+ );
+ self.km_dev.upgradeKey(&key_blob, &[])
+ })
+ .context("In upgrade_keyblob_if_required_with: Upgrade failed")?;
+
+ let mut new_blob_metadata = BlobMetaData::new();
+ new_blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
+
+ db.set_blob(
+ key_id_guard,
+ SubComponentType::KEY_BLOB,
+ Some(&upgraded_blob),
+ Some(&new_blob_metadata),
+ )
+ .context(concat!(
+ "In upgrade_keyblob_if_required_with: ",
+ "Failed to insert upgraded blob into the database"
+ ))?;
+
+ Ok((
+ f(&upgraded_blob).context(
+ "In upgrade_keyblob_if_required_with: Closure failed after upgrade",
+ )?,
+ KeyBlob::NonSensitive(upgraded_blob),
+ ))
+ }
+ result => Ok((
+ result.context("In upgrade_keyblob_if_required_with: Closure failed")?,
+ key_blob,
+ )),
+ }
+ }
+
+ /// Use the created key in an operation that can be done with
+ /// a call to begin followed by a call to finish.
+ #[allow(clippy::too_many_arguments)]
+ pub fn use_key_in_one_step(
+ &self,
+ db: &mut KeystoreDB,
+ key_id_guard: &KeyIdGuard,
+ key_blob: &[u8],
+ purpose: KeyPurpose,
+ operation_parameters: &[KeyParameter],
+ auth_token: Option<&HardwareAuthToken>,
+ input: &[u8],
+ ) -> Result<Vec<u8>> {
+ let key_blob = KeyBlob::Ref(key_blob);
+
+ let (begin_result, _) = self
+ .upgrade_keyblob_if_required_with(db, key_id_guard, key_blob, |blob| {
+ map_km_error({
+ let _wp = wd::watch_millis("In use_key_in_one_step: calling: begin", 500);
+ self.km_dev.begin(purpose, blob, operation_parameters, auth_token)
+ })
+ })
+ .context("In use_key_in_one_step: Failed to begin operation.")?;
+ let operation: Strong<dyn IKeyMintOperation> = begin_result
+ .operation
+ .ok_or_else(Error::sys)
+ .context("In use_key_in_one_step: Operation missing")?;
+ map_km_error({
+ let _wp = wd::watch_millis("In use_key_in_one_step: calling: finish", 500);
+ operation.finish(Some(input), None, None, None, None)
+ })
+ .context("In use_key_in_one_step: Failed to finish operation.")
+ }
+}
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index f99805d..1f3f8e8 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -19,8 +19,6 @@
//! certificate chains signed by some root authority and stored in a keystore SQLite
//! DB.
-#![allow(clippy::from_over_into, clippy::needless_question_mark, clippy::vec_init_then_push)]
-
use std::collections::HashMap;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
@@ -34,7 +32,7 @@
AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
IRemoteProvisioning::IRemoteProvisioning,
};
-use android_security_remoteprovisioning::binder::Strong;
+use android_security_remoteprovisioning::binder::{BinderFeatures, Strong};
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor,
};
@@ -45,7 +43,7 @@
use crate::database::{CertificateChain, KeystoreDB, Uuid};
use crate::error::{self, map_or_log_err, map_rem_prov_error, Error};
use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
-use crate::utils::Asp;
+use crate::utils::{watchdog as wd, Asp};
/// Contains helper functions to check if remote provisioning is enabled on the system and, if so,
/// to assign and retrieve attestation keys and certificate chains.
@@ -233,7 +231,7 @@
if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
result.device_by_sec_level.insert(SecurityLevel::STRONGBOX, dev);
}
- Ok(BnRemoteProvisioning::new_binder(result))
+ Ok(BnRemoteProvisioning::new_binder(result, BinderFeatures::default()))
}
/// Populates the AttestationPoolStatus parcelable with information about how many
@@ -252,7 +250,7 @@
// attestation keys unless the pool status is checked first, so this call should be
// enough to routinely clean out expired keys.
db.delete_expired_attestation_keys()?;
- Ok(db.get_attestation_pool_status(expired_by, &uuid)?)
+ db.get_attestation_pool_status(expired_by, &uuid)
})
}
@@ -294,14 +292,15 @@
protected_data,
))
.context("In generate_csr: Failed to generate csr")?;
- let mut cose_mac_0 = Vec::<u8>::new();
// TODO(b/180392379): Replace this manual CBOR generation with the cbor-serde crate as well.
// This generates an array consisting of the mac and the public key Maps.
// Just generate the actual MacedPublicKeys structure when the crate is
// available.
- cose_mac_0.push((0b100_00000 | (keys_to_sign.len() + 1)) as u8);
- cose_mac_0.push(0b010_11000); //push mac
- cose_mac_0.push(mac.len() as u8);
+ let mut cose_mac_0: Vec<u8> = vec![
+ (0b100_00000 | (keys_to_sign.len() + 1)) as u8,
+ 0b010_11000, // mac
+ (mac.len() as u8),
+ ];
cose_mac_0.append(&mut mac);
for maced_public_key in keys_to_sign {
if maced_public_key.macedKey.len() > 83 + 8 {
@@ -327,13 +326,13 @@
DB.with::<_, Result<()>>(|db| {
let mut db = db.borrow_mut();
let (_, _, uuid) = get_keymint_device(&sec_level)?;
- Ok(db.store_signed_attestation_certificate_chain(
+ db.store_signed_attestation_certificate_chain(
public_key,
batch_cert,
certs, /* DER encoded certificate chain */
expiration_date,
&uuid,
- )?)
+ )
})
}
@@ -362,7 +361,7 @@
raw_key[32..64].clone_from_slice(&data[53..53 + 32]);
DB.with::<_, Result<()>>(|db| {
let mut db = db.borrow_mut();
- Ok(db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)?)
+ db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)
})
}
@@ -377,7 +376,7 @@
pub fn delete_all_keys(&self) -> Result<i64> {
DB.with::<_, Result<i64>>(|db| {
let mut db = db.borrow_mut();
- Ok(db.delete_all_attestation_keys()?)
+ db.delete_all_attestation_keys()
})
}
}
@@ -392,6 +391,7 @@
expired_by: i64,
sec_level: SecurityLevel,
) -> binder::public_api::Result<AttestationPoolStatus> {
+ let _wp = wd::watch_millis("IRemoteProvisioning::getPoolStatus", 500);
map_or_log_err(self.get_pool_status(expired_by, sec_level), Ok)
}
@@ -405,6 +405,7 @@
protected_data: &mut ProtectedData,
device_info: &mut DeviceInfo,
) -> binder::public_api::Result<Vec<u8>> {
+ let _wp = wd::watch_millis("IRemoteProvisioning::generateCsr", 500);
map_or_log_err(
self.generate_csr(
test_mode,
@@ -427,6 +428,7 @@
expiration_date: i64,
sec_level: SecurityLevel,
) -> binder::public_api::Result<()> {
+ let _wp = wd::watch_millis("IRemoteProvisioning::provisionCertChain", 500);
map_or_log_err(
self.provision_cert_chain(public_key, batch_cert, certs, expiration_date, sec_level),
Ok,
@@ -438,14 +440,17 @@
is_test_mode: bool,
sec_level: SecurityLevel,
) -> binder::public_api::Result<()> {
+ let _wp = wd::watch_millis("IRemoteProvisioning::generateKeyPair", 500);
map_or_log_err(self.generate_key_pair(is_test_mode, sec_level), Ok)
}
fn getSecurityLevels(&self) -> binder::public_api::Result<Vec<SecurityLevel>> {
+ let _wp = wd::watch_millis("IRemoteProvisioning::getSecurityLevels", 500);
map_or_log_err(self.get_security_levels(), Ok)
}
fn deleteAllKeys(&self) -> binder::public_api::Result<i64> {
+ let _wp = wd::watch_millis("IRemoteProvisioning::deleteAllKeys", 500);
map_or_log_err(self.delete_all_keys(), Ok)
}
}
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 1cf770f..f78d98b 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -14,24 +14,12 @@
//! This crate implements the IKeystoreSecurityLevel interface.
-use crate::{globals::get_keymint_device, id_rotation::IdRotationState};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- Algorithm::Algorithm, AttestationKey::AttestationKey,
- HardwareAuthenticatorType::HardwareAuthenticatorType, IKeyMintDevice::IKeyMintDevice,
- KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
- KeyMintHardwareInfo::KeyMintHardwareInfo, KeyParameter::KeyParameter,
- KeyParameterValue::KeyParameterValue, SecurityLevel::SecurityLevel, Tag::Tag,
-};
-use android_system_keystore2::aidl::android::system::keystore2::{
- AuthenticatorSpec::AuthenticatorSpec, CreateOperationResponse::CreateOperationResponse,
- Domain::Domain, IKeystoreOperation::IKeystoreOperation,
- IKeystoreSecurityLevel::BnKeystoreSecurityLevel,
- IKeystoreSecurityLevel::IKeystoreSecurityLevel, KeyDescriptor::KeyDescriptor,
- KeyMetadata::KeyMetadata, KeyParameters::KeyParameters,
-};
-
use crate::attestation_key_utils::{get_attest_key_info, AttestationKeyInfo};
+use crate::audit_log::{
+ log_key_deleted, log_key_generated, log_key_imported, log_key_integrity_violation,
+};
use crate::database::{CertificateInfo, KeyIdGuard};
+use crate::error::{self, map_km_error, map_or_log_err, Error, ErrorCode};
use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
use crate::key_parameter::KeyParameter as KsKeyParam;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
@@ -40,7 +28,7 @@
use crate::super_key::{KeyBlob, SuperKeyManager};
use crate::utils::{
check_device_attestation_permissions, check_key_permission, is_device_id_attestation_tag,
- uid_to_android_user, Asp,
+ key_characteristics_to_internal, uid_to_android_user, watchdog as wd, Asp,
};
use crate::{
database::{
@@ -52,12 +40,23 @@
operation::OperationDb,
permission::KeyPerm,
};
-use crate::{
- error::{self, map_km_error, map_or_log_err, Error, ErrorCode},
- utils::key_characteristics_to_internal,
+use crate::{globals::get_keymint_device, id_rotation::IdRotationState};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, AttestationKey::AttestationKey,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, IKeyMintDevice::IKeyMintDevice,
+ KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
+ KeyMintHardwareInfo::KeyMintHardwareInfo, KeyParameter::KeyParameter,
+ KeyParameterValue::KeyParameterValue, SecurityLevel::SecurityLevel, Tag::Tag,
+};
+use android_hardware_security_keymint::binder::{BinderFeatures, Strong, ThreadState};
+use android_system_keystore2::aidl::android::system::keystore2::{
+ AuthenticatorSpec::AuthenticatorSpec, CreateOperationResponse::CreateOperationResponse,
+ Domain::Domain, EphemeralStorageKeyResponse::EphemeralStorageKeyResponse,
+ IKeystoreOperation::IKeystoreOperation, IKeystoreSecurityLevel::BnKeystoreSecurityLevel,
+ IKeystoreSecurityLevel::IKeystoreSecurityLevel, KeyDescriptor::KeyDescriptor,
+ KeyMetadata::KeyMetadata, KeyParameters::KeyParameters,
};
use anyhow::{anyhow, Context, Result};
-use binder::{IBinderInternal, Strong, ThreadState};
/// Implementation of the IKeystoreSecurityLevel Interface.
pub struct KeystoreSecurityLevel {
@@ -79,8 +78,8 @@
impl KeystoreSecurityLevel {
/// Creates a new security level instance wrapped in a
- /// BnKeystoreSecurityLevel proxy object. It also
- /// calls `IBinderInternal::set_requesting_sid` on the new interface, because
+ /// BnKeystoreSecurityLevel proxy object. It also enables
+ /// `BinderFeatures::set_requesting_sid` on the new interface, because
/// we need it for checking keystore permissions.
pub fn new_native_binder(
security_level: SecurityLevel,
@@ -88,19 +87,26 @@
) -> Result<(Strong<dyn IKeystoreSecurityLevel>, Uuid)> {
let (dev, hw_info, km_uuid) = get_keymint_device(&security_level)
.context("In KeystoreSecurityLevel::new_native_binder.")?;
- let result = BnKeystoreSecurityLevel::new_binder(Self {
- security_level,
- keymint: dev,
- hw_info,
- km_uuid,
- operation_db: OperationDb::new(),
- rem_prov_state: RemProvState::new(security_level, km_uuid),
- id_rotation_state,
- });
- result.as_binder().set_requesting_sid(true);
+ let result = BnKeystoreSecurityLevel::new_binder(
+ Self {
+ security_level,
+ keymint: dev,
+ hw_info,
+ km_uuid,
+ operation_db: OperationDb::new(),
+ rem_prov_state: RemProvState::new(security_level, km_uuid),
+ id_rotation_state,
+ },
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ );
Ok((result, km_uuid))
}
+ fn watch_millis(&self, id: &'static str, millis: u64) -> Option<wd::WatchPoint> {
+ let sec_level = self.security_level;
+ wd::watch_millis_with(id, millis, move || format!("SecurityLevel {:?}", sec_level))
+ }
+
fn store_new_key(
&self,
key: KeyDescriptor,
@@ -170,6 +176,7 @@
let key_id = db
.store_new_key(
&key,
+ KeyType::Client,
&key_parameters,
&(&key_blob, &blob_metadata),
&cert_info,
@@ -278,6 +285,12 @@
},
)?;
+ // Remove Tag::PURPOSE from the operation_parameters, since some keymaster devices return
+ // an error on begin() if Tag::PURPOSE is in the operation_parameters.
+ let op_params: Vec<KeyParameter> =
+ operation_parameters.iter().filter(|p| p.tag != Tag::PURPOSE).cloned().collect();
+ let operation_parameters = op_params.as_slice();
+
let (immediate_hat, mut auth_info) = ENFORCEMENTS
.authorize_create(
purpose,
@@ -287,8 +300,6 @@
)
.context("In create_operation.")?;
- let immediate_hat = immediate_hat.unwrap_or_default();
-
let km_blob = SUPER_KEY
.unwrap_key_if_required(&blob_metadata, km_blob)
.context("In create_operation. Failed to handle super encryption.")?;
@@ -306,16 +317,29 @@
&blob_metadata,
&operation_parameters,
|blob| loop {
- match map_km_error(km_dev.begin(
- purpose,
- blob,
- &operation_parameters,
- &immediate_hat,
- )) {
+ match map_km_error({
+ let _wp = self.watch_millis(
+ "In KeystoreSecurityLevel::create_operation: calling begin",
+ 500,
+ );
+ km_dev.begin(purpose, blob, &operation_parameters, immediate_hat.as_ref())
+ }) {
Err(Error::Km(ErrorCode::TOO_MANY_OPERATIONS)) => {
self.operation_db.prune(caller_uid, forced)?;
continue;
}
+ v @ Err(Error::Km(ErrorCode::INVALID_KEY_BLOB)) => {
+ if let Some((key_id, _)) = key_properties {
+ if let Ok(Some(key)) =
+ DB.with(|db| db.borrow_mut().load_key_descriptor(key_id))
+ {
+ log_key_integrity_violation(&key);
+ } else {
+ log::error!("Failed to load key descriptor for audit log");
+ }
+ }
+ return v;
+ }
v => return v,
}
},
@@ -327,12 +351,19 @@
let op_params: Vec<KeyParameter> = operation_parameters.to_vec();
let operation = match begin_result.operation {
- Some(km_op) => {
- self.operation_db.create_operation(km_op, caller_uid, auth_info, forced,
- LoggingInfo::new(self.security_level, purpose, op_params,
- upgraded_blob.is_some()))
- },
- None => return Err(Error::sys()).context("In create_operation: Begin operation returned successfully, but did not return a valid operation."),
+ Some(km_op) => self.operation_db.create_operation(
+ km_op,
+ caller_uid,
+ auth_info,
+ forced,
+ LoggingInfo::new(self.security_level, purpose, op_params, upgraded_blob.is_some()),
+ ),
+ None => {
+ return Err(Error::sys()).context(concat!(
+ "In create_operation: Begin operation returned successfully, ",
+ "but did not return a valid operation."
+ ))
+ }
};
let op_binder: binder::public_api::Strong<dyn IKeystoreOperation> =
@@ -364,9 +395,19 @@
let mut result = params.to_vec();
// If there is an attestation challenge we need to get an application id.
if params.iter().any(|kp| kp.tag == Tag::ATTESTATION_CHALLENGE) {
- let aaid = keystore2_aaid::get_aaid(uid).map_err(|e| {
- anyhow!(format!("In add_certificate_parameters: get_aaid returned status {}.", e))
- })?;
+ let aaid = {
+ let _wp = self.watch_millis(
+ "In KeystoreSecurityLevel::add_certificate_parameters calling: get_aaid",
+ 500,
+ );
+ keystore2_aaid::get_aaid(uid).map_err(|e| {
+ anyhow!(format!(
+ "In add_certificate_parameters: get_aaid returned status {}.",
+ e
+ ))
+ })
+ }?;
+
result.push(KeyParameter {
tag: Tag::ATTESTATION_APPLICATION_ID,
value: KeyParameterValue::Blob(aaid),
@@ -488,21 +529,48 @@
attestKeyParams: vec![],
issuerSubjectName: issuer_subject.clone(),
});
- map_km_error(km_dev.generateKey(¶ms, attest_key.as_ref()))
+ map_km_error({
+ let _wp = self.watch_millis(
+ concat!(
+ "In KeystoreSecurityLevel::generate_key (UserGenerated): ",
+ "calling generate_key."
+ ),
+ 5000, // Generate can take a little longer.
+ );
+ km_dev.generateKey(¶ms, attest_key.as_ref())
+ })
},
)
.context("In generate_key: Using user generated attestation key.")
.map(|(result, _)| result),
Some(AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }) => {
- map_km_error(km_dev.generateKey(¶ms, Some(&attestation_key)))
- .context("While generating Key with remote provisioned attestation key.")
- .map(|mut creation_result| {
- creation_result.certificateChain.push(attestation_certs);
- creation_result
- })
+ map_km_error({
+ let _wp = self.watch_millis(
+ concat!(
+ "In KeystoreSecurityLevel::generate_key (RemoteProvisioned): ",
+ "calling generate_key.",
+ ),
+ 5000, // Generate can take a little longer.
+ );
+ km_dev.generateKey(¶ms, Some(&attestation_key))
+ })
+ .context("While generating Key with remote provisioned attestation key.")
+ .map(|mut creation_result| {
+ creation_result.certificateChain.push(attestation_certs);
+ creation_result
+ })
}
- None => map_km_error(km_dev.generateKey(¶ms, None))
- .context("While generating Key without explicit attestation key."),
+ None => map_km_error({
+ let _wp = self.watch_millis(
+ concat!(
+ "In KeystoreSecurityLevel::generate_key (No attestation): ",
+ "calling generate_key.",
+ ),
+ 5000, // Generate can take a little longer.
+ );
+ km_dev.generateKey(¶ms, None)
+ })
+ .context("While generating Key without explicit attestation key."),
}
.context("In generate_key.")?;
@@ -559,9 +627,12 @@
let km_dev: Strong<dyn IKeyMintDevice> =
self.keymint.get_interface().context("In import_key: Trying to get the KM device")?;
- let creation_result =
- map_km_error(km_dev.importKey(¶ms, format, key_data, None /* attestKey */))
- .context("In import_key: Trying to call importKey")?;
+ let creation_result = map_km_error({
+ let _wp =
+ self.watch_millis("In KeystoreSecurityLevel::import_key: calling importKey.", 500);
+ km_dev.importKey(¶ms, format, key_data, None /* attestKey */)
+ })
+ .context("In import_key: Trying to call importKey")?;
let user_id = uid_to_android_user(caller_uid);
self.store_new_key(key, creation_result, user_id, Some(flags)).context("In import_key.")
@@ -674,6 +745,10 @@
&wrapping_blob_metadata,
&[],
|wrapping_blob| {
+ let _wp = self.watch_millis(
+ "In KeystoreSecurityLevel::import_wrapped_key: calling importWrappedKey.",
+ 500,
+ );
let creation_result = map_km_error(km_dev.importWrappedKey(
wrapped_data,
wrapping_blob,
@@ -701,7 +776,7 @@
SuperKeyManager::reencrypt_if_required(key_blob, &upgraded_blob)
.context("In store_upgraded_keyblob: Failed to handle super encryption.")?;
- let mut new_blob_metadata = new_blob_metadata.unwrap_or_else(BlobMetaData::new);
+ let mut new_blob_metadata = new_blob_metadata.unwrap_or_default();
if let Some(uuid) = km_uuid {
new_blob_metadata.add(BlobMetaEntry::KmUuid(*uuid));
}
@@ -732,8 +807,17 @@
{
match f(key_blob) {
Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
- let upgraded_blob = map_km_error(km_dev.upgradeKey(key_blob, params))
- .context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
+ let upgraded_blob = {
+ let _wp = self.watch_millis(
+ concat!(
+ "In KeystoreSecurityLevel::upgrade_keyblob_if_required_with: ",
+ "calling upgradeKey."
+ ),
+ 500,
+ );
+ map_km_error(km_dev.upgradeKey(key_blob, params))
+ }
+ .context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
if let Some(kid) = key_id_guard {
Self::store_upgraded_keyblob(
@@ -777,7 +861,10 @@
}
}
- fn convert_storage_key_to_ephemeral(&self, storage_key: &KeyDescriptor) -> Result<Vec<u8>> {
+ fn convert_storage_key_to_ephemeral(
+ &self,
+ storage_key: &KeyDescriptor,
+ ) -> Result<EphemeralStorageKeyResponse> {
if storage_key.domain != Domain::BLOB {
return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT)).context(concat!(
"In IKeystoreSecurityLevel convert_storage_key_to_ephemeral: ",
@@ -800,8 +887,47 @@
"In IKeystoreSecurityLevel convert_storage_key_to_ephemeral: ",
"Getting keymint device interface"
))?;
- map_km_error(km_dev.convertStorageKeyToEphemeral(key_blob))
- .context("In keymint device convertStorageKeyToEphemeral")
+ match {
+ let _wp = self.watch_millis(
+ concat!(
+ "In IKeystoreSecurityLevel::convert_storage_key_to_ephemeral: ",
+ "calling convertStorageKeyToEphemeral (1)"
+ ),
+ 500,
+ );
+ map_km_error(km_dev.convertStorageKeyToEphemeral(key_blob))
+ } {
+ Ok(result) => {
+ Ok(EphemeralStorageKeyResponse { ephemeralKey: result, upgradedBlob: None })
+ }
+ Err(error::Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
+ let upgraded_blob = {
+ let _wp = self.watch_millis(
+ "In convert_storage_key_to_ephemeral: calling upgradeKey",
+ 500,
+ );
+ map_km_error(km_dev.upgradeKey(key_blob, &[]))
+ }
+ .context("In convert_storage_key_to_ephemeral: Failed to upgrade key blob.")?;
+ let ephemeral_key = {
+ let _wp = self.watch_millis(
+ "In convert_storage_key_to_ephemeral: calling convertStorageKeyToEphemeral (2)",
+ 500,
+ );
+ map_km_error(km_dev.convertStorageKeyToEphemeral(key_blob))
+ }
+ .context(concat!(
+ "In convert_storage_key_to_ephemeral: ",
+ "Failed to retrieve ephemeral key (after upgrade)."
+ ))?;
+ Ok(EphemeralStorageKeyResponse {
+ ephemeralKey: ephemeral_key,
+ upgradedBlob: Some(upgraded_blob),
+ })
+ }
+ Err(e) => Err(e)
+ .context("In convert_storage_key_to_ephemeral: Failed to retrieve ephemeral key."),
+ }
}
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
@@ -823,7 +949,11 @@
.keymint
.get_interface()
.context("In IKeystoreSecurityLevel delete_key: Getting keymint device interface")?;
- map_km_error(km_dev.deleteKey(&key_blob)).context("In keymint device deleteKey")
+ {
+ let _wp =
+ self.watch_millis("In KeystoreSecuritylevel::delete_key: calling deleteKey", 500);
+ map_km_error(km_dev.deleteKey(&key_blob)).context("In keymint device deleteKey")
+ }
}
}
@@ -836,6 +966,7 @@
operation_parameters: &[KeyParameter],
forced: bool,
) -> binder::public_api::Result<CreateOperationResponse> {
+ let _wp = self.watch_millis("IKeystoreSecurityLevel::createOperation", 500);
map_or_log_err(self.create_operation(key, operation_parameters, forced), Ok)
}
fn generateKey(
@@ -846,8 +977,12 @@
flags: i32,
entropy: &[u8],
) -> binder::public_api::Result<KeyMetadata> {
+        // Duration is set to 5 seconds because generateKey — especially for RSA keys —
+        // takes more time than other operations.
+ let _wp = self.watch_millis("IKeystoreSecurityLevel::generateKey", 5000);
let result = self.generate_key(key, attestation_key, params, flags, entropy);
log_key_creation_event_stats(self.security_level, params, &result);
+ log_key_generated(key, ThreadState::get_calling_uid(), result.is_ok());
map_or_log_err(result, Ok)
}
fn importKey(
@@ -858,8 +993,10 @@
flags: i32,
key_data: &[u8],
) -> binder::public_api::Result<KeyMetadata> {
+ let _wp = self.watch_millis("IKeystoreSecurityLevel::importKey", 500);
let result = self.import_key(key, attestation_key, params, flags, key_data);
log_key_creation_event_stats(self.security_level, params, &result);
+ log_key_imported(key, ThreadState::get_calling_uid(), result.is_ok());
map_or_log_err(result, Ok)
}
fn importWrappedKey(
@@ -870,18 +1007,24 @@
params: &[KeyParameter],
authenticators: &[AuthenticatorSpec],
) -> binder::public_api::Result<KeyMetadata> {
+ let _wp = self.watch_millis("IKeystoreSecurityLevel::importWrappedKey", 500);
let result =
self.import_wrapped_key(key, wrapping_key, masking_key, params, authenticators);
log_key_creation_event_stats(self.security_level, params, &result);
+ log_key_imported(key, ThreadState::get_calling_uid(), result.is_ok());
map_or_log_err(result, Ok)
}
fn convertStorageKeyToEphemeral(
&self,
storage_key: &KeyDescriptor,
- ) -> binder::public_api::Result<Vec<u8>> {
+ ) -> binder::public_api::Result<EphemeralStorageKeyResponse> {
+ let _wp = self.watch_millis("IKeystoreSecurityLevel::convertStorageKeyToEphemeral", 500);
map_or_log_err(self.convert_storage_key_to_ephemeral(storage_key), Ok)
}
fn deleteKey(&self, key: &KeyDescriptor) -> binder::public_api::Result<()> {
- map_or_log_err(self.delete_key(key), Ok)
+ let _wp = self.watch_millis("IKeystoreSecurityLevel::deleteKey", 500);
+ let result = self.delete_key(key);
+ log_key_deleted(key, ThreadState::get_calling_uid(), result.is_ok());
+ map_or_log_err(result, Ok)
}
}
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 1debe1b..d65743d 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -17,11 +17,12 @@
use std::collections::HashMap;
+use crate::audit_log::log_key_deleted;
use crate::permission::{KeyPerm, KeystorePerm};
use crate::security_level::KeystoreSecurityLevel;
use crate::utils::{
check_grant_permission, check_key_permission, check_keystore_permission,
- key_parameters_to_authorizations, Asp,
+ key_parameters_to_authorizations, watchdog as wd, Asp,
};
use crate::{
database::Uuid,
@@ -37,13 +38,13 @@
id_rotation::IdRotationState,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_hardware_security_keymint::binder::{BinderFeatures, Strong, ThreadState};
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel,
IKeystoreService::BnKeystoreService, IKeystoreService::IKeystoreService,
KeyDescriptor::KeyDescriptor, KeyEntryResponse::KeyEntryResponse, KeyMetadata::KeyMetadata,
};
use anyhow::{Context, Result};
-use binder::{IBinderInternal, Strong, ThreadState};
use error::Error;
use keystore2_selinux as selinux;
@@ -90,9 +91,10 @@
"In KeystoreService::new_native_binder: Trying to initialize the legacy migrator.",
)?;
- let result = BnKeystoreService::new_binder(result);
- result.as_binder().set_requesting_sid(true);
- Ok(result)
+ Ok(BnKeystoreService::new_binder(
+ result,
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
}
fn uuid_to_sec_level(&self, uuid: &Uuid) -> SecurityLevel {
@@ -239,8 +241,13 @@
check_key_permission(KeyPerm::rebind(), &key, &None)
.context("Caller does not have permission to insert this certificate.")?;
- db.store_new_certificate(&key, certificate_chain.unwrap(), &KEYSTORE_UUID)
- .context("Failed to insert new certificate.")?;
+ db.store_new_certificate(
+ &key,
+ KeyType::Client,
+ certificate_chain.unwrap(),
+ &KEYSTORE_UUID,
+ )
+ .context("Failed to insert new certificate.")?;
Ok(())
})
.context("In update_subcomponent.")
@@ -289,7 +296,7 @@
&mut DB
.with(|db| {
let mut db = db.borrow_mut();
- db.list(k.domain, k.nspace)
+ db.list(k.domain, k.nspace, KeyType::Client)
})
.context("In list_entries: Trying to list keystore database.")?,
);
@@ -352,9 +359,13 @@
&self,
security_level: SecurityLevel,
) -> binder::public_api::Result<Strong<dyn IKeystoreSecurityLevel>> {
+ let _wp = wd::watch_millis_with("IKeystoreService::getSecurityLevel", 500, move || {
+ format!("security_level: {}", security_level.0)
+ });
map_or_log_err(self.get_security_level(security_level), Ok)
}
fn getKeyEntry(&self, key: &KeyDescriptor) -> binder::public_api::Result<KeyEntryResponse> {
+ let _wp = wd::watch_millis("IKeystoreService::get_key_entry", 500);
map_or_log_err(self.get_key_entry(key), Ok)
}
fn updateSubcomponent(
@@ -363,6 +374,7 @@
public_cert: Option<&[u8]>,
certificate_chain: Option<&[u8]>,
) -> binder::public_api::Result<()> {
+ let _wp = wd::watch_millis("IKeystoreService::updateSubcomponent", 500);
map_or_log_err(self.update_subcomponent(key, public_cert, certificate_chain), Ok)
}
fn listEntries(
@@ -370,10 +382,14 @@
domain: Domain,
namespace: i64,
) -> binder::public_api::Result<Vec<KeyDescriptor>> {
+ let _wp = wd::watch_millis("IKeystoreService::listEntries", 500);
map_or_log_err(self.list_entries(domain, namespace), Ok)
}
fn deleteKey(&self, key: &KeyDescriptor) -> binder::public_api::Result<()> {
- map_or_log_err(self.delete_key(key), Ok)
+ let _wp = wd::watch_millis("IKeystoreService::deleteKey", 500);
+ let result = self.delete_key(key);
+ log_key_deleted(key, ThreadState::get_calling_uid(), result.is_ok());
+ map_or_log_err(result, Ok)
}
fn grant(
&self,
@@ -381,9 +397,11 @@
grantee_uid: i32,
access_vector: i32,
) -> binder::public_api::Result<KeyDescriptor> {
+ let _wp = wd::watch_millis("IKeystoreService::grant", 500);
map_or_log_err(self.grant(key, grantee_uid, access_vector.into()), Ok)
}
fn ungrant(&self, key: &KeyDescriptor, grantee_uid: i32) -> binder::public_api::Result<()> {
+ let _wp = wd::watch_millis("IKeystoreService::ungrant", 500);
map_or_log_err(self.ungrant(key, grantee_uid), Ok)
}
}
diff --git a/keystore2/src/shared_secret_negotiation.rs b/keystore2/src/shared_secret_negotiation.rs
index fb55f33..64bc2c3 100644
--- a/keystore2/src/shared_secret_negotiation.rs
+++ b/keystore2/src/shared_secret_negotiation.rs
@@ -24,6 +24,7 @@
use anyhow::{Context, Result};
use keystore2_vintf::{get_aidl_instances, get_hidl_instances};
use std::fmt::{self, Display, Formatter};
+use std::time::Duration;
/// This function initiates the shared secret negotiation. It starts a thread and then returns
/// immediately. The thread consults the vintf manifest to enumerate expected negotiation
@@ -109,7 +110,11 @@
/// Lists participants.
fn list_participants() -> Result<Vec<SharedSecretParticipant>> {
- Ok([(4, 0), (4, 1)]
+    // A 4.1 implementation always also registers as 4.0, so only the highest version of
+    // each of "default" and "strongbox" makes the cut.
+ let mut legacy_default_found: bool = false;
+ let mut legacy_strongbox_found: bool = false;
+ Ok([(4, 1), (4, 0)]
.iter()
.map(|(ma, mi)| {
get_hidl_instances(KEYMASTER_PACKAGE_NAME, *ma, *mi, KEYMASTER_INTERFACE_NAME)
@@ -119,7 +124,24 @@
instances
.into_iter()
.filter_map(|name| {
- filter_map_legacy_km_instances(name.to_string(), (*ma, *mi))
+ filter_map_legacy_km_instances(name.to_string(), (*ma, *mi)).and_then(
+ |sp| {
+ if let SharedSecretParticipant::Hidl {
+ is_strongbox: true,
+ ..
+ } = &sp
+ {
+ if !legacy_strongbox_found {
+ legacy_strongbox_found = true;
+ return Some(sp);
+ }
+ } else if !legacy_default_found {
+ legacy_default_found = true;
+ return Some(sp);
+ }
+ None
+ },
+ )
})
.collect::<Vec<SharedSecretParticipant>>()
})
@@ -215,7 +237,7 @@
if participants.is_empty() {
break;
}
- std::thread::sleep(std::time::Duration::from_millis(1000));
+ std::thread::sleep(Duration::from_millis(1000));
}
connected_participants
}
@@ -237,7 +259,7 @@
Err(e) => {
log::warn!("{:?}", e);
log::warn!("Retrying in one second.");
- std::thread::sleep(std::time::Duration::from_millis(1000));
+ std::thread::sleep(Duration::from_millis(1000));
}
Ok(params) => break params,
}
@@ -246,20 +268,28 @@
params.sort_unstable();
// Phase 2: Send the sorted sharing parameters to all participants.
- participants
- .into_iter()
- .try_fold(None, |acc, (s, p)| {
- match (acc, map_binder_status(s.computeSharedSecret(¶ms))) {
- (None, Ok(new_sum)) => Ok(Some(new_sum)),
- (Some(old_sum), Ok(new_sum)) => {
- if old_sum == new_sum {
- Ok(Some(old_sum))
- } else {
- Err(SharedSecretError::Checksum(p))
- }
+ let negotiation_result = participants.into_iter().try_fold(None, |acc, (s, p)| {
+ match (acc, map_binder_status(s.computeSharedSecret(¶ms))) {
+ (None, Ok(new_sum)) => Ok(Some(new_sum)),
+ (Some(old_sum), Ok(new_sum)) => {
+ if old_sum == new_sum {
+ Ok(Some(old_sum))
+ } else {
+ Err(SharedSecretError::Checksum(p))
}
- (_, Err(e)) => Err(SharedSecretError::Computation { e, p }),
}
- })
- .expect("Fatal: Shared secret computation failed.");
+ (_, Err(e)) => Err(SharedSecretError::Computation { e, p }),
+ }
+ });
+
+ if let Err(e) = negotiation_result {
+ log::error!("In negotiate_shared_secret: {:?}.", e);
+ if let SharedSecretError::Checksum(_) = e {
+ log::error!(concat!(
+ "This means that this device is NOT PROVISIONED CORRECTLY.\n",
+ "User authorization and other security functions will not work\n",
+ "as expected. Please contact your OEM for instructions.",
+ ));
+ }
+ }
}
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 3fa4cf0..9fb267a 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -13,33 +13,53 @@
// limitations under the License.
use crate::{
+ boot_level_keys::{get_level_zero_key, BootLevelKeyCache},
database::BlobMetaData,
database::BlobMetaEntry,
database::EncryptedBy,
database::KeyEntry,
database::KeyType,
- database::{KeyMetaData, KeyMetaEntry, KeystoreDB},
+ database::{KeyEntryLoadBits, KeyIdGuard, KeyMetaData, KeyMetaEntry, KeystoreDB},
ec_crypto::ECDHPrivateKey,
enforcements::Enforcements,
error::Error,
error::ResponseCode,
- key_parameter::KeyParameter,
+ key_parameter::{KeyParameter, KeyParameterValue},
legacy_blob::LegacyBlobLoader,
legacy_migrator::LegacyMigrator,
+ raw_device::KeyMintDevice,
try_insert::TryInsert,
+ utils::watchdog as wd,
+ utils::AID_KEYSTORE,
};
-use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, BlockMode::BlockMode, HardwareAuthToken::HardwareAuthToken,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, KeyFormat::KeyFormat,
+ KeyParameter::KeyParameter as KmKeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
+ SecurityLevel::SecurityLevel,
+};
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor,
+};
use anyhow::{Context, Result};
use keystore2_crypto::{
aes_gcm_decrypt, aes_gcm_encrypt, generate_aes256_key, generate_salt, Password, ZVec,
AES_256_KEY_LENGTH,
};
-use std::ops::Deref;
+use keystore2_system_property::PropertyWatcher;
use std::{
collections::HashMap,
sync::Arc,
sync::{Mutex, Weak},
};
+use std::{convert::TryFrom, ops::Deref};
+
+const MAX_MAX_BOOT_LEVEL: usize = 1_000_000_000;
+/// Allow up to 15 seconds between the user unlocking using a biometric, and the auth
+/// token being used to unlock in [`SuperKeyManager::try_unlock_user_with_biometric`].
+/// This seems short enough for security purposes, while long enough that even the
+/// very slowest device will present the auth token in time.
+const BIOMETRIC_AUTH_TIMEOUT_S: i32 = 15; // seconds
type UserId = u32;
@@ -90,13 +110,47 @@
LskfBound,
/// Superencrypt with a key cleared from memory when the device is locked.
ScreenLockBound,
+ /// Superencrypt with a key based on the desired boot level
+ BootLevel(i32),
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum SuperKeyIdentifier {
+ /// id of the super key in the database.
+ DatabaseId(i64),
+ /// Boot level of the encrypting boot level key
+ BootLevel(i32),
+}
+
+impl SuperKeyIdentifier {
+ fn from_metadata(metadata: &BlobMetaData) -> Option<Self> {
+ if let Some(EncryptedBy::KeyId(key_id)) = metadata.encrypted_by() {
+ Some(SuperKeyIdentifier::DatabaseId(*key_id))
+ } else if let Some(boot_level) = metadata.max_boot_level() {
+ Some(SuperKeyIdentifier::BootLevel(*boot_level))
+ } else {
+ None
+ }
+ }
+
+ fn add_to_metadata(&self, metadata: &mut BlobMetaData) {
+ match self {
+ SuperKeyIdentifier::DatabaseId(id) => {
+ metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(*id)));
+ }
+ SuperKeyIdentifier::BootLevel(level) => {
+ metadata.add(BlobMetaEntry::MaxBootLevel(*level));
+ }
+ }
+ }
}
pub struct SuperKey {
algorithm: SuperEncryptionAlgorithm,
key: ZVec,
- // id of the super key in the database.
- id: i64,
+ /// Identifier of the encrypting key, used to write an encrypted blob
+ /// back to the database after re-encryption eg on a key update.
+ id: SuperKeyIdentifier,
/// ECDH is more expensive than AES. So on ECDH private keys we set the
/// reencrypt_with field to point at the corresponding AES key, and the
/// keys will be re-encrypted with AES on first use.
@@ -116,6 +170,72 @@
}
}
+/// A SuperKey that has been encrypted with an AES-GCM key. For
+/// encryption the key is in memory, and for decryption it is in KM.
+struct LockedKey {
+ algorithm: SuperEncryptionAlgorithm,
+ id: SuperKeyIdentifier,
+ nonce: Vec<u8>,
+ ciphertext: Vec<u8>, // with tag appended
+}
+
+impl LockedKey {
+ fn new(key: &[u8], to_encrypt: &Arc<SuperKey>) -> Result<Self> {
+ let (mut ciphertext, nonce, mut tag) = aes_gcm_encrypt(&to_encrypt.key, key)?;
+ ciphertext.append(&mut tag);
+ Ok(LockedKey { algorithm: to_encrypt.algorithm, id: to_encrypt.id, nonce, ciphertext })
+ }
+
+ fn decrypt(
+ &self,
+ db: &mut KeystoreDB,
+ km_dev: &KeyMintDevice,
+ key_id_guard: &KeyIdGuard,
+ key_entry: &KeyEntry,
+ auth_token: &HardwareAuthToken,
+ reencrypt_with: Option<Arc<SuperKey>>,
+ ) -> Result<Arc<SuperKey>> {
+ let key_blob = key_entry
+ .key_blob_info()
+ .as_ref()
+ .map(|(key_blob, _)| KeyBlob::Ref(key_blob))
+ .ok_or(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In LockedKey::decrypt: Missing key blob info.")?;
+ let key_params = vec![
+ KeyParameterValue::Algorithm(Algorithm::AES),
+ KeyParameterValue::KeySize(256),
+ KeyParameterValue::BlockMode(BlockMode::GCM),
+ KeyParameterValue::PaddingMode(PaddingMode::NONE),
+ KeyParameterValue::Nonce(self.nonce.clone()),
+ KeyParameterValue::MacLength(128),
+ ];
+ let key_params: Vec<KmKeyParameter> = key_params.into_iter().map(|x| x.into()).collect();
+ let key = ZVec::try_from(km_dev.use_key_in_one_step(
+ db,
+ key_id_guard,
+ &key_blob,
+ KeyPurpose::DECRYPT,
+ &key_params,
+ Some(auth_token),
+ &self.ciphertext,
+ )?)?;
+ Ok(Arc::new(SuperKey { algorithm: self.algorithm, key, id: self.id, reencrypt_with }))
+ }
+}
+
+/// Keys for unlocking UNLOCKED_DEVICE_REQUIRED keys, as LockedKeys, complete with
+/// a database descriptor for the encrypting key and the sids for the auth tokens
+/// that can be used to decrypt it.
+struct BiometricUnlock {
+ /// List of auth token SIDs that can be used to unlock these keys.
+ sids: Vec<i64>,
+ /// Database descriptor of key to use to unlock.
+ key_desc: KeyDescriptor,
+ /// Locked versions of the matching UserSuperKeys fields
+ screen_lock_bound: LockedKey,
+ screen_lock_bound_private: LockedKey,
+}
+
#[derive(Default)]
struct UserSuperKeys {
/// The per boot key is used for LSKF binding of authentication bound keys. There is one
@@ -130,17 +250,28 @@
/// When the device is locked, screen-lock-bound keys can still be encrypted, using
/// ECDH public-key encryption. This field holds the decryption private key.
screen_lock_bound_private: Option<Arc<SuperKey>>,
+ /// Versions of the above two keys, locked behind a biometric.
+ biometric_unlock: Option<BiometricUnlock>,
}
#[derive(Default)]
struct SkmState {
user_keys: HashMap<UserId, UserSuperKeys>,
key_index: HashMap<i64, Weak<SuperKey>>,
+ boot_level_key_cache: Option<BootLevelKeyCache>,
}
impl SkmState {
- fn add_key_to_key_index(&mut self, super_key: &Arc<SuperKey>) {
- self.key_index.insert(super_key.id, Arc::downgrade(super_key));
+ fn add_key_to_key_index(&mut self, super_key: &Arc<SuperKey>) -> Result<()> {
+ if let SuperKeyIdentifier::DatabaseId(id) = super_key.id {
+ self.key_index.insert(id, Arc::downgrade(super_key));
+ Ok(())
+ } else {
+ Err(Error::sys()).context(format!(
+ "In add_key_to_key_index: cannot add key with ID {:?}",
+ super_key.id
+ ))
+ }
}
}
@@ -150,19 +281,101 @@
}
impl SuperKeyManager {
+ pub fn set_up_boot_level_cache(self: &Arc<Self>, db: &mut KeystoreDB) -> Result<()> {
+ let mut data = self.data.lock().unwrap();
+ if data.boot_level_key_cache.is_some() {
+ log::info!("In set_up_boot_level_cache: called for a second time");
+ return Ok(());
+ }
+ let level_zero_key = get_level_zero_key(db)
+ .context("In set_up_boot_level_cache: get_level_zero_key failed")?;
+ data.boot_level_key_cache = Some(BootLevelKeyCache::new(level_zero_key));
+ log::info!("Starting boot level watcher.");
+ let clone = self.clone();
+ std::thread::spawn(move || {
+ clone
+ .watch_boot_level()
+ .unwrap_or_else(|e| log::error!("watch_boot_level failed:\n{:?}", e));
+ });
+ Ok(())
+ }
+
+ /// Watch the `keystore.boot_level` system property, and keep boot level up to date.
+ /// Blocks waiting for system property changes, so must be run in its own thread.
+ fn watch_boot_level(&self) -> Result<()> {
+ let mut w = PropertyWatcher::new("keystore.boot_level")
+ .context("In watch_boot_level: PropertyWatcher::new failed")?;
+ loop {
+ let level = w
+ .read(|_n, v| v.parse::<usize>().map_err(std::convert::Into::into))
+ .context("In watch_boot_level: read of property failed")?;
+ // watch_boot_level should only be called once data.boot_level_key_cache is Some,
+ // so it's safe to unwrap in the branches below.
+ if level < MAX_MAX_BOOT_LEVEL {
+ log::info!("Read keystore.boot_level value {}", level);
+ let mut data = self.data.lock().unwrap();
+ data.boot_level_key_cache
+ .as_mut()
+ .unwrap()
+ .advance_boot_level(level)
+ .context("In watch_boot_level: advance_boot_level failed")?;
+ } else {
+ log::info!(
+ "keystore.boot_level {} hits maximum {}, finishing.",
+ level,
+ MAX_MAX_BOOT_LEVEL
+ );
+ let mut data = self.data.lock().unwrap();
+ data.boot_level_key_cache.as_mut().unwrap().finish();
+ break;
+ }
+ w.wait().context("In watch_boot_level: property wait failed")?;
+ }
+ Ok(())
+ }
+
+ pub fn level_accessible(&self, boot_level: i32) -> bool {
+ self.data
+ .lock()
+ .unwrap()
+ .boot_level_key_cache
+ .as_ref()
+ .map_or(false, |c| c.level_accessible(boot_level as usize))
+ }
+
pub fn forget_all_keys_for_user(&self, user: UserId) {
let mut data = self.data.lock().unwrap();
data.user_keys.remove(&user);
}
- fn install_per_boot_key_for_user(&self, user: UserId, super_key: Arc<SuperKey>) {
+ fn install_per_boot_key_for_user(&self, user: UserId, super_key: Arc<SuperKey>) -> Result<()> {
let mut data = self.data.lock().unwrap();
- data.add_key_to_key_index(&super_key);
+ data.add_key_to_key_index(&super_key)
+ .context("In install_per_boot_key_for_user: add_key_to_key_index failed")?;
data.user_keys.entry(user).or_default().per_boot = Some(super_key);
+ Ok(())
}
- fn lookup_key(&self, key_id: &i64) -> Option<Arc<SuperKey>> {
- self.data.lock().unwrap().key_index.get(key_id).and_then(|k| k.upgrade())
+ fn lookup_key(&self, key_id: &SuperKeyIdentifier) -> Result<Option<Arc<SuperKey>>> {
+ let mut data = self.data.lock().unwrap();
+ Ok(match key_id {
+ SuperKeyIdentifier::DatabaseId(id) => data.key_index.get(id).and_then(|k| k.upgrade()),
+ SuperKeyIdentifier::BootLevel(level) => data
+ .boot_level_key_cache
+ .as_mut()
+ .map(|b| b.aes_key(*level as usize))
+ .transpose()
+ .context("In lookup_key: aes_key failed")?
+ .flatten()
+ .map(|key| {
+ Arc::new(SuperKey {
+ algorithm: SuperEncryptionAlgorithm::Aes256Gcm,
+ key,
+ id: *key_id,
+ reencrypt_with: None,
+ })
+ }),
+ })
}
pub fn get_per_boot_key_by_user_id(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
@@ -215,26 +428,27 @@
Ok(())
}
- /// Unwraps an encrypted key blob given metadata identifying the encryption key.
- /// The function queries `metadata.encrypted_by()` to determine the encryption key.
- /// It then checks if the required key is memory resident, and if so decrypts the
- /// blob.
- pub fn unwrap_key<'a>(&self, blob: &'a [u8], metadata: &BlobMetaData) -> Result<KeyBlob<'a>> {
- let key_id = if let Some(EncryptedBy::KeyId(key_id)) = metadata.encrypted_by() {
- key_id
+ /// Check if a given key is super-encrypted, from its metadata. If so, unwrap the key using
+ /// the relevant super key.
+ pub fn unwrap_key_if_required<'a>(
+ &self,
+ metadata: &BlobMetaData,
+ blob: &'a [u8],
+ ) -> Result<KeyBlob<'a>> {
+ Ok(if let Some(key_id) = SuperKeyIdentifier::from_metadata(metadata) {
+ let super_key = self
+ .lookup_key(&key_id)
+ .context("In unwrap_key: lookup_key failed")?
+ .ok_or(Error::Rc(ResponseCode::LOCKED))
+ .context("In unwrap_key: Required super decryption key is not in memory.")?;
+ KeyBlob::Sensitive {
+ key: Self::unwrap_key_with_key(blob, metadata, &super_key)
+ .context("In unwrap_key: unwrap_key_with_key failed")?,
+ reencrypt_with: super_key.reencrypt_with.as_ref().unwrap_or(&super_key).clone(),
+ force_reencrypt: super_key.reencrypt_with.is_some(),
+ }
} else {
- return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
- .context("In unwrap_key: Cannot determine wrapping key.");
- };
- let super_key = self
- .lookup_key(&key_id)
- .ok_or(Error::Rc(ResponseCode::LOCKED))
- .context("In unwrap_key: Required super decryption key is not in memory.")?;
- Ok(KeyBlob::Sensitive {
- key: Self::unwrap_key_with_key(blob, metadata, &super_key)
- .context("In unwrap_key: unwrap_key_with_key failed")?,
- reencrypt_with: super_key.reencrypt_with.as_ref().unwrap_or(&super_key).clone(),
- force_reencrypt: super_key.reencrypt_with.is_some(),
+ KeyBlob::Ref(blob)
})
}
@@ -389,7 +603,7 @@
.context(
"In populate_cache_from_super_key_blob. Failed to extract super key from key entry",
)?;
- self.install_per_boot_key_for_user(user_id, super_key.clone());
+ self.install_per_boot_key_for_user(user_id, super_key.clone())?;
Ok(super_key)
}
@@ -430,7 +644,12 @@
));
}
};
- Ok(Arc::new(SuperKey { algorithm, key, id: entry.id(), reencrypt_with }))
+ Ok(Arc::new(SuperKey {
+ algorithm,
+ key,
+ id: SuperKeyIdentifier::DatabaseId(entry.id()),
+ reencrypt_with,
+ }))
} else {
Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
.context("In extract_super_key_from_key_entry: No key blob info.")
@@ -498,13 +717,13 @@
.context("In encrypt_with_aes_super_key: Failed to encrypt new super key.")?;
metadata.add(BlobMetaEntry::Iv(iv));
metadata.add(BlobMetaEntry::AeadTag(tag));
- metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key.id)));
+ super_key.id.add_to_metadata(&mut metadata);
Ok((encrypted_key, metadata))
}
/// Check if super encryption is required and if so, super-encrypt the key to be stored in
/// the database.
- #[allow(clippy::clippy::too_many_arguments)]
+ #[allow(clippy::too_many_arguments)]
pub fn handle_super_encryption_on_key_init(
&self,
db: &mut KeystoreDB,
@@ -554,27 +773,23 @@
metadata.add(BlobMetaEntry::Salt(salt));
metadata.add(BlobMetaEntry::Iv(iv));
metadata.add(BlobMetaEntry::AeadTag(aead_tag));
- metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(key_id_guard.id())));
+ SuperKeyIdentifier::DatabaseId(key_id_guard.id())
+ .add_to_metadata(&mut metadata);
Ok((encrypted_key, metadata))
}
}
- }
- }
-
- /// Check if a given key is super-encrypted, from its metadata. If so, unwrap the key using
- /// the relevant super key.
- pub fn unwrap_key_if_required<'a>(
- &self,
- metadata: &BlobMetaData,
- key_blob: &'a [u8],
- ) -> Result<KeyBlob<'a>> {
- if Self::key_super_encrypted(&metadata) {
- let unwrapped_key = self
- .unwrap_key(key_blob, metadata)
- .context("In unwrap_key_if_required. Error in unwrapping the key.")?;
- Ok(unwrapped_key)
- } else {
- Ok(KeyBlob::Ref(key_blob))
+ SuperEncryptionType::BootLevel(level) => {
+ let key_id = SuperKeyIdentifier::BootLevel(level);
+ let super_key = self
+ .lookup_key(&key_id)
+ .context("In handle_super_encryption_on_key_init: lookup_key failed")?
+ .ok_or(Error::Rc(ResponseCode::LOCKED))
+ .context("In handle_super_encryption_on_key_init: Boot stage key absent")?;
+ Self::encrypt_with_aes_super_key(key_blob, &super_key).context(concat!(
+ "In handle_super_encryption_on_key_init: ",
+ "Failed to encrypt with BootLevel key."
+ ))
+ }
}
}
@@ -596,14 +811,6 @@
}
}
- // Helper function to decide if a key is super encrypted, given metadata.
- fn key_super_encrypted(metadata: &BlobMetaData) -> bool {
- if let Some(&EncryptedBy::KeyId(_)) = metadata.encrypted_by() {
- return true;
- }
- false
- }
-
/// Fetch a superencryption key from the database, or create it if it doesn't already exist.
/// When this is called, the caller must hold the lock on the SuperKeyManager.
/// So it's OK that the check and creation are different DB transactions.
@@ -663,7 +870,7 @@
Ok(Arc::new(SuperKey {
algorithm: key_type.algorithm,
key: super_key,
- id: key_entry.id(),
+ id: SuperKeyIdentifier::DatabaseId(key_entry.id()),
reencrypt_with,
}))
}
@@ -702,18 +909,154 @@
)
})?
.clone();
- data.add_key_to_key_index(&aes);
- data.add_key_to_key_index(&ecdh);
+ data.add_key_to_key_index(&aes)?;
+ data.add_key_to_key_index(&ecdh)?;
Ok(())
}
/// Wipe the screen-lock bound keys for this user from memory.
- pub fn lock_screen_lock_bound_key(&self, user_id: UserId) {
+ pub fn lock_screen_lock_bound_key(
+ &self,
+ db: &mut KeystoreDB,
+ user_id: UserId,
+ unlocking_sids: &[i64],
+ ) {
+ log::info!("Locking screen bound for user {} sids {:?}", user_id, unlocking_sids);
let mut data = self.data.lock().unwrap();
let mut entry = data.user_keys.entry(user_id).or_default();
+ if !unlocking_sids.is_empty() {
+ if let (Some(aes), Some(ecdh)) = (
+ entry.screen_lock_bound.as_ref().cloned(),
+ entry.screen_lock_bound_private.as_ref().cloned(),
+ ) {
+ let res = (|| -> Result<()> {
+ let key_desc = KeyMintDevice::internal_descriptor(format!(
+ "biometric_unlock_key_{}",
+ user_id
+ ));
+ let encrypting_key = generate_aes256_key()?;
+ let km_dev: KeyMintDevice =
+ KeyMintDevice::get(SecurityLevel::TRUSTED_ENVIRONMENT)
+ .context("In lock_screen_lock_bound_key: KeyMintDevice::get failed")?;
+ let mut key_params = vec![
+ KeyParameterValue::Algorithm(Algorithm::AES),
+ KeyParameterValue::KeySize(256),
+ KeyParameterValue::BlockMode(BlockMode::GCM),
+ KeyParameterValue::PaddingMode(PaddingMode::NONE),
+ KeyParameterValue::CallerNonce,
+ KeyParameterValue::KeyPurpose(KeyPurpose::DECRYPT),
+ KeyParameterValue::MinMacLength(128),
+ KeyParameterValue::AuthTimeout(BIOMETRIC_AUTH_TIMEOUT_S),
+ KeyParameterValue::HardwareAuthenticatorType(
+ HardwareAuthenticatorType::FINGERPRINT,
+ ),
+ ];
+ for sid in unlocking_sids {
+ key_params.push(KeyParameterValue::UserSecureID(*sid));
+ }
+ let key_params: Vec<KmKeyParameter> =
+ key_params.into_iter().map(|x| x.into()).collect();
+ km_dev.create_and_store_key(
+ db,
+ &key_desc,
+ KeyType::Client, /* TODO Should be Super b/189470584 */
+ |dev| {
+ let _wp = wd::watch_millis(
+ "In lock_screen_lock_bound_key: calling importKey.",
+ 500,
+ );
+ dev.importKey(
+ key_params.as_slice(),
+ KeyFormat::RAW,
+ &encrypting_key,
+ None,
+ )
+ },
+ )?;
+ entry.biometric_unlock = Some(BiometricUnlock {
+ sids: unlocking_sids.into(),
+ key_desc,
+ screen_lock_bound: LockedKey::new(&encrypting_key, &aes)?,
+ screen_lock_bound_private: LockedKey::new(&encrypting_key, &ecdh)?,
+ });
+ Ok(())
+ })();
+ // There is no reason to propagate an error here upwards. We must discard
+ // entry.screen_lock_bound* in any case.
+ if let Err(e) = res {
+ log::error!("Error setting up biometric unlock: {:#?}", e);
+ }
+ }
+ }
entry.screen_lock_bound = None;
entry.screen_lock_bound_private = None;
}
+
+ /// User has unlocked, not using a password. See if any of our stored auth tokens can be used
+ /// to unlock the keys protecting UNLOCKED_DEVICE_REQUIRED keys.
+ pub fn try_unlock_user_with_biometric(
+ &self,
+ db: &mut KeystoreDB,
+ user_id: UserId,
+ ) -> Result<()> {
+ let mut data = self.data.lock().unwrap();
+ let mut entry = data.user_keys.entry(user_id).or_default();
+ if let Some(biometric) = entry.biometric_unlock.as_ref() {
+ let (key_id_guard, key_entry) = db
+ .load_key_entry(
+ &biometric.key_desc,
+                    KeyType::Client, // TODO(b/189470584): should be a Super key, not Client.
+ KeyEntryLoadBits::KM,
+ AID_KEYSTORE,
+ |_, _| Ok(()),
+ )
+ .context("In try_unlock_user_with_biometric: load_key_entry failed")?;
+ let km_dev: KeyMintDevice = KeyMintDevice::get(SecurityLevel::TRUSTED_ENVIRONMENT)
+ .context("In try_unlock_user_with_biometric: KeyMintDevice::get failed")?;
+ for sid in &biometric.sids {
+ if let Some((auth_token_entry, _)) = db.find_auth_token_entry(|entry| {
+ entry.auth_token().userId == *sid || entry.auth_token().authenticatorId == *sid
+ }) {
+ let res: Result<(Arc<SuperKey>, Arc<SuperKey>)> = (|| {
+ let slb = biometric.screen_lock_bound.decrypt(
+ db,
+ &km_dev,
+ &key_id_guard,
+ &key_entry,
+ auth_token_entry.auth_token(),
+ None,
+ )?;
+ let slbp = biometric.screen_lock_bound_private.decrypt(
+ db,
+ &km_dev,
+ &key_id_guard,
+ &key_entry,
+ auth_token_entry.auth_token(),
+ Some(slb.clone()),
+ )?;
+ Ok((slb, slbp))
+ })();
+ match res {
+ Ok((slb, slbp)) => {
+ entry.screen_lock_bound = Some(slb.clone());
+ entry.screen_lock_bound_private = Some(slbp.clone());
+ data.add_key_to_key_index(&slb)?;
+ data.add_key_to_key_index(&slbp)?;
+ log::info!(concat!(
+ "In try_unlock_user_with_biometric: ",
+ "Successfully unlocked with biometric"
+ ));
+ return Ok(());
+ }
+ Err(e) => {
+ log::warn!("In try_unlock_user_with_biometric: attempt failed: {:?}", e)
+ }
+ }
+ }
+ }
+ }
+ Ok(())
+ }
}
/// This enum represents different states of the user's life cycle in the device.
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 48e9bfb..a110c64 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -36,7 +36,6 @@
APC_COMPAT_ERROR_IGNORED, APC_COMPAT_ERROR_OK, APC_COMPAT_ERROR_OPERATION_PENDING,
APC_COMPAT_ERROR_SYSTEM_ERROR,
};
-use std::convert::TryFrom;
use std::sync::Mutex;
/// This function uses its namesake in the permission module and in
@@ -107,11 +106,17 @@
let permission_controller: binder::Strong<dyn IPermissionController::IPermissionController> =
binder::get_interface("permission")?;
- let binder_result = permission_controller.checkPermission(
- "android.permission.READ_PRIVILEGED_PHONE_STATE",
- ThreadState::get_calling_pid(),
- ThreadState::get_calling_uid() as i32,
- );
+ let binder_result = {
+ let _wp = watchdog::watch_millis(
+ "In check_device_attestation_permissions: calling checkPermission.",
+ 500,
+ );
+ permission_controller.checkPermission(
+ "android.permission.READ_PRIVILEGED_PHONE_STATE",
+ ThreadState::get_calling_pid(),
+ ThreadState::get_calling_uid() as i32,
+ )
+ };
let has_permissions = map_binder_status(binder_result)
.context("In check_device_attestation_permissions: checkPermission failed")?;
match has_permissions {
@@ -180,19 +185,15 @@
parameters.into_iter().map(|p| p.into_authorization()).collect()
}
-/// This returns the current time (in seconds) as an instance of a monotonic clock, by invoking the
-/// system call since Rust does not support getting monotonic time instance as an integer.
-pub fn get_current_time_in_seconds() -> i64 {
+/// This returns the current time (in milliseconds) as an instance of a monotonic clock,
+/// by invoking the system call since Rust does not support getting monotonic time instance
+/// as an integer.
+pub fn get_current_time_in_milliseconds() -> i64 {
let mut current_time = libc::timespec { tv_sec: 0, tv_nsec: 0 };
// Following unsafe block includes one system call to get monotonic time.
// Therefore, it is not considered harmful.
unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC_RAW, &mut current_time) };
- // It is safe to unwrap here because try_from() returns std::convert::Infallible, which is
- // defined to be an error that can never happen (i.e. the result is always ok).
- // This suppresses the compiler's complaint about converting tv_sec to i64 in method
- // get_current_time_in_seconds.
- #[allow(clippy::useless_conversion)]
- i64::try_from(current_time.tv_sec).unwrap()
+ current_time.tv_sec as i64 * 1000 + (current_time.tv_nsec as i64 / 1_000_000)
}
/// Converts a response code as returned by the Android Protected Confirmation HIDL compatibility
@@ -223,12 +224,65 @@
/// AID offset for uid space partitioning.
pub const AID_USER_OFFSET: u32 = cutils_bindgen::AID_USER_OFFSET;
+/// AID of the keystore process itself, used for keys that
+/// keystore generates for its own use.
+pub const AID_KEYSTORE: u32 = cutils_bindgen::AID_KEYSTORE;
+
/// Extracts the android user from the given uid.
pub fn uid_to_android_user(uid: u32) -> u32 {
// Safety: No memory access
unsafe { cutils_bindgen::multiuser_get_user_id(uid) }
}
+/// This module provides helpers for simplified use of the watchdog module.
+#[cfg(feature = "watchdog")]
+pub mod watchdog {
+ pub use crate::watchdog::WatchPoint;
+ use crate::watchdog::Watchdog;
+ use lazy_static::lazy_static;
+ use std::sync::Arc;
+ use std::time::Duration;
+
+ lazy_static! {
+ /// A Watchdog thread, that can be used to create watch points.
+ static ref WD: Arc<Watchdog> = Watchdog::new(Duration::from_secs(10));
+ }
+
+ /// Sets a watch point with `id` and a timeout of `millis` milliseconds.
+ pub fn watch_millis(id: &'static str, millis: u64) -> Option<WatchPoint> {
+ Watchdog::watch(&WD, id, Duration::from_millis(millis))
+ }
+
+ /// Like `watch_millis` but with a callback that is called every time a report
+ /// is printed about this watch point.
+ pub fn watch_millis_with(
+ id: &'static str,
+ millis: u64,
+ callback: impl Fn() -> String + Send + 'static,
+ ) -> Option<WatchPoint> {
+ Watchdog::watch_with(&WD, id, Duration::from_millis(millis), callback)
+ }
+}
+
+/// This module provides empty/noop implementations of the watch dog utility functions.
+#[cfg(not(feature = "watchdog"))]
+pub mod watchdog {
+ /// Noop watch point.
+ pub struct WatchPoint();
+    /// Noop counterpart of `watch_millis`: returns `None` without arming anything.
+    pub fn watch_millis(_: &'static str, _: u64) -> Option<WatchPoint> {
+ None
+ }
+
+ pub fn watch_millis_with(
+ _: &'static str,
+ _: u64,
+ _: impl Fn() -> String + Send + 'static,
+ ) -> Option<WatchPoint> {
+ None
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/keystore2/src/vintf/Android.bp b/keystore2/src/vintf/Android.bp
index feec8ae..3ab0ec5 100644
--- a/keystore2/src/vintf/Android.bp
+++ b/keystore2/src/vintf/Android.bp
@@ -53,11 +53,11 @@
shared_libs: ["libvintf"],
bindgen_flags: [
"--size_t-is-usize",
- "--whitelist-function", "getHalNames",
- "--whitelist-function", "getHalNamesAndVersions",
- "--whitelist-function", "getHidlInstances",
- "--whitelist-function", "getAidlInstances",
- "--whitelist-function", "freeNames",
+ "--allowlist-function", "getHalNames",
+ "--allowlist-function", "getHalNamesAndVersions",
+ "--allowlist-function", "getHidlInstances",
+ "--allowlist-function", "getAidlInstances",
+ "--allowlist-function", "freeNames",
],
}
diff --git a/keystore2/src/watchdog.rs b/keystore2/src/watchdog.rs
new file mode 100644
index 0000000..9cca171
--- /dev/null
+++ b/keystore2/src/watchdog.rs
@@ -0,0 +1,326 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Can be removed when instrumentations are added to keystore.
+#![allow(dead_code)]
+
+//! This module implements a watchdog thread.
+
+use std::{
+ cmp::min,
+ collections::HashMap,
+ sync::Arc,
+ sync::{Condvar, Mutex, MutexGuard},
+ thread,
+};
+use std::{
+ marker::PhantomData,
+ time::{Duration, Instant},
+};
+
+/// Represents a Watchdog record. It can be created with `Watchdog::watch` or
+/// `Watchdog::watch_with`. It disarms the record when dropped.
+pub struct WatchPoint {
+ id: &'static str,
+ wd: Arc<Watchdog>,
+ not_send: PhantomData<*mut ()>, // WatchPoint must not be Send.
+}
+
+impl Drop for WatchPoint {
+ fn drop(&mut self) {
+ self.wd.disarm(self.id)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum State {
+ NotRunning,
+ Running,
+}
+
+#[derive(Debug, Clone, Hash, PartialEq, Eq)]
+struct Index {
+ tid: thread::ThreadId,
+ id: &'static str,
+}
+
+struct Record {
+ started: Instant,
+ deadline: Instant,
+ callback: Option<Box<dyn Fn() -> String + Send + 'static>>,
+}
+
+struct WatchdogState {
+ state: State,
+ thread: Option<thread::JoinHandle<()>>,
+ timeout: Duration,
+ records: HashMap<Index, Record>,
+ last_report: Instant,
+ has_overdue: bool,
+}
+
+impl WatchdogState {
+ fn update_overdue_and_find_next_timeout(&mut self) -> (bool, Option<Duration>) {
+ let now = Instant::now();
+ let mut next_timeout: Option<Duration> = None;
+ let mut has_overdue = false;
+ for (_, r) in self.records.iter() {
+ let timeout = r.deadline.saturating_duration_since(now);
+ if timeout == Duration::new(0, 0) {
+ has_overdue = true;
+ continue;
+ }
+ next_timeout = match next_timeout {
+ Some(nt) => {
+ if timeout < nt {
+ Some(timeout)
+ } else {
+ Some(nt)
+ }
+ }
+ None => Some(timeout),
+ };
+ }
+ (has_overdue, next_timeout)
+ }
+
+ fn log_report(&mut self, has_overdue: bool) -> bool {
+ match (self.has_overdue, has_overdue) {
+ (true, true) => {
+ if self.last_report.elapsed() < Watchdog::NOISY_REPORT_TIMEOUT {
+ self.has_overdue = false;
+ return false;
+ }
+ }
+ (_, false) => {
+ self.has_overdue = false;
+ return false;
+ }
+ (false, true) => {}
+ }
+ self.last_report = Instant::now();
+ self.has_overdue = has_overdue;
+ log::warn!("Keystore Watchdog report:");
+ log::warn!("Overdue records:");
+ let now = Instant::now();
+ for (i, r) in self.records.iter() {
+ if r.deadline.saturating_duration_since(now) == Duration::new(0, 0) {
+ match &r.callback {
+ Some(cb) => {
+ log::warn!(
+ "{:?} {} Pending: {:?} Overdue {:?}: {}",
+ i.tid,
+ i.id,
+ r.started.elapsed(),
+ r.deadline.elapsed(),
+ (cb)()
+ );
+ }
+ None => {
+ log::warn!(
+ "{:?} {} Pending: {:?} Overdue {:?}",
+ i.tid,
+ i.id,
+ r.started.elapsed(),
+ r.deadline.elapsed()
+ );
+ }
+ }
+ }
+ }
+ true
+ }
+
+ fn disarm(&mut self, index: Index) {
+ self.records.remove(&index);
+ }
+
+ fn arm(&mut self, index: Index, record: Record) {
+ if self.records.insert(index.clone(), record).is_some() {
+ log::warn!("Recursive watchdog record at \"{:?}\" replaces previous record.", index);
+ }
+ }
+}
+
+/// Watchdog spawns a thread that logs records of all overdue watch points when a deadline
+/// is missed and at least every second as long as overdue watch points exist.
+/// The thread terminates when idle for a given period of time.
+pub struct Watchdog {
+ state: Arc<(Condvar, Mutex<WatchdogState>)>,
+}
+
+impl Watchdog {
+ /// If we have overdue records, we want to be noisy about it and log a report
+ /// at least every `NOISY_REPORT_TIMEOUT` interval.
+ const NOISY_REPORT_TIMEOUT: Duration = Duration::from_secs(1);
+
+ /// Construct a [`Watchdog`]. When `timeout` has elapsed since the watchdog thread became
+ /// idle, i.e., there are no more active or overdue watch points, the watchdog thread
+ /// terminates.
+ pub fn new(timeout: Duration) -> Arc<Self> {
+ Arc::new(Self {
+ state: Arc::new((
+ Condvar::new(),
+ Mutex::new(WatchdogState {
+ state: State::NotRunning,
+ thread: None,
+ timeout,
+ records: HashMap::new(),
+ last_report: Instant::now(),
+ has_overdue: false,
+ }),
+ )),
+ })
+ }
+
+ fn watch_with_optional(
+ wd: &Arc<Self>,
+ callback: Option<Box<dyn Fn() -> String + Send + 'static>>,
+ id: &'static str,
+ timeout: Duration,
+ ) -> Option<WatchPoint> {
+ let deadline = Instant::now().checked_add(timeout);
+ if deadline.is_none() {
+ log::warn!("Deadline computation failed for WatchPoint \"{}\"", id);
+ log::warn!("WatchPoint not armed.");
+ return None;
+ }
+ wd.arm(callback, id, deadline.unwrap());
+ Some(WatchPoint { id, wd: wd.clone(), not_send: Default::default() })
+ }
+
+ /// Create a new watch point. If the WatchPoint is not dropped before the timeout
+ /// expires, a report is logged at least every second, which includes the id string
+ /// and whatever string the callback returns.
+ pub fn watch_with(
+ wd: &Arc<Self>,
+ id: &'static str,
+ timeout: Duration,
+ callback: impl Fn() -> String + Send + 'static,
+ ) -> Option<WatchPoint> {
+ Self::watch_with_optional(wd, Some(Box::new(callback)), id, timeout)
+ }
+
+ /// Like `watch_with`, but without a callback.
+ pub fn watch(wd: &Arc<Self>, id: &'static str, timeout: Duration) -> Option<WatchPoint> {
+ Self::watch_with_optional(wd, None, id, timeout)
+ }
+
+ fn arm(
+ &self,
+ callback: Option<Box<dyn Fn() -> String + Send + 'static>>,
+ id: &'static str,
+ deadline: Instant,
+ ) {
+ let tid = thread::current().id();
+ let index = Index { tid, id };
+ let record = Record { started: Instant::now(), deadline, callback };
+
+ let (ref condvar, ref state) = *self.state;
+
+ let mut state = state.lock().unwrap();
+ state.arm(index, record);
+
+ if state.state != State::Running {
+ self.spawn_thread(&mut state);
+ }
+ drop(state);
+ condvar.notify_all();
+ }
+
+ fn disarm(&self, id: &'static str) {
+ let tid = thread::current().id();
+ let index = Index { tid, id };
+ let (_, ref state) = *self.state;
+
+ let mut state = state.lock().unwrap();
+ state.disarm(index);
+ // There is no need to notify condvar. There is no action required for the
+ // watchdog thread before the next deadline.
+ }
+
+ fn spawn_thread(&self, state: &mut MutexGuard<WatchdogState>) {
+ if let Some(t) = state.thread.take() {
+ t.join().expect("Watchdog thread panicked.");
+ }
+
+ let cloned_state = self.state.clone();
+
+ state.thread = Some(thread::spawn(move || {
+ let (ref condvar, ref state) = *cloned_state;
+
+ let mut state = state.lock().unwrap();
+
+ loop {
+ let (has_overdue, next_timeout) = state.update_overdue_and_find_next_timeout();
+ state.log_report(has_overdue);
+ let (next_timeout, idle) = match (has_overdue, next_timeout) {
+ (true, Some(next_timeout)) => {
+ (min(next_timeout, Self::NOISY_REPORT_TIMEOUT), false)
+ }
+ (false, Some(next_timeout)) => (next_timeout, false),
+ (true, None) => (Self::NOISY_REPORT_TIMEOUT, false),
+ (false, None) => (state.timeout, true),
+ };
+
+ let (s, timeout) = condvar.wait_timeout(state, next_timeout).unwrap();
+ state = s;
+
+ if idle && timeout.timed_out() && state.records.is_empty() {
+ state.state = State::NotRunning;
+ break;
+ }
+ }
+ log::info!("Watchdog thread idle -> terminating. Have a great day.");
+ }));
+ state.state = State::Running;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use super::*;
+ use std::sync::atomic;
+ use std::thread;
+ use std::time::Duration;
+
+ #[test]
+ fn test_watchdog() {
+ android_logger::init_once(
+ android_logger::Config::default()
+ .with_tag("keystore2_watchdog_tests")
+ .with_min_level(log::Level::Debug),
+ );
+
+ let wd = Watchdog::new(Watchdog::NOISY_REPORT_TIMEOUT.checked_mul(3).unwrap());
+ let hit_count = Arc::new(atomic::AtomicU8::new(0));
+ let hit_count_clone = hit_count.clone();
+ let wp =
+ Watchdog::watch_with(&wd, "test_watchdog", Duration::from_millis(100), move || {
+ format!("hit_count: {}", hit_count_clone.fetch_add(1, atomic::Ordering::Relaxed))
+ });
+ assert_eq!(0, hit_count.load(atomic::Ordering::Relaxed));
+ thread::sleep(Duration::from_millis(500));
+ assert_eq!(1, hit_count.load(atomic::Ordering::Relaxed));
+ thread::sleep(Watchdog::NOISY_REPORT_TIMEOUT);
+ assert_eq!(2, hit_count.load(atomic::Ordering::Relaxed));
+ drop(wp);
+ thread::sleep(Watchdog::NOISY_REPORT_TIMEOUT.checked_mul(4).unwrap());
+ assert_eq!(2, hit_count.load(atomic::Ordering::Relaxed));
+ let (_, ref state) = *wd.state;
+ let state = state.lock().unwrap();
+ assert_eq!(state.state, State::NotRunning);
+ }
+}
diff --git a/keystore2/system_property/Android.bp b/keystore2/system_property/Android.bp
index 5a13c90..9e7b056 100644
--- a/keystore2/system_property/Android.bp
+++ b/keystore2/system_property/Android.bp
@@ -29,9 +29,9 @@
bindgen_flags: [
"--size_t-is-usize",
- "--whitelist-function=__system_property_find",
- "--whitelist-function=__system_property_read_callback",
- "--whitelist-function=__system_property_wait",
+ "--allowlist-function=__system_property_find",
+ "--allowlist-function=__system_property_read_callback",
+ "--allowlist-function=__system_property_wait",
],
}
diff --git a/keystore2/system_property/lib.rs b/keystore2/system_property/lib.rs
index f14cf0e..be13c88 100644
--- a/keystore2/system_property/lib.rs
+++ b/keystore2/system_property/lib.rs
@@ -15,8 +15,9 @@
//! This crate provides the PropertyWatcher type, which watches for changes
//! in Android system properties.
+use keystore2_system_property_bindgen::prop_info as PropInfo;
use std::os::raw::c_char;
-use std::ptr::null_mut;
+use std::ptr::null;
use std::{
ffi::{c_void, CStr, CString},
str::Utf8Error,
@@ -56,27 +57,34 @@
/// as `keystore.boot_level`; it can report the current value of this
/// property, or wait for it to change.
pub struct PropertyWatcher {
- prop_info: *const keystore2_system_property_bindgen::prop_info,
+ prop_name: CString,
+ prop_info: *const PropInfo,
serial: keystore2_system_property_bindgen::__uint32_t,
}
impl PropertyWatcher {
/// Create a PropertyWatcher for the named system property.
pub fn new(name: &str) -> Result<Self> {
- let cstr = CString::new(name)?;
- // Unsafe FFI call. We generate the CStr in this function
- // and so ensure it is valid during call.
- // Returned pointer is valid for the lifetime of the program.
- let prop_info =
- unsafe { keystore2_system_property_bindgen::__system_property_find(cstr.as_ptr()) };
- if prop_info.is_null() {
- Err(PropertyWatcherError::SystemPropertyAbsent)
+ Ok(Self { prop_name: CString::new(name)?, prop_info: null(), serial: 0 })
+ }
+
+ // Lazy-initializing accessor for self.prop_info.
+ fn get_prop_info(&mut self) -> Option<*const PropInfo> {
+ if self.prop_info.is_null() {
+ // Unsafe required for FFI call. Input and output are both const.
+ // The returned pointer is valid for the lifetime of the program.
+ self.prop_info = unsafe {
+ keystore2_system_property_bindgen::__system_property_find(self.prop_name.as_ptr())
+ };
+ }
+ if self.prop_info.is_null() {
+ None
} else {
- Ok(Self { prop_info, serial: 0 })
+ Some(self.prop_info)
}
}
- fn read_raw(&self, mut f: impl FnOnce(Option<&CStr>, Option<&CStr>)) {
+ fn read_raw(prop_info: *const PropInfo, mut f: impl FnOnce(Option<&CStr>, Option<&CStr>)) {
// Unsafe function converts values passed to us by
// __system_property_read_callback to Rust form
// and pass them to inner callback.
@@ -98,7 +106,7 @@
// to a void pointer, and unwrap it in our callback.
unsafe {
keystore2_system_property_bindgen::__system_property_read_callback(
- self.prop_info,
+ prop_info,
Some(callback),
&mut f as *mut _ as *mut c_void,
)
@@ -108,12 +116,14 @@
/// Call the passed function, passing it the name and current value
/// of this system property. See documentation for
/// `__system_property_read_callback` for details.
- pub fn read<T, F>(&self, f: F) -> Result<T>
+ /// Returns an error if the property is empty or doesn't exist.
+ pub fn read<T, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&str, &str) -> anyhow::Result<T>,
{
+ let prop_info = self.get_prop_info().ok_or(PropertyWatcherError::SystemPropertyAbsent)?;
let mut result = Err(PropertyWatcherError::ReadCallbackNotCalled);
- self.read_raw(|name, value| {
+ Self::read_raw(prop_info, |name, value| {
// use a wrapping closure as an erzatz try block.
result = (|| {
let name = name.ok_or(PropertyWatcherError::MissingCString)?.to_str()?;
@@ -124,10 +134,43 @@
result
}
+ // Waits for the property that self is watching to be created. Returns immediately if the
+ // property already exists.
+ fn wait_for_property_creation(&mut self) -> Result<()> {
+ let mut global_serial = 0;
+ loop {
+ match self.get_prop_info() {
+ Some(_) => return Ok(()),
+ None => {
+ // Unsafe call for FFI. The function modifies only global_serial, and has
+ // no side-effects.
+ if !unsafe {
+ // Wait for a global serial number change, then try again. On success,
+ // the function will update global_serial with the last version seen.
+ keystore2_system_property_bindgen::__system_property_wait(
+ null(),
+ global_serial,
+ &mut global_serial,
+ null(),
+ )
+ } {
+ return Err(PropertyWatcherError::WaitFailed);
+ }
+ }
+ }
+ }
+ }
+
/// Wait for the system property to change. This
/// records the serial number of the last change, so
/// race conditions are avoided.
pub fn wait(&mut self) -> Result<()> {
+ // If the property is null, then wait for it to be created. Subsequent waits will
+ // skip this step and wait for our specific property to change.
+ if self.prop_info.is_null() {
+ return self.wait_for_property_creation();
+ }
+
let mut new_serial = self.serial;
// Unsafe block to call __system_property_wait.
// All arguments are private to PropertyWatcher so we
@@ -137,7 +180,7 @@
self.prop_info,
self.serial,
&mut new_serial,
- null_mut(),
+ null(),
)
} {
return Err(PropertyWatcherError::WaitFailed);
diff --git a/keystore2/vpnprofilestore/lib.rs b/keystore2/vpnprofilestore/lib.rs
index f92eacd..df2731a 100644
--- a/keystore2/vpnprofilestore/lib.rs
+++ b/keystore2/vpnprofilestore/lib.rs
@@ -18,31 +18,54 @@
IVpnProfileStore::BnVpnProfileStore, IVpnProfileStore::IVpnProfileStore,
IVpnProfileStore::ERROR_PROFILE_NOT_FOUND, IVpnProfileStore::ERROR_SYSTEM_ERROR,
};
-use android_security_vpnprofilestore::binder::{Result as BinderResult, Status as BinderStatus};
-use anyhow::{Context, Result};
-use binder::{ExceptionCode, Strong, ThreadState};
-use keystore2::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use android_security_vpnprofilestore::binder::{
+ BinderFeatures, ExceptionCode, Result as BinderResult, Status as BinderStatus, Strong,
+ ThreadState,
+};
+use anyhow::{anyhow, Context, Result};
+use keystore2::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader, utils::watchdog as wd};
use rusqlite::{
params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
};
use std::{
collections::HashSet,
path::{Path, PathBuf},
+ sync::Once,
};
+static DB_SET_WAL_MODE: Once = Once::new();
+
struct DB {
conn: Connection,
}
impl DB {
fn new(db_file: &Path) -> Result<Self> {
+ DB_SET_WAL_MODE.call_once(|| {
+ log::info!("Setting VpnProfileStore database to WAL mode first time since boot.");
+ Self::set_wal_mode(&db_file).expect("In vpnprofilestore: Could not set WAL mode.");
+ });
+
let mut db = Self {
conn: Connection::open(db_file).context("Failed to initialize SQLite connection.")?,
};
+
db.init_tables().context("Trying to initialize vpnstore db.")?;
Ok(db)
}
+ fn set_wal_mode(db_file: &Path) -> Result<()> {
+ let conn = Connection::open(db_file)
+ .context("In VpnProfileStore set_wal_mode: Failed to open DB.")?;
+ let mode: String = conn
+ .pragma_update_and_check(None, "journal_mode", &"WAL", |row| row.get(0))
+ .context("In VpnProfileStore set_wal_mode: Failed to set journal_mode")?;
+ match mode.as_str() {
+ "wal" => Ok(()),
+ _ => Err(anyhow!("Unable to set WAL mode, db is still in {} mode.", mode)),
+ }
+ }
+
fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
where
F: Fn(&Transaction) -> Result<T>,
@@ -71,15 +94,11 @@
}
fn is_locked_error(e: &anyhow::Error) -> bool {
- matches!(e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
- Some(rusqlite::ffi::Error {
- code: rusqlite::ErrorCode::DatabaseBusy,
- ..
- })
- | Some(rusqlite::ffi::Error {
- code: rusqlite::ErrorCode::DatabaseLocked,
- ..
- }))
+ matches!(
+ e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
+ Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. })
+ | Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseLocked, .. })
+ )
}
fn init_tables(&mut self) -> Result<()> {
@@ -173,7 +192,7 @@
/// This function should be used by vpnprofilestore service calls to translate error conditions
/// into service specific exceptions.
///
-/// All error conditions get logged by this function.
+/// All error conditions get logged by this function, except for ERROR_PROFILE_NOT_FOUND error.
///
/// `Error::Error(x)` variants get mapped onto a service specific error code of `x`.
///
@@ -188,12 +207,16 @@
{
result.map_or_else(
|e| {
- log::error!("{:#?}", e);
let root_cause = e.root_cause();
- let rc = match root_cause.downcast_ref::<Error>() {
- Some(Error::Error(e)) => *e,
- Some(Error::Binder(_, _)) | None => ERROR_SYSTEM_ERROR,
+ let (rc, log_error) = match root_cause.downcast_ref::<Error>() {
+ // Make the profile not found errors silent.
+ Some(Error::Error(ERROR_PROFILE_NOT_FOUND)) => (ERROR_PROFILE_NOT_FOUND, false),
+ Some(Error::Error(e)) => (*e, true),
+ Some(Error::Binder(_, _)) | None => (ERROR_SYSTEM_ERROR, true),
};
+ if log_error {
+ log::error!("{:?}", e);
+ }
Err(BinderStatus::new_service_specific_error(rc, None))
},
handle_ok,
@@ -220,7 +243,7 @@
let result = Self { db_path, async_task: Default::default() };
result.init_shelf(path);
- BnVpnProfileStore::new_binder(result)
+ BnVpnProfileStore::new_binder(result, BinderFeatures::default())
}
fn open_db(&self) -> Result<DB> {
@@ -360,15 +383,19 @@
impl IVpnProfileStore for VpnProfileStore {
fn get(&self, alias: &str) -> BinderResult<Vec<u8>> {
+ let _wp = wd::watch_millis("IVpnProfileStore::get", 500);
map_or_log_err(self.get(alias), Ok)
}
fn put(&self, alias: &str, profile: &[u8]) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IVpnProfileStore::put", 500);
map_or_log_err(self.put(alias, profile), Ok)
}
fn remove(&self, alias: &str) -> BinderResult<()> {
+ let _wp = wd::watch_millis("IVpnProfileStore::remove", 500);
map_or_log_err(self.remove(alias), Ok)
}
fn list(&self, prefix: &str) -> BinderResult<Vec<String>> {
+ let _wp = wd::watch_millis("IVpnProfileStore::list", 500);
map_or_log_err(self.list(prefix), Ok)
}
}
@@ -377,7 +404,12 @@
mod db_test {
use super::*;
use keystore2_test_utils::TempDir;
+ use std::sync::Arc;
+ use std::thread;
+ use std::time::Duration;
+ use std::time::Instant;
+ static TEST_ALIAS: &str = &"test_alias";
static TEST_BLOB1: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
static TEST_BLOB2: &[u8] = &[2, 2, 3, 4, 5, 6, 7, 8, 9, 0];
static TEST_BLOB3: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
@@ -440,4 +472,107 @@
db.get(2, "test1").expect("Failed to get profile.").as_deref()
);
}
+
+ #[test]
+ fn concurrent_vpn_profile_test() -> Result<()> {
+ let temp_dir = Arc::new(
+ TempDir::new("concurrent_vpn_profile_test_").expect("Failed to create temp dir."),
+ );
+
+ let db_path = temp_dir.build().push("vpnprofile.sqlite").to_owned();
+
+ let test_begin = Instant::now();
+
+ let mut db = DB::new(&db_path).expect("Failed to open database.");
+ const PROFILE_COUNT: u32 = 5000u32;
+ const PROFILE_DB_COUNT: u32 = 5000u32;
+
+ let mode: String = db.conn.pragma_query_value(None, "journal_mode", |row| row.get(0))?;
+ assert_eq!(mode, "wal");
+
+ let mut actual_profile_count = PROFILE_COUNT;
+ // First insert PROFILE_COUNT profiles.
+ for count in 0..PROFILE_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(15) {
+ actual_profile_count = count;
+ break;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.put(1, &alias, TEST_BLOB1).expect("Failed to add profile (1).");
+ }
+
+ // Insert more keys from a different thread and into a different namespace.
+ let db_path1 = db_path.clone();
+ let handle1 = thread::spawn(move || {
+ let mut db = DB::new(&db_path1).expect("Failed to open database.");
+
+ for count in 0..actual_profile_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.put(2, &alias, TEST_BLOB2).expect("Failed to add profile (2).");
+ }
+
+ // Then delete them again.
+ for count in 0..actual_profile_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.remove(2, &alias).expect("Remove Failed (2).");
+ }
+ });
+
+ // And start deleting the first set of profiles.
+ let db_path2 = db_path.clone();
+ let handle2 = thread::spawn(move || {
+ let mut db = DB::new(&db_path2).expect("Failed to open database.");
+
+ for count in 0..actual_profile_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+ db.remove(1, &alias).expect("Remove Failed (1)).");
+ }
+ });
+
+ // While a lot of inserting and deleting is going on we have to open database connections
+ // successfully and then insert and delete a specific profile.
+ let db_path3 = db_path.clone();
+ let handle3 = thread::spawn(move || {
+ for _count in 0..PROFILE_DB_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let mut db = DB::new(&db_path3).expect("Failed to open database.");
+
+ db.put(3, &TEST_ALIAS, TEST_BLOB3).expect("Failed to add profile (3).");
+
+ db.remove(3, &TEST_ALIAS).expect("Remove failed (3).");
+ }
+ });
+
+ // While thread 3 is inserting and deleting TEST_ALIAS, we try to get the alias.
+ // This may yield an entry or none, but it must not fail.
+ let handle4 = thread::spawn(move || {
+ for _count in 0..PROFILE_DB_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let mut db = DB::new(&db_path).expect("Failed to open database.");
+
+ // This may return Some or None but it must not fail.
+ db.get(3, &TEST_ALIAS).expect("Failed to get profile (4).");
+ }
+ });
+
+ handle1.join().expect("Thread 1 panicked.");
+ handle2.join().expect("Thread 2 panicked.");
+ handle3.join().expect("Thread 3 panicked.");
+ handle4.join().expect("Thread 4 panicked.");
+
+ Ok(())
+ }
}
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index 2e5e02e..432e585 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -84,22 +84,19 @@
srcs: [
"odsign_main.cpp",
"CertUtils.cpp",
- "Keymaster.cpp",
- "KeymasterSigningKey.cpp",
"KeystoreKey.cpp",
+ "KeystoreHmacKey.cpp",
"VerityUtils.cpp",
],
header_libs: ["odrefresh_headers"],
static_libs: [
- "libmini_keyctl_static", // TODO need static?
"libc++fs",
"lib_odsign_proto",
],
shared_libs: [
- "android.hardware.keymaster@4.1",
"android.system.keystore2-V1-cpp",
"android.hardware.security.keymint-V1-cpp",
"libbase",
@@ -107,11 +104,7 @@
"libcrypto",
"libcrypto_utils",
"libfsverity",
- "libhidlbase",
"liblogwrap",
- "libkeymaster4support", // For authorization_set
- "libkeymaster4_1support",
- "libkeyutils",
"libprotobuf-cpp-full",
"libutils",
],
diff --git a/ondevice-signing/CertUtils.cpp b/ondevice-signing/CertUtils.cpp
index b0b75a6..9867f62 100644
--- a/ondevice-signing/CertUtils.cpp
+++ b/ondevice-signing/CertUtils.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "CertUtils.h"
+
#include <android-base/logging.h>
#include <android-base/result.h>
@@ -21,6 +23,7 @@
#include <openssl/crypto.h>
#include <openssl/pkcs7.h>
#include <openssl/rsa.h>
+#include <openssl/x509.h>
#include <openssl/x509v3.h>
#include <fcntl.h>
@@ -31,6 +34,7 @@
const char kBasicConstraints[] = "CA:TRUE";
const char kKeyUsage[] = "critical,keyCertSign,cRLSign,digitalSignature";
const char kSubjectKeyIdentifier[] = "hash";
+const char kAuthorityKeyIdentifier[] = "keyid:always";
constexpr int kCertLifetimeSeconds = 10 * 365 * 24 * 60 * 60;
using android::base::Result;
@@ -56,12 +60,17 @@
}
Result<bssl::UniquePtr<RSA>> getRsa(const std::vector<uint8_t>& publicKey) {
+ bssl::UniquePtr<BIGNUM> n(BN_new());
+ bssl::UniquePtr<BIGNUM> e(BN_new());
bssl::UniquePtr<RSA> rsaPubkey(RSA_new());
- rsaPubkey->n = BN_new();
- rsaPubkey->e = BN_new();
-
- BN_bin2bn(publicKey.data(), publicKey.size(), rsaPubkey->n);
- BN_set_word(rsaPubkey->e, kRsaKeyExponent);
+ if (!n || !e || !rsaPubkey || !BN_bin2bn(publicKey.data(), publicKey.size(), n.get()) ||
+ !BN_set_word(e.get(), kRsaKeyExponent) ||
+ !RSA_set0_key(rsaPubkey.get(), n.get(), e.get(), /*d=*/nullptr)) {
+ return Error() << "Failed to create RSA key";
+ }
+ // RSA_set0_key takes ownership of |n| and |e| on success.
+ (void)n.release();
+ (void)e.release();
return rsaPubkey;
}
@@ -69,6 +78,9 @@
Result<void> verifySignature(const std::string& message, const std::string& signature,
const std::vector<uint8_t>& publicKey) {
auto rsaKey = getRsa(publicKey);
+ if (!rsaKey.ok()) {
+ return rsaKey.error();
+ }
uint8_t hashBuf[SHA256_DIGEST_LENGTH];
SHA256(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(message.c_str())),
message.length(), hashBuf);
@@ -82,6 +94,21 @@
return {};
}
+static Result<bssl::UniquePtr<EVP_PKEY>> toRsaPkey(const std::vector<uint8_t>& publicKey) {
+ // "publicKey" corresponds to the raw public key bytes - need to create
+ // a new RSA key with the correct exponent.
+ auto rsaPubkey = getRsa(publicKey);
+ if (!rsaPubkey.ok()) {
+ return rsaPubkey.error();
+ }
+
+ bssl::UniquePtr<EVP_PKEY> public_key(EVP_PKEY_new());
+ if (!EVP_PKEY_assign_RSA(public_key.get(), rsaPubkey->release())) {
+ return Error() << "Failed to assign key";
+ }
+ return public_key;
+}
+
Result<void> createSelfSignedCertificate(
const std::vector<uint8_t>& publicKey,
const std::function<Result<std::string>(const std::string&)>& signFunction,
@@ -96,14 +123,12 @@
X509_gmtime_adj(X509_get_notBefore(x509.get()), 0);
X509_gmtime_adj(X509_get_notAfter(x509.get()), kCertLifetimeSeconds);
- // "publicKey" corresponds to the raw public key bytes - need to create
- // a new RSA key with the correct exponent.
- auto rsaPubkey = getRsa(publicKey);
+ auto public_key = toRsaPkey(publicKey);
+ if (!public_key.ok()) {
+ return public_key.error();
+ }
- EVP_PKEY* public_key = EVP_PKEY_new();
- EVP_PKEY_assign_RSA(public_key, rsaPubkey->release());
-
- if (!X509_set_pubkey(x509.get(), public_key)) {
+ if (!X509_set_pubkey(x509.get(), public_key.value().get())) {
return Error() << "Unable to set x509 public key";
}
@@ -124,29 +149,32 @@
add_ext(x509.get(), NID_basic_constraints, kBasicConstraints);
add_ext(x509.get(), NID_key_usage, kKeyUsage);
add_ext(x509.get(), NID_subject_key_identifier, kSubjectKeyIdentifier);
- add_ext(x509.get(), NID_authority_key_identifier, "keyid:always");
+ add_ext(x509.get(), NID_authority_key_identifier, kAuthorityKeyIdentifier);
- X509_ALGOR_set0(x509->cert_info->signature, OBJ_nid2obj(NID_sha256WithRSAEncryption),
- V_ASN1_NULL, NULL);
- X509_ALGOR_set0(x509->sig_alg, OBJ_nid2obj(NID_sha256WithRSAEncryption), V_ASN1_NULL, NULL);
+ bssl::UniquePtr<X509_ALGOR> algor(X509_ALGOR_new());
+ if (!algor ||
+ !X509_ALGOR_set0(algor.get(), OBJ_nid2obj(NID_sha256WithRSAEncryption), V_ASN1_NULL,
+ NULL) ||
+ !X509_set1_signature_algo(x509.get(), algor.get())) {
+ return Error() << "Unable to set x509 signature algorithm";
+ }
// Get the data to be signed
- char* to_be_signed_buf(nullptr);
- size_t to_be_signed_length = i2d_re_X509_tbs(x509.get(), (unsigned char**)&to_be_signed_buf);
+ unsigned char* to_be_signed_buf(nullptr);
+ size_t to_be_signed_length = i2d_re_X509_tbs(x509.get(), &to_be_signed_buf);
- auto signed_data = signFunction(std::string(to_be_signed_buf, to_be_signed_length));
+ auto signed_data = signFunction(
+ std::string(reinterpret_cast<const char*>(to_be_signed_buf), to_be_signed_length));
if (!signed_data.ok()) {
return signed_data.error();
}
- // This is the only part that doesn't use boringssl default functions - we manually copy in the
- // signature that was provided to us.
- x509->signature->data = (unsigned char*)OPENSSL_malloc(signed_data->size());
- memcpy(x509->signature->data, signed_data->c_str(), signed_data->size());
- x509->signature->length = signed_data->size();
+ if (!X509_set1_signature_value(x509.get(),
+ reinterpret_cast<const uint8_t*>(signed_data->data()),
+ signed_data->size())) {
+ return Error() << "Unable to set x509 signature";
+ }
- x509->signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07);
- x509->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT;
auto f = fopen(path.c_str(), "wbe");
if (f == nullptr) {
return Error() << "Failed to open " << path;
@@ -154,7 +182,6 @@
i2d_X509_fp(f, x509.get());
fclose(f);
- EVP_PKEY_free(public_key);
return {};
}
@@ -163,15 +190,14 @@
return Error() << "Failed to extract public key from x509 cert";
}
- if (EVP_PKEY_type(pkey->type) != EVP_PKEY_RSA) {
+ if (EVP_PKEY_id(pkey) != EVP_PKEY_RSA) {
return Error() << "The public key is not an RSA key";
}
- RSA* rsa = EVP_PKEY_get1_RSA(pkey);
- auto num_bytes = BN_num_bytes(rsa->n);
+ RSA* rsa = EVP_PKEY_get0_RSA(pkey);
+ auto num_bytes = BN_num_bytes(RSA_get0_n(rsa));
std::vector<uint8_t> pubKey(num_bytes);
- int res = BN_bn2bin(rsa->n, pubKey.data());
- RSA_free(rsa);
+ int res = BN_bn2bin(RSA_get0_n(rsa), pubKey.data());
if (!res) {
return Error() << "Failed to convert public key to bytes";
@@ -183,14 +209,14 @@
Result<std::vector<uint8_t>>
extractPublicKeyFromSubjectPublicKeyInfo(const std::vector<uint8_t>& keyData) {
auto keyDataBytes = keyData.data();
- EVP_PKEY* public_key = d2i_PUBKEY(nullptr, &keyDataBytes, keyData.size());
+ bssl::UniquePtr<EVP_PKEY> public_key(d2i_PUBKEY(nullptr, &keyDataBytes, keyData.size()));
- return extractPublicKey(public_key);
+ return extractPublicKey(public_key.get());
}
-Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::vector<uint8_t>& keyData) {
- auto keyDataBytes = keyData.data();
- bssl::UniquePtr<X509> decoded_cert(d2i_X509(nullptr, &keyDataBytes, keyData.size()));
+Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::vector<uint8_t>& derCert) {
+ auto derCertBytes = derCert.data();
+ bssl::UniquePtr<X509> decoded_cert(d2i_X509(nullptr, &derCertBytes, derCert.size()));
if (decoded_cert.get() == nullptr) {
return Error() << "Failed to decode X509 certificate.";
}
@@ -199,19 +225,71 @@
return extractPublicKey(decoded_pkey.get());
}
-Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path) {
- X509* cert;
+static Result<bssl::UniquePtr<X509>> loadX509(const std::string& path) {
+ X509* rawCert;
auto f = fopen(path.c_str(), "re");
if (f == nullptr) {
return Error() << "Failed to open " << path;
}
- if (!d2i_X509_fp(f, &cert)) {
+ if (!d2i_X509_fp(f, &rawCert)) {
fclose(f);
return Error() << "Unable to decode x509 cert at " << path;
}
+ bssl::UniquePtr<X509> cert(rawCert);
fclose(f);
- return extractPublicKey(X509_get_pubkey(cert));
+ return cert;
+}
+
+Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path) {
+ auto cert = loadX509(path);
+ if (!cert.ok()) {
+ return cert.error();
+ }
+ return extractPublicKey(X509_get_pubkey(cert.value().get()));
+}
+
+Result<CertInfo> verifyAndExtractCertInfoFromX509(const std::string& path,
+ const std::vector<uint8_t>& publicKey) {
+ auto public_key = toRsaPkey(publicKey);
+ if (!public_key.ok()) {
+ return public_key.error();
+ }
+
+ auto cert = loadX509(path);
+ if (!cert.ok()) {
+ return cert.error();
+ }
+ X509* x509 = cert.value().get();
+
+ // Make sure we signed it.
+ if (X509_verify(x509, public_key.value().get()) != 1) {
+ return Error() << "Failed to verify certificate.";
+ }
+
+ bssl::UniquePtr<EVP_PKEY> pkey(X509_get_pubkey(x509));
+ auto subject_key = extractPublicKey(pkey.get());
+ if (!subject_key.ok()) {
+ return subject_key.error();
+ }
+
+ // The pointers here are all owned by x509, and each function handles an
+ // error return from the previous call correctly.
+ X509_NAME* name = X509_get_subject_name(x509);
+ int index = X509_NAME_get_index_by_NID(name, NID_commonName, -1);
+ X509_NAME_ENTRY* entry = X509_NAME_get_entry(name, index);
+ ASN1_STRING* asn1cn = X509_NAME_ENTRY_get_data(entry);
+ unsigned char* utf8cn;
+ int length = ASN1_STRING_to_UTF8(&utf8cn, asn1cn);
+ if (length < 0) {
+ return Error() << "Failed to read subject CN";
+ }
+
+ bssl::UniquePtr<unsigned char> utf8owner(utf8cn);
+ std::string cn(reinterpret_cast<char*>(utf8cn), static_cast<size_t>(length));
+
+ CertInfo cert_info{std::move(cn), std::move(subject_key.value())};
+ return cert_info;
}
Result<std::vector<uint8_t>> createPkcs7(const std::vector<uint8_t>& signed_digest) {
diff --git a/ondevice-signing/CertUtils.h b/ondevice-signing/CertUtils.h
index 66dff04..d202fbc 100644
--- a/ondevice-signing/CertUtils.h
+++ b/ondevice-signing/CertUtils.h
@@ -18,6 +18,11 @@
#include <android-base/result.h>
+struct CertInfo {
+ std::string subjectCn;
+ std::vector<uint8_t> subjectKey;
+};
+
android::base::Result<void> createSelfSignedCertificate(
const std::vector<uint8_t>& publicKey,
const std::function<android::base::Result<std::string>(const std::string&)>& signFunction,
@@ -30,6 +35,9 @@
extractPublicKeyFromSubjectPublicKeyInfo(const std::vector<uint8_t>& subjectKeyInfo);
android::base::Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path);
+android::base::Result<CertInfo>
+verifyAndExtractCertInfoFromX509(const std::string& path, const std::vector<uint8_t>& publicKey);
+
android::base::Result<void> verifySignature(const std::string& message,
const std::string& signature,
const std::vector<uint8_t>& publicKey);
diff --git a/ondevice-signing/KeyConstants.h b/ondevice-signing/KeyConstants.h
index 9e1a513..ccc9251 100644
--- a/ondevice-signing/KeyConstants.h
+++ b/ondevice-signing/KeyConstants.h
@@ -16,3 +16,6 @@
static constexpr int kRsaKeySize = 2048;
static constexpr int kRsaKeyExponent = 65537;
+
+static constexpr int kHmacKeySize = 256;
+static constexpr int kHmacMinMacLength = 256;
diff --git a/ondevice-signing/Keymaster.cpp b/ondevice-signing/Keymaster.cpp
deleted file mode 100644
index 6cfb565..0000000
--- a/ondevice-signing/Keymaster.cpp
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string>
-
-#include <android-base/logging.h>
-#include <keymasterV4_1/Keymaster.h>
-#include <keymasterV4_1/authorization_set.h>
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include "Keymaster.h"
-
-using AuthorizationSet = ::android::hardware::keymaster::V4_0::AuthorizationSet;
-using AuthorizationSetBuilder = ::android::hardware::keymaster::V4_0::AuthorizationSetBuilder;
-using Digest = ::android::hardware::keymaster::V4_0::Digest;
-using ErrorCode = ::android::hardware::keymaster::V4_0::ErrorCode;
-using HardwareAuthToken = ::android::hardware::keymaster::V4_0::HardwareAuthToken;
-using HidlBuf = ::android::hardware::hidl_vec<uint8_t>;
-using KeyCharacteristics = ::android::hardware::keymaster::V4_0::KeyCharacteristics;
-using KeyFormat = ::android::hardware::keymaster::V4_0::KeyFormat;
-using KeyParameter = ::android::hardware::keymaster::V4_0::KeyParameter;
-using KeyPurpose = ::android::hardware::keymaster::V4_0::KeyPurpose;
-using KmSupport = ::android::hardware::keymaster::V4_1::support::Keymaster;
-using KmDevice = ::android::hardware::keymaster::V4_1::IKeymasterDevice;
-using OperationHandle = ::android::hardware::keymaster::V4_0::OperationHandle;
-using PaddingMode = ::android::hardware::keymaster::V4_0::PaddingMode;
-using VerificationToken = ::android::hardware::keymaster::V4_0::VerificationToken;
-
-using android::sp;
-using android::base::Error;
-using android::base::Result;
-using android::base::unique_fd;
-using android::hardware::hidl_vec;
-
-Keymaster::Keymaster() {}
-
-bool Keymaster::initialize() {
- // TODO(b/165630556): Stop using Keymaster directly and migrate to keystore2
- // (once available).
- auto devices = KmSupport::enumerateAvailableDevices();
- sp<KmDevice> devToUse = nullptr;
- for (const auto& dev : devices) {
- auto version = dev->halVersion();
- if (version.majorVersion > 4 || (version.majorVersion == 4 && version.minorVersion >= 1)) {
- // TODO we probably have a preference for the SE, hoping Keystore2 will provide this
- LOG(INFO) << "Using keymaster " << version.keymasterName << " "
- << (int)version.majorVersion << "." << (int)version.minorVersion;
- devToUse = dev;
- break;
- }
- }
-
- if (devToUse == nullptr) {
- LOG(WARNING) << "Didn't find a keymaster to use.";
- }
- mDevice = devToUse;
-
- return mDevice != nullptr;
-}
-
-std::optional<Keymaster> Keymaster::getInstance() {
- static Keymaster keymaster;
-
- if (!keymaster.initialize()) {
- return {};
- } else {
- return {keymaster};
- }
-}
-
-Result<std::vector<uint8_t>> Keymaster::createKey() const {
- ErrorCode error;
- HidlBuf keyBlob;
-
- auto params = AuthorizationSetBuilder()
- .Authorization(::android::hardware::keymaster::V4_0::TAG_NO_AUTH_REQUIRED)
- // TODO MAKE SURE WE ADD THE EARLY_BOOT_ONLY FLAG here
- // currently doesn't work on cuttlefish (b/173618442)
- //.Authorization(::android::hardware::keymaster::V4_1::TAG_EARLY_BOOT_ONLY)
- .RsaSigningKey(2048, 65537)
- .Digest(Digest::SHA_2_256)
- .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN);
-
- mDevice->generateKey(params.hidl_data(), [&](ErrorCode hidl_error, const HidlBuf& hidl_key_blob,
- const KeyCharacteristics&
- /* hidl_key_characteristics */) {
- error = hidl_error;
- keyBlob = hidl_key_blob;
- });
-
- if (error != ErrorCode::OK) {
- return Error() << "Error creating keymaster signing key: "
- << static_cast<std::underlying_type<ErrorCode>::type>(error);
- }
-
- return keyBlob;
-}
-
-static ErrorCode Begin(const sp<KmDevice>& keymaster_, KeyPurpose purpose, const HidlBuf& key_blob,
- const AuthorizationSet& in_params, AuthorizationSet* out_params,
- OperationHandle* op_handle) {
- ErrorCode error;
- OperationHandle saved_handle = *op_handle;
- CHECK(keymaster_
- ->begin(purpose, key_blob, in_params.hidl_data(), HardwareAuthToken(),
- [&](ErrorCode hidl_error, const hidl_vec<KeyParameter>& hidl_out_params,
- uint64_t hidl_op_handle) {
- error = hidl_error;
- *out_params = hidl_out_params;
- *op_handle = hidl_op_handle;
- })
- .isOk());
- if (error != ErrorCode::OK) {
- // Some implementations may modify *op_handle on error.
- *op_handle = saved_handle;
- }
- return error;
-}
-
-static ErrorCode Update(const sp<KmDevice>& keymaster_, OperationHandle op_handle,
- const AuthorizationSet& in_params, const std::string& input,
- AuthorizationSet* out_params, std::string* output, size_t* input_consumed) {
- ErrorCode error;
- HidlBuf inputData(input.size());
- memcpy(inputData.data(), input.c_str(), input.size());
- CHECK(keymaster_
- ->update(op_handle, in_params.hidl_data(), inputData, HardwareAuthToken(),
- VerificationToken(),
- [&](ErrorCode hidl_error, uint32_t hidl_input_consumed,
- const hidl_vec<KeyParameter>& hidl_out_params,
- const HidlBuf& hidl_output) {
- error = hidl_error;
- out_params->push_back(AuthorizationSet(hidl_out_params));
- std::string retdata(reinterpret_cast<const char*>(hidl_output.data()),
- hidl_output.size());
- output->append(retdata);
- *input_consumed = hidl_input_consumed;
- })
- .isOk());
- return error;
-}
-
-static ErrorCode Finish(const sp<KmDevice>& keymaster_, OperationHandle op_handle,
- const AuthorizationSet& in_params, const std::string& input,
- const std::string& signature, AuthorizationSet* out_params,
- std::string* output) {
- ErrorCode error;
- HidlBuf inputData(input.size());
- memcpy(inputData.data(), input.c_str(), input.size());
- HidlBuf signatureData(signature.size());
- memcpy(signatureData.data(), signature.c_str(), signature.size());
- // TODO still need to handle error -62 - key requires upgrade
- CHECK(keymaster_
- ->finish(op_handle, in_params.hidl_data(), inputData, signatureData,
- HardwareAuthToken(), VerificationToken(),
- [&](ErrorCode hidl_error, const hidl_vec<KeyParameter>& hidl_out_params,
- const HidlBuf& hidl_output) {
- error = hidl_error;
- *out_params = hidl_out_params;
- std::string retdata(reinterpret_cast<const char*>(hidl_output.data()),
- hidl_output.size());
- output->append(retdata);
- })
- .isOk());
- return error;
-}
-
-static std::string ProcessMessage(const sp<KmDevice>& keymaster_, const HidlBuf& key_blob,
- KeyPurpose operation, const std::string& message,
- const AuthorizationSet& in_params, AuthorizationSet* out_params) {
- AuthorizationSet begin_out_params;
- OperationHandle op_handle_;
- ErrorCode ec =
- Begin(keymaster_, operation, key_blob, in_params, &begin_out_params, &op_handle_);
-
- std::string output;
- size_t consumed = 0;
- AuthorizationSet update_params;
- AuthorizationSet update_out_params;
- ec = Update(keymaster_, op_handle_, update_params, message, &update_out_params, &output,
- &consumed);
-
- std::string unused;
- AuthorizationSet finish_params;
- AuthorizationSet finish_out_params;
- ec = Finish(keymaster_, op_handle_, finish_params, message.substr(consumed), unused,
- &finish_out_params, &output);
-
- out_params->push_back(begin_out_params);
- out_params->push_back(finish_out_params);
- return output;
-}
-
-Result<std::vector<uint8_t>>
-Keymaster::extractPublicKey(const std::vector<uint8_t>& keyBlob) const {
- std::vector<uint8_t> publicKey;
- ErrorCode error;
-
- mDevice->exportKey(KeyFormat::X509, keyBlob, {} /* clientId */, {} /* appData */,
- [&](ErrorCode hidl_error, const HidlBuf& keyData) {
- error = hidl_error;
- publicKey = keyData;
- });
-
- if (error != ErrorCode::OK) {
- return Error() << "Error extracting public key: "
- << static_cast<std::underlying_type<ErrorCode>::type>(error);
- }
-
- return publicKey;
-}
-
-Result<KeymasterVerifyResult> Keymaster::verifyKey(const std::vector<uint8_t>& keyBlob) const {
- ErrorCode error;
- KeyCharacteristics characteristics;
-
- mDevice->getKeyCharacteristics(
- keyBlob, {} /* clientId */, {} /* appData */,
- [&](ErrorCode hidl_error, const KeyCharacteristics& hidl_characteristics) {
- error = hidl_error;
- characteristics = hidl_characteristics;
- });
-
- if (error == ErrorCode::KEY_REQUIRES_UPGRADE) {
- return KeymasterVerifyResult::UPGRADE;
- }
-
- if (error != ErrorCode::OK) {
- return Error() << "Error getting key characteristics: "
- << static_cast<std::underlying_type<ErrorCode>::type>(error);
- }
-
- // TODO(b/165630556)
- // Verify this is an early boot key and the other key parameters
- return KeymasterVerifyResult::OK;
-}
-
-Result<std::vector<uint8_t>> Keymaster::upgradeKey(const std::vector<uint8_t>& keyBlob) const {
- ErrorCode error;
- HidlBuf newKeyBlob;
-
- // TODO deduplicate
- auto params = AuthorizationSetBuilder()
- .Authorization(::android::hardware::keymaster::V4_0::TAG_NO_AUTH_REQUIRED)
- // TODO MAKE SURE WE ADD THE EARLY_BOOT_ONLY FLAG here
- // currently doesn't work on cuttlefish (b/173618442)
- //.Authorization(::android::hardware::keymaster::V4_1::TAG_EARLY_BOOT_ONLY)
- .RsaSigningKey(2048, 65537)
- .Digest(Digest::SHA_2_256)
- .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN);
-
- mDevice->upgradeKey(keyBlob, params.hidl_data(),
- [&](ErrorCode hidl_error, const HidlBuf& hidl_key_blob) {
- error = hidl_error;
- newKeyBlob = hidl_key_blob;
- });
-
- if (error != ErrorCode::OK) {
- return Error() << "Error upgrading keymaster signing key: "
- << static_cast<std::underlying_type<ErrorCode>::type>(error);
- }
-
- return newKeyBlob;
-}
-
-Result<std::string> Keymaster::sign(const std::vector<uint8_t>& keyBlob,
- const std::string& message) const {
- AuthorizationSet out_params;
- auto params = AuthorizationSetBuilder()
- .Digest(Digest::SHA_2_256)
- .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN);
- std::string signature =
- ProcessMessage(mDevice, keyBlob, KeyPurpose::SIGN, message, params, &out_params);
- if (!out_params.empty()) {
- return Error() << "Error signing key: expected empty out params.";
- }
- return signature;
-}
diff --git a/ondevice-signing/Keymaster.h b/ondevice-signing/Keymaster.h
deleted file mode 100644
index 455289f..0000000
--- a/ondevice-signing/Keymaster.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <optional>
-
-#include <android-base/macros.h>
-#include <android-base/result.h>
-#include <android-base/unique_fd.h>
-
-#include <keymasterV4_1/Keymaster.h>
-
-#include <utils/StrongPointer.h>
-
-enum class KeymasterVerifyResult {
- OK = 0,
- UPGRADE = -1,
-};
-
-class Keymaster {
- using KmDevice = ::android::hardware::keymaster::V4_1::IKeymasterDevice;
-
- public:
- static std::optional<Keymaster> getInstance();
-
- android::base::Result<std::vector<uint8_t>> createKey() const;
-
- android::base::Result<std::vector<uint8_t>>
- extractPublicKey(const std::vector<uint8_t>& keyBlob) const;
-
- android::base::Result<KeymasterVerifyResult>
- verifyKey(const std::vector<uint8_t>& keyBlob) const;
-
- android::base::Result<std::vector<uint8_t>>
- upgradeKey(const std::vector<uint8_t>& keyBlob) const;
-
- /* Sign a message with an initialized signing key */
- android::base::Result<std::string> sign(const std::vector<uint8_t>& keyBlob,
- const std::string& message) const;
-
- private:
- Keymaster();
- bool initialize();
-
- android::sp<KmDevice> mDevice;
-};
diff --git a/ondevice-signing/KeymasterSigningKey.cpp b/ondevice-signing/KeymasterSigningKey.cpp
deleted file mode 100644
index dc3ef8a..0000000
--- a/ondevice-signing/KeymasterSigningKey.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string>
-
-#include <android-base/file.h>
-#include <android-base/logging.h>
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-#include "CertUtils.h"
-#include "Keymaster.h"
-#include "KeymasterSigningKey.h"
-
-using android::base::ErrnoError;
-using android::base::Error;
-using android::base::ReadFileToString;
-using android::base::Result;
-using android::base::unique_fd;
-
-const std::string kSigningKeyBlob = "/data/misc/odsign/key.blob";
-
-KeymasterSigningKey::KeymasterSigningKey() {}
-
-Result<std::unique_ptr<KeymasterSigningKey>>
-KeymasterSigningKey::loadFromBlobAndVerify(const std::string& path) {
- auto signingKey = std::make_unique<KeymasterSigningKey>();
-
- auto status = signingKey->initializeFromKeyblob(path);
-
- if (!status.ok()) {
- return status.error();
- }
-
- return signingKey;
-}
-
-Result<void> KeymasterSigningKey::saveKeyblob(const std::string& path) const {
- int flags = O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC;
-
- unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), flags, 0600)));
- if (fd == -1) {
- return ErrnoError() << "Error creating key blob file " << path;
- }
-
- if (!android::base::WriteFully(fd, mVerifiedKeyBlob.data(), mVerifiedKeyBlob.size())) {
- return ErrnoError() << "Error writing key blob file " << path;
- } else {
- return {};
- }
-}
-
-Result<void> KeymasterSigningKey::createSigningKey() {
- KeymasterSigningKey signingKey;
- auto keymaster = Keymaster::getInstance();
- if (!keymaster.has_value()) {
- return Error() << "Failed to initialize keymaster.";
- }
- mKeymaster = keymaster;
-
- auto keyBlob = mKeymaster->createKey();
-
- if (!keyBlob.ok()) {
- return keyBlob.error();
- }
-
- mVerifiedKeyBlob.assign(keyBlob->begin(), keyBlob->end());
-
- return {};
-}
-
-Result<std::unique_ptr<KeymasterSigningKey>> KeymasterSigningKey::createAndPersistNewKey() {
- auto signingKey = std::make_unique<KeymasterSigningKey>();
-
- auto status = signingKey->createSigningKey();
-
- if (!status.ok()) {
- return status.error();
- }
-
- status = signingKey->saveKeyblob(kSigningKeyBlob);
- if (!status.ok()) {
- return status.error();
- }
-
- return signingKey;
-}
-
-Result<SigningKey*> KeymasterSigningKey::getInstance() {
- auto key = loadFromBlobAndVerify(kSigningKeyBlob);
-
- if (!key.ok()) {
- key = createAndPersistNewKey();
- if (!key.ok()) {
- return key.error();
- }
- }
-
- return key->release();
-}
-
-Result<std::vector<uint8_t>> KeymasterSigningKey::getPublicKey() const {
- auto publicKey = mKeymaster->extractPublicKey(mVerifiedKeyBlob);
- if (!publicKey.ok()) {
- return publicKey.error();
- }
-
- // Keymaster returns the public key not in a full X509 cert, but just the
- // "SubjectPublicKeyInfo"
- return extractPublicKeyFromSubjectPublicKeyInfo(publicKey.value());
-}
-
-Result<void> KeymasterSigningKey::initializeFromKeyblob(const std::string& path) {
- std::string keyBlobData;
- auto keymaster = Keymaster::getInstance();
- if (!keymaster.has_value()) {
- return Error() << "Failed to initialize keymaster.";
- }
- mKeymaster = keymaster;
-
- bool result = ReadFileToString(path, &keyBlobData);
- if (!result) {
- return ErrnoError() << "Failed to read " << path;
- }
-
- std::vector<uint8_t> keyBlob = {keyBlobData.begin(), keyBlobData.end()};
-
- auto verifyResult = mKeymaster->verifyKey(keyBlob);
- if (!verifyResult.ok()) {
- return Error() << "Failed to verify key: " << verifyResult.error().message();
- }
-
- if (*verifyResult == KeymasterVerifyResult::UPGRADE) {
- auto upgradeResult = mKeymaster->upgradeKey(keyBlob);
- if (!upgradeResult.ok()) {
- return Error() << "Failed to upgrade key: " << upgradeResult.error().message();
- }
- mVerifiedKeyBlob = *upgradeResult;
- // Make sure we persist the new blob
- auto saveResult = saveKeyblob(path);
- if (!saveResult.ok()) {
- return Error() << "Failed to store upgraded key";
- }
- } else {
- mVerifiedKeyBlob = keyBlob;
- }
-
- return {};
-}
-
-Result<std::string> KeymasterSigningKey::sign(const std::string& message) const {
- return mKeymaster->sign(mVerifiedKeyBlob, message);
-}
diff --git a/ondevice-signing/KeymasterSigningKey.h b/ondevice-signing/KeymasterSigningKey.h
deleted file mode 100644
index e66781f..0000000
--- a/ondevice-signing/KeymasterSigningKey.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <android-base/macros.h>
-#include <android-base/result.h>
-#include <android-base/unique_fd.h>
-
-#include <utils/StrongPointer.h>
-
-#include "Keymaster.h"
-#include "SigningKey.h"
-
-class KeymasterSigningKey : public SigningKey {
- using KmDevice = ::android::hardware::keymaster::V4_1::IKeymasterDevice;
-
- public:
- friend std::unique_ptr<KeymasterSigningKey> std::make_unique<KeymasterSigningKey>();
- virtual ~KeymasterSigningKey(){};
-
- // Allow the key to be moved around
- KeymasterSigningKey& operator=(KeymasterSigningKey&& other) = default;
- KeymasterSigningKey(KeymasterSigningKey&& other) = default;
-
- static android::base::Result<SigningKey*> getInstance();
-
- virtual android::base::Result<std::string> sign(const std::string& message) const;
- virtual android::base::Result<std::vector<uint8_t>> getPublicKey() const;
-
- private:
- KeymasterSigningKey();
-
- static android::base::Result<std::unique_ptr<KeymasterSigningKey>> createAndPersistNewKey();
- static android::base::Result<std::unique_ptr<KeymasterSigningKey>>
- loadFromBlobAndVerify(const std::string& path);
-
- android::base::Result<void> createSigningKey();
- android::base::Result<void> initializeFromKeyblob(const std::string& path);
- android::base::Result<void> saveKeyblob(const std::string& path) const;
-
- static android::base::Result<KeymasterSigningKey> createNewKey();
-
- std::optional<Keymaster> mKeymaster;
- std::vector<uint8_t> mVerifiedKeyBlob;
-
- DISALLOW_COPY_AND_ASSIGN(KeymasterSigningKey);
-};
diff --git a/ondevice-signing/KeystoreHmacKey.cpp b/ondevice-signing/KeystoreHmacKey.cpp
new file mode 100644
index 0000000..a2208ce
--- /dev/null
+++ b/ondevice-signing/KeystoreHmacKey.cpp
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <binder/IServiceManager.h>
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "CertUtils.h"
+#include "KeyConstants.h"
+#include "KeystoreHmacKey.h"
+
+using android::sp;
+using android::String16;
+
+using android::hardware::security::keymint::Algorithm;
+using android::hardware::security::keymint::Digest;
+using android::hardware::security::keymint::KeyParameter;
+using android::hardware::security::keymint::KeyParameterValue;
+using android::hardware::security::keymint::KeyPurpose;
+using android::hardware::security::keymint::Tag;
+
+using android::system::keystore2::CreateOperationResponse;
+using android::system::keystore2::Domain;
+using android::system::keystore2::KeyDescriptor;
+using android::system::keystore2::KeyEntryResponse;
+using android::system::keystore2::KeyMetadata;
+
+using android::base::Error;
+using android::base::Result;
+
+using android::base::unique_fd;
+
+// Keystore boot level that the odsign key uses
+static const int kOdsignBootLevel = 30;
+
+static KeyDescriptor getHmacKeyDescriptor() {
+ // AIDL parcelable objects don't have constructors
+ static KeyDescriptor descriptor;
+ static std::once_flag flag;
+ std::call_once(flag, [&]() {
+ descriptor.domain = Domain::SELINUX;
+ descriptor.alias = String16("ondevice-signing-hmac");
+ descriptor.nspace = 101; // odsign_key
+ });
+
+ return descriptor;
+}
+
+Result<void> KeystoreHmacKey::createKey() {
+ std::vector<KeyParameter> params;
+
+ KeyParameter algo;
+ algo.tag = Tag::ALGORITHM;
+ algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::HMAC);
+ params.push_back(algo);
+
+ KeyParameter key_size;
+ key_size.tag = Tag::KEY_SIZE;
+ key_size.value = KeyParameterValue::make<KeyParameterValue::integer>(kHmacKeySize);
+ params.push_back(key_size);
+
+ KeyParameter min_mac_length;
+ min_mac_length.tag = Tag::MIN_MAC_LENGTH;
+ min_mac_length.value = KeyParameterValue::make<KeyParameterValue::integer>(256);
+ params.push_back(min_mac_length);
+
+ KeyParameter digest;
+ digest.tag = Tag::DIGEST;
+ digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
+ params.push_back(digest);
+
+ KeyParameter purposeSign;
+ purposeSign.tag = Tag::PURPOSE;
+ purposeSign.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
+ params.push_back(purposeSign);
+
+ KeyParameter purposeVerify;
+ purposeVerify.tag = Tag::PURPOSE;
+ purposeVerify.value =
+ KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::VERIFY);
+ params.push_back(purposeVerify);
+
+ KeyParameter auth;
+ auth.tag = Tag::NO_AUTH_REQUIRED;
+ auth.value = KeyParameterValue::make<KeyParameterValue::boolValue>(true);
+ params.push_back(auth);
+
+ KeyParameter boot_level;
+ boot_level.tag = Tag::MAX_BOOT_LEVEL;
+ boot_level.value = KeyParameterValue::make<KeyParameterValue::integer>(kOdsignBootLevel);
+ params.push_back(boot_level);
+
+ KeyMetadata metadata;
+ auto status = mSecurityLevel->generateKey(mDescriptor, {}, params, 0, {}, &metadata);
+ if (!status.isOk()) {
+ return Error() << "Failed to create new HMAC key";
+ }
+
+ return {};
+}
+
+Result<void> KeystoreHmacKey::initialize(sp<IKeystoreService> service,
+ sp<IKeystoreSecurityLevel> securityLevel) {
+ mService = std::move(service);
+ mSecurityLevel = std::move(securityLevel);
+
+ // See if we can fetch an existing key
+ KeyEntryResponse keyEntryResponse;
+ LOG(INFO) << "Trying to retrieve existing HMAC key...";
+ auto status = mService->getKeyEntry(mDescriptor, &keyEntryResponse);
+ bool keyValid = false;
+
+ if (status.isOk()) {
+ // Make sure this is an early boot key
+ for (const auto& auth : keyEntryResponse.metadata.authorizations) {
+ if (auth.keyParameter.tag == Tag::MAX_BOOT_LEVEL) {
+ if (auth.keyParameter.value.get<KeyParameterValue::integer>() == kOdsignBootLevel) {
+ keyValid = true;
+ break;
+ }
+ }
+ }
+ if (!keyValid) {
+ LOG(WARNING) << "Found invalid HMAC key without MAX_BOOT_LEVEL tag";
+ }
+ }
+
+ if (!keyValid) {
+ LOG(INFO) << "Existing HMAC key not found or invalid, creating new key";
+ return createKey();
+ } else {
+ return {};
+ }
+}
+
+KeystoreHmacKey::KeystoreHmacKey() {
+ mDescriptor = getHmacKeyDescriptor();
+}
+
+static std::vector<KeyParameter> getVerifyOpParameters() {
+ std::vector<KeyParameter> opParameters;
+
+ KeyParameter algo;
+ algo.tag = Tag::ALGORITHM;
+ algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::HMAC);
+ opParameters.push_back(algo);
+
+ KeyParameter digest;
+ digest.tag = Tag::DIGEST;
+ digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
+ opParameters.push_back(digest);
+
+ KeyParameter purpose;
+ purpose.tag = Tag::PURPOSE;
+ purpose.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::VERIFY);
+ opParameters.push_back(purpose);
+
+ return opParameters;
+}
+
+static std::vector<KeyParameter> getSignOpParameters() {
+ std::vector<KeyParameter> opParameters;
+
+ KeyParameter algo;
+ algo.tag = Tag::ALGORITHM;
+ algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::HMAC);
+ opParameters.push_back(algo);
+
+ KeyParameter mac_length;
+ mac_length.tag = Tag::MAC_LENGTH;
+ mac_length.value = KeyParameterValue::make<KeyParameterValue::integer>(256);
+ opParameters.push_back(mac_length);
+
+ KeyParameter digest;
+ digest.tag = Tag::DIGEST;
+ digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
+ opParameters.push_back(digest);
+
+ KeyParameter purpose;
+ purpose.tag = Tag::PURPOSE;
+ purpose.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
+ opParameters.push_back(purpose);
+
+ return opParameters;
+}
+
+Result<std::string> KeystoreHmacKey::sign(const std::string& message) const {
+ CreateOperationResponse opResponse;
+ static auto params = getSignOpParameters();
+
+ auto status = mSecurityLevel->createOperation(mDescriptor, params, false, &opResponse);
+ if (!status.isOk()) {
+ return Error() << "Failed to create keystore signing operation: "
+ << status.serviceSpecificErrorCode();
+ }
+ auto operation = opResponse.iOperation;
+
+ std::optional<std::vector<uint8_t>> out;
+ status = operation->update({message.begin(), message.end()}, &out);
+ if (!status.isOk()) {
+ return Error() << "Failed to call keystore update operation.";
+ }
+
+ std::optional<std::vector<uint8_t>> signature;
+ status = operation->finish({}, {}, &signature);
+ if (!status.isOk()) {
+ return Error() << "Failed to call keystore finish operation.";
+ }
+
+ if (!signature.has_value()) {
+ return Error() << "Didn't receive a signature from keystore finish operation.";
+ }
+
+ return std::string{signature.value().begin(), signature.value().end()};
+}
+
+Result<void> KeystoreHmacKey::verify(const std::string& message,
+ const std::string& signature) const {
+ CreateOperationResponse opResponse;
+ static auto params = getVerifyOpParameters();
+
+ auto status = mSecurityLevel->createOperation(mDescriptor, params, false, &opResponse);
+ if (!status.isOk()) {
+ return Error() << "Failed to create keystore verification operation: "
+ << status.serviceSpecificErrorCode();
+ }
+ auto operation = opResponse.iOperation;
+
+ std::optional<std::vector<uint8_t>> out;
+ status = operation->update({message.begin(), message.end()}, &out);
+ if (!status.isOk()) {
+ return Error() << "Failed to call keystore update operation.";
+ }
+
+ std::optional<std::vector<uint8_t>> out_signature;
+ std::vector<uint8_t> in_signature{signature.begin(), signature.end()};
+ status = operation->finish({}, in_signature, &out_signature);
+ if (!status.isOk()) {
+ return Error() << "Failed to call keystore finish operation.";
+ }
+
+ return {};
+}
diff --git a/ondevice-signing/KeystoreHmacKey.h b/ondevice-signing/KeystoreHmacKey.h
new file mode 100644
index 0000000..fbad0fd
--- /dev/null
+++ b/ondevice-signing/KeystoreHmacKey.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <optional>
+
+#include <android-base/macros.h>
+#include <android-base/result.h>
+
+#include <utils/StrongPointer.h>
+
+#include <android/system/keystore2/IKeystoreService.h>
+
+class KeystoreHmacKey {
+ using IKeystoreService = ::android::system::keystore2::IKeystoreService;
+ using IKeystoreSecurityLevel = ::android::system::keystore2::IKeystoreSecurityLevel;
+ using KeyDescriptor = ::android::system::keystore2::KeyDescriptor;
+
+ public:
+ KeystoreHmacKey();
+ android::base::Result<void> initialize(android::sp<IKeystoreService> service,
+ android::sp<IKeystoreSecurityLevel> securityLevel);
+ android::base::Result<std::string> sign(const std::string& message) const;
+ android::base::Result<void> verify(const std::string& message,
+ const std::string& signature) const;
+
+ private:
+ android::base::Result<void> createKey();
+ KeyDescriptor mDescriptor;
+ android::sp<IKeystoreService> mService;
+ android::sp<IKeystoreSecurityLevel> mSecurityLevel;
+};
diff --git a/ondevice-signing/KeystoreKey.cpp b/ondevice-signing/KeystoreKey.cpp
index 9b5e505..0951d92 100644
--- a/ondevice-signing/KeystoreKey.cpp
+++ b/ondevice-signing/KeystoreKey.cpp
@@ -46,16 +46,15 @@
using android::system::keystore2::Domain;
using android::system::keystore2::KeyDescriptor;
using android::system::keystore2::KeyEntryResponse;
-using android::system::keystore2::KeyMetadata;
using android::base::Error;
using android::base::Result;
-using android::base::unique_fd;
-
// Keystore boot level that the odsign key uses
static const int kOdsignBootLevel = 30;
+const std::string kPublicKeySignature = "/data/misc/odsign/publickey.signature";
+
static KeyDescriptor getKeyDescriptor() {
// AIDL parcelable objects don't have constructor
static KeyDescriptor descriptor;
@@ -69,9 +68,11 @@
return descriptor;
}
-KeystoreKey::KeystoreKey() {}
+KeystoreKey::KeystoreKey() {
+ mDescriptor = getKeyDescriptor();
+}
-Result<KeyMetadata> KeystoreKey::createNewKey(const KeyDescriptor& descriptor) {
+Result<std::vector<uint8_t>> KeystoreKey::createKey() {
std::vector<KeyParameter> params;
KeyParameter algo;
@@ -116,12 +117,31 @@
params.push_back(boot_level);
KeyMetadata metadata;
- auto status = mSecurityLevel->generateKey(descriptor, {}, params, 0, {}, &metadata);
+ auto status = mSecurityLevel->generateKey(mDescriptor, {}, params, 0, {}, &metadata);
if (!status.isOk()) {
return Error() << "Failed to create new key";
}
- return metadata;
+ // Extract the public key from the certificate, HMAC it and store the signature
+ auto cert = metadata.certificate;
+ if (!cert) {
+ return Error() << "Key did not have a certificate.";
+ }
+ auto publicKey = extractPublicKeyFromX509(cert.value());
+ if (!publicKey.ok()) {
+ return publicKey.error();
+ }
+ std::string publicKeyString = {publicKey->begin(), publicKey->end()};
+ auto signature = mHmacKey.sign(publicKeyString);
+ if (!signature.ok()) {
+ return Error() << "Failed to sign public key.";
+ }
+
+ if (!android::base::WriteStringToFile(*signature, kPublicKeySignature)) {
+ return Error() << "Can't write public key signature.";
+ }
+
+ return *publicKey;
}
bool KeystoreKey::initialize() {
@@ -138,33 +158,100 @@
return false;
}
- auto status = mService->getSecurityLevel(SecurityLevel::STRONGBOX, &mSecurityLevel);
+ auto status = mService->getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT, &mSecurityLevel);
if (!status.isOk()) {
- status = mService->getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT, &mSecurityLevel);
- if (!status.isOk()) {
- return false;
- }
+ return false;
}
- auto descriptor = getKeyDescriptor();
+ // Initialize the HMAC key we use to sign/verify information about this key
+ auto hmacStatus = mHmacKey.initialize(mService, mSecurityLevel);
+ if (!hmacStatus.ok()) {
+ LOG(ERROR) << hmacStatus.error().message();
+ return false;
+ }
+
+ auto key = getOrCreateKey();
+ if (!key.ok()) {
+ LOG(ERROR) << key.error().message();
+ return false;
+ }
+ mPublicKey = *key;
+ LOG(ERROR) << "Initialized Keystore key.";
+ return true;
+}
+
+Result<std::vector<uint8_t>> KeystoreKey::verifyExistingKey() {
// See if we can fetch an existing key
KeyEntryResponse keyEntryResponse;
LOG(INFO) << "Trying to retrieve existing keystore key...";
- status = mService->getKeyEntry(descriptor, &keyEntryResponse);
+ auto status = mService->getKeyEntry(mDescriptor, &keyEntryResponse);
+
if (!status.isOk()) {
- LOG(INFO) << "Existing keystore key not found, creating new key";
- auto newKeyStatus = createNewKey(descriptor);
- if (!newKeyStatus.ok()) {
- LOG(ERROR) << "Failed to create new key";
- return false;
- }
- mKeyMetadata = *newKeyStatus;
- } else {
- mKeyMetadata = keyEntryResponse.metadata;
+ return Error() << "Failed to find keystore key...";
}
- LOG(ERROR) << "Initialized Keystore key.";
- return true;
+ // On some earlier builds, we created this key on the Strongbox security level;
+ // we now use TEE keys instead (mostly for speed). It shouldn't matter since
+ // verified boot is protected by the TEE anyway. If the key happens to be on
+ // the wrong security level, delete it (this should happen just once).
+ if (keyEntryResponse.metadata.keySecurityLevel != SecurityLevel::TRUSTED_ENVIRONMENT) {
+ return Error() << "Found invalid keystore key with security level: "
+ << android::hardware::security::keymint::toString(
+ keyEntryResponse.metadata.keySecurityLevel);
+ }
+
+ // Make sure this is an early boot key
+ bool foundBootLevel = false;
+ for (const auto& auth : keyEntryResponse.metadata.authorizations) {
+ if (auth.keyParameter.tag == Tag::MAX_BOOT_LEVEL) {
+ if (auth.keyParameter.value.get<KeyParameterValue::integer>() == kOdsignBootLevel) {
+ foundBootLevel = true;
+ break;
+ }
+ }
+ }
+ if (!foundBootLevel) {
+ return Error() << "Found invalid keystore key without MAX_BOOT_LEVEL tag";
+ }
+
+ // If the key is still considered valid at this point, extract the public
+ // key from the certificate. Note that we cannot trust this public key,
+ // because it is a part of the keystore2 database, which can be modified by
+ // an attacker. So instead, when creating the key we HMAC the public key
+ // with a key of the same boot level, and verify the signature here.
+ auto cert = keyEntryResponse.metadata.certificate;
+ if (!cert) {
+ return Error() << "Key did not have a certificate.";
+ }
+ auto publicKey = extractPublicKeyFromX509(cert.value());
+ if (!publicKey.ok()) {
+ return publicKey.error();
+ }
+ std::string publicKeyString = {publicKey->begin(), publicKey->end()};
+
+ std::string signature;
+ if (!android::base::ReadFileToString(kPublicKeySignature, &signature)) {
+ return Error() << "Can't find signature for public key.";
+ }
+
+ auto signatureValid = mHmacKey.verify(publicKeyString, signature);
+ if (!signatureValid.ok()) {
+ return Error() << "Signature of public key did not match.";
+ }
+ LOG(INFO) << "Verified public key signature.";
+
+ return *publicKey;
+}
+
+Result<std::vector<uint8_t>> KeystoreKey::getOrCreateKey() {
+ auto existingKey = verifyExistingKey();
+ if (!existingKey.ok()) {
+ LOG(INFO) << existingKey.error().message();
+ LOG(INFO) << "Existing keystore key not found or invalid, creating new key";
+ return createKey();
+ }
+
+ return *existingKey;
}
Result<SigningKey*> KeystoreKey::getInstance() {
@@ -206,11 +293,9 @@
Result<std::string> KeystoreKey::sign(const std::string& message) const {
static auto opParameters = getSignOpParameters();
-
CreateOperationResponse opResponse;
- auto status =
- mSecurityLevel->createOperation(getKeyDescriptor(), opParameters, false, &opResponse);
+ auto status = mSecurityLevel->createOperation(mDescriptor, opParameters, false, &opResponse);
if (!status.isOk()) {
return Error() << "Failed to create keystore signing operation: "
<< status.serviceSpecificErrorCode();
@@ -233,16 +318,9 @@
return Error() << "Didn't receive a signature from keystore finish operation.";
}
- std::string result{signature.value().begin(), signature.value().end()};
-
- return result;
+ return std::string{signature.value().begin(), signature.value().end()};
}
Result<std::vector<uint8_t>> KeystoreKey::getPublicKey() const {
- auto cert = mKeyMetadata.certificate;
- if (cert) {
- return extractPublicKeyFromX509(cert.value());
- } else {
- return Error() << "Key did not have a certificate";
- }
+ return mPublicKey;
}
diff --git a/ondevice-signing/KeystoreKey.h b/ondevice-signing/KeystoreKey.h
index 6b9cb57..1257cbb 100644
--- a/ondevice-signing/KeystoreKey.h
+++ b/ondevice-signing/KeystoreKey.h
@@ -26,6 +26,7 @@
#include <android/system/keystore2/IKeystoreService.h>
+#include "KeystoreHmacKey.h"
#include "SigningKey.h"
class KeystoreKey : public SigningKey {
@@ -44,9 +45,13 @@
private:
KeystoreKey();
bool initialize();
- android::base::Result<KeyMetadata> createNewKey(const KeyDescriptor& descriptor);
+ android::base::Result<std::vector<uint8_t>> verifyExistingKey();
+ android::base::Result<std::vector<uint8_t>> createKey();
+ android::base::Result<std::vector<uint8_t>> getOrCreateKey();
+ KeyDescriptor mDescriptor;
+ KeystoreHmacKey mHmacKey;
android::sp<IKeystoreService> mService;
android::sp<IKeystoreSecurityLevel> mSecurityLevel;
- KeyMetadata mKeyMetadata;
+ std::vector<uint8_t> mPublicKey;
};
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index cab92e2..25f949c 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -243,8 +243,8 @@
return digests;
}
-Result<void> addCertToFsVerityKeyring(const std::string& path) {
- const char* const argv[] = {kFsVerityInitPath, "--load-extra-key", "fsv_ods"};
+Result<void> addCertToFsVerityKeyring(const std::string& path, const char* keyName) {
+ const char* const argv[] = {kFsVerityInitPath, "--load-extra-key", keyName};
int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
pid_t pid = fork();
@@ -255,7 +255,7 @@
char* argv_child[argc + 1];
memcpy(argv_child, argv, argc * sizeof(char*));
argv_child[argc] = nullptr;
- execvp(argv_child[0], const_cast<char**>(argv_child));
+ execvp(argv_child[0], argv_child);
PLOG(ERROR) << "exec in ForkExecvp";
_exit(EXIT_FAILURE);
} else {
diff --git a/ondevice-signing/VerityUtils.h b/ondevice-signing/VerityUtils.h
index 84af319..dca3184 100644
--- a/ondevice-signing/VerityUtils.h
+++ b/ondevice-signing/VerityUtils.h
@@ -20,7 +20,7 @@
#include "SigningKey.h"
-android::base::Result<void> addCertToFsVerityKeyring(const std::string& path);
+android::base::Result<void> addCertToFsVerityKeyring(const std::string& path, const char* keyName);
android::base::Result<std::vector<uint8_t>> createDigest(const std::string& path);
android::base::Result<std::map<std::string, std::string>>
verifyAllFilesInVerity(const std::string& path);
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index 6cab8b6..135c4a0 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -32,7 +32,6 @@
#include <odrefresh/odrefresh.h>
#include "CertUtils.h"
-#include "KeymasterSigningKey.h"
#include "KeystoreKey.h"
#include "VerityUtils.h"
@@ -45,7 +44,6 @@
using OdsignInfo = ::odsign::proto::OdsignInfo;
-const std::string kSigningKeyBlob = "/data/misc/odsign/key.blob";
const std::string kSigningKeyCert = "/data/misc/odsign/key.cert";
const std::string kOdsignInfo = "/data/misc/odsign/odsign.info";
const std::string kOdsignInfoSignature = "/data/misc/odsign/odsign.info.signature";
@@ -57,7 +55,10 @@
static const char* kFsVerityProcPath = "/proc/sys/fs/verity";
static const bool kForceCompilation = false;
-static const bool kUseKeystore = true;
+static const bool kUseCompOs = false; // STOPSHIP if true
+
+static const char* kVirtApexPath = "/apex/com.android.virt";
+const std::string kCompOsCert = "/data/misc/odsign/compos_key.cert";
static const char* kOdsignVerificationDoneProp = "odsign.verification.done";
static const char* kOdsignKeyDoneProp = "odsign.key.done";
@@ -66,13 +67,17 @@
static const char* kOdsignVerificationStatusValid = "1";
static const char* kOdsignVerificationStatusError = "0";
-Result<void> verifyExistingCert(const SigningKey& key) {
+bool compOsPresent() {
+ return access(kVirtApexPath, F_OK) == 0;
+}
+
+Result<void> verifyExistingRootCert(const SigningKey& key) {
if (access(kSigningKeyCert.c_str(), F_OK) < 0) {
return ErrnoError() << "Key certificate not found: " << kSigningKeyCert;
}
auto trustedPublicKey = key.getPublicKey();
if (!trustedPublicKey.ok()) {
- return Error() << "Failed to retrieve signing public key.";
+ return Error() << "Failed to retrieve signing public key: " << trustedPublicKey.error();
}
auto publicKeyFromExistingCert = extractPublicKeyFromX509(kSigningKeyCert);
@@ -84,24 +89,49 @@
<< " does not match signing public key.";
}
- // At this point, we know the cert matches
+ // At this point, we know the cert is for our key; it's unimportant whether it's
+ // actually self-signed.
return {};
}
-Result<void> createX509Cert(const SigningKey& key, const std::string& outPath) {
+Result<void> createX509RootCert(const SigningKey& key, const std::string& outPath) {
auto publicKey = key.getPublicKey();
if (!publicKey.ok()) {
return publicKey.error();
}
- auto keymasterSignFunction = [&](const std::string& to_be_signed) {
- return key.sign(to_be_signed);
- };
- createSelfSignedCertificate(*publicKey, keymasterSignFunction, outPath);
+ auto keySignFunction = [&](const std::string& to_be_signed) { return key.sign(to_be_signed); };
+ createSelfSignedCertificate(*publicKey, keySignFunction, outPath);
return {};
}
+Result<std::vector<uint8_t>> extractPublicKeyFromLeafCert(const SigningKey& key,
+                                                          const std::string& certPath,
+                                                          const std::string& expectedCn) {
+    if (access(certPath.c_str(), F_OK) < 0) {
+        return ErrnoError() << "Certificate not found: " << certPath;
+    }
+    auto trustedPublicKey = key.getPublicKey();
+    if (!trustedPublicKey.ok()) {
+        return Error() << "Failed to retrieve signing public key: " << trustedPublicKey.error();
+    }
+
+    auto existingCertInfo = verifyAndExtractCertInfoFromX509(certPath, trustedPublicKey.value());
+    if (!existingCertInfo.ok()) {
+        return Error() << "Failed to verify certificate at " << certPath << ": "
+                       << existingCertInfo.error();
+    }
+
+    auto& actualCn = existingCertInfo.value().subjectCn;
+    if (actualCn != expectedCn) {
+        return Error() << "CN of existing certificate at " << certPath << " is " << actualCn
+                       << ", should be " << expectedCn;
+    }
+
+    return existingCertInfo.value().subjectKey;
+}
+
art::odrefresh::ExitCode compileArtifacts(bool force) {
const char* const argv[] = {kOdrefreshPath, force ? "--force-compile" : "--compile"};
const int exit_code =
@@ -267,7 +297,7 @@
// by the next boot.
SetProperty(kOdsignKeyDoneProp, "1");
if (!signInfo.ok()) {
- return Error() << signInfo.error().message();
+ return signInfo.error();
}
std::map<std::string, std::string> trusted_digests(signInfo->file_hashes().begin(),
signInfo->file_hashes().end());
@@ -279,7 +309,7 @@
integrityStatus = verifyIntegrityNoFsVerity(trusted_digests);
}
if (!integrityStatus.ok()) {
- return Error() << integrityStatus.error().message();
+ return integrityStatus.error();
}
return {};
@@ -302,36 +332,27 @@
return 0;
}
- SigningKey* key;
- if (kUseKeystore) {
- auto keystoreResult = KeystoreKey::getInstance();
- if (!keystoreResult.ok()) {
- LOG(ERROR) << "Could not create keystore key: " << keystoreResult.error().message();
- return -1;
- }
- key = keystoreResult.value();
- } else {
- // TODO - keymaster will go away
- auto keymasterResult = KeymasterSigningKey::getInstance();
- if (!keymasterResult.ok()) {
- LOG(ERROR) << "Failed to create keymaster key: " << keymasterResult.error().message();
- return -1;
- }
- key = keymasterResult.value();
+ auto keystoreResult = KeystoreKey::getInstance();
+ if (!keystoreResult.ok()) {
+ LOG(ERROR) << "Could not create keystore key: " << keystoreResult.error().message();
+ return -1;
}
+ SigningKey* key = keystoreResult.value();
bool supportsFsVerity = access(kFsVerityProcPath, F_OK) == 0;
if (!supportsFsVerity) {
LOG(INFO) << "Device doesn't support fsverity. Falling back to full verification.";
}
+ bool supportsCompOs = kUseCompOs && supportsFsVerity && compOsPresent();
+
if (supportsFsVerity) {
- auto existing_cert = verifyExistingCert(*key);
+ auto existing_cert = verifyExistingRootCert(*key);
if (!existing_cert.ok()) {
LOG(WARNING) << existing_cert.error().message();
// Try to create a new cert
- auto new_cert = createX509Cert(*key, kSigningKeyCert);
+ auto new_cert = createX509RootCert(*key, kSigningKeyCert);
if (!new_cert.ok()) {
LOG(ERROR) << "Failed to create X509 certificate: " << new_cert.error().message();
// TODO apparently the key become invalid - delete the blob / cert
@@ -340,7 +361,7 @@
} else {
LOG(INFO) << "Found and verified existing public key certificate: " << kSigningKeyCert;
}
- auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert);
+ auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert, "fsv_ods");
if (!cert_add_result.ok()) {
LOG(ERROR) << "Failed to add certificate to fs-verity keyring: "
<< cert_add_result.error().message();
@@ -348,6 +369,27 @@
}
}
+ if (supportsCompOs) {
+ auto compos_key = extractPublicKeyFromLeafCert(*key, kCompOsCert, "CompOS");
+ if (compos_key.ok()) {
+ auto cert_add_result = addCertToFsVerityKeyring(kCompOsCert, "fsv_compos");
+ if (cert_add_result.ok()) {
+ LOG(INFO) << "Added CompOs key to fs-verity keyring";
+ } else {
+ LOG(ERROR) << "Failed to add CompOs certificate to fs-verity keyring: "
+ << cert_add_result.error().message();
+ // TODO - what do we do now?
+ // return -1;
+ }
+ } else {
+ LOG(ERROR) << "Failed to retrieve key from CompOs certificate: "
+ << compos_key.error().message();
+ // Best efforts only - nothing we can do if deletion fails.
+ unlink(kCompOsCert.c_str());
+ // TODO - what do we do now?
+ }
+ }
+
art::odrefresh::ExitCode odrefresh_status = compileArtifacts(kForceCompilation);
if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
LOG(INFO) << "odrefresh said artifacts are VALID";
diff --git a/provisioner/Android.bp b/provisioner/Android.bp
index d3f06fe..12a21d1 100644
--- a/provisioner/Android.bp
+++ b/provisioner/Android.bp
@@ -51,3 +51,19 @@
"android.security.provisioner-java",
],
}
+
+cc_binary {
+ name: "rkp_factory_extraction_tool",
+ srcs: ["rkp_factory_extraction_tool.cpp"],
+ shared_libs: [
+ "android.hardware.security.keymint-V1-ndk_platform",
+ "libbinder",
+ "libbinder_ndk",
+ "libcppbor_external",
+ "libcppcose_rkp",
+ "libcrypto",
+ "liblog",
+ "libvintf",
+ ],
+ //export_include_dirs: ["include"],
+}
diff --git a/provisioner/rkp_factory_extraction_tool.cpp b/provisioner/rkp_factory_extraction_tool.cpp
new file mode 100644
index 0000000..d4842b1
--- /dev/null
+++ b/provisioner/rkp_factory_extraction_tool.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <set>
+#include <string>
+#include <vector>
+#include <aidl/android/hardware/security/keymint/IRemotelyProvisionedComponent.h>
+#include <android/binder_manager.h>
+#include <cppbor.h>
+#include <keymaster/cppcose/cppcose.h>
+#include <log/log.h>
+#include <vintf/VintfObject.h>
+using std::set;
+using std::string;
+using std::vector;
+
+using aidl::android::hardware::security::keymint::DeviceInfo;
+using aidl::android::hardware::security::keymint::IRemotelyProvisionedComponent;
+using aidl::android::hardware::security::keymint::MacedPublicKey;
+using aidl::android::hardware::security::keymint::ProtectedData;
+
+using android::vintf::HalManifest;
+using android::vintf::VintfObject;
+
+using namespace cppbor;
+using namespace cppcose;
+
+namespace {
+
+const string kPackage = "android.hardware.security.keymint";
+const string kInterface = "IRemotelyProvisionedComponent";
+const string kFormattedName = kPackage + "." + kInterface + "/";
+
+ErrMsgOr<vector<uint8_t>> generateEekChain(size_t length, const vector<uint8_t>& eekId) {
+    auto eekChain = cppbor::Array();
+
+    vector<uint8_t> prevPrivKey;
+    for (size_t i = 1; i < length; ++i) {  // length - 1 signing keys; no underflow at length == 0
+        vector<uint8_t> pubKey(ED25519_PUBLIC_KEY_LEN);
+        vector<uint8_t> privKey(ED25519_PRIVATE_KEY_LEN);
+
+        ED25519_keypair(pubKey.data(), privKey.data());
+
+        // The first signing key is self-signed.
+        if (prevPrivKey.empty()) prevPrivKey = privKey;
+
+        auto coseSign1 = constructCoseSign1(prevPrivKey,
+                                            cppbor::Map() /* payload CoseKey */
+                                                .add(CoseKey::KEY_TYPE, OCTET_KEY_PAIR)
+                                                .add(CoseKey::ALGORITHM, EDDSA)
+                                                .add(CoseKey::CURVE, ED25519)
+                                                .add(CoseKey::PUBKEY_X, pubKey)
+                                                .canonicalize()
+                                                .encode(),
+                                            {} /* AAD */);
+        if (!coseSign1) return coseSign1.moveMessage();
+        eekChain.add(coseSign1.moveValue());
+
+        prevPrivKey = privKey;
+    }
+
+    vector<uint8_t> pubKey(X25519_PUBLIC_VALUE_LEN);
+    vector<uint8_t> privKey(X25519_PRIVATE_KEY_LEN);
+    X25519_keypair(pubKey.data(), privKey.data());
+
+    auto coseSign1 = constructCoseSign1(prevPrivKey,
+                                        cppbor::Map() /* payload CoseKey */
+                                            .add(CoseKey::KEY_TYPE, OCTET_KEY_PAIR)
+                                            .add(CoseKey::KEY_ID, eekId)
+                                            .add(CoseKey::ALGORITHM, ECDH_ES_HKDF_256)
+                                            .add(CoseKey::CURVE, cppcose::X25519)
+                                            .add(CoseKey::PUBKEY_X, pubKey)
+                                            .canonicalize()
+                                            .encode(),
+                                        {} /* AAD */);
+    if (!coseSign1) return coseSign1.moveMessage();
+    eekChain.add(coseSign1.moveValue());
+
+    return eekChain.encode();
+}
+
+std::vector<uint8_t> getChallenge() {
+ return std::vector<uint8_t>(0);
+}
+
+std::vector<uint8_t> composeCertificateRequest(ProtectedData&& protectedData,
+ DeviceInfo&& deviceInfo) {
+ Array emptyMacedKeysToSign;
+ emptyMacedKeysToSign
+ .add(std::vector<uint8_t>(0)) // empty protected headers as bstr
+ .add(Map()) // empty unprotected headers
+ .add(Null()) // nil for the payload
+ .add(std::vector<uint8_t>(0)); // empty tag as bstr
+ Array certificateRequest;
+ certificateRequest.add(EncodedItem(std::move(deviceInfo.deviceInfo)))
+ .add(getChallenge()) // fake challenge
+ .add(EncodedItem(std::move(protectedData.protectedData)))
+ .add(std::move(emptyMacedKeysToSign));
+ return certificateRequest.encode();
+}
+
+int32_t errorMsg(const string& name) {
+    std::cerr << "Failed for rkp instance: " << name << "\n";
+    return -1;
+}
+
+} // namespace
+
+int main() {
+    std::shared_ptr<const HalManifest> manifest = VintfObject::GetDeviceHalManifest();
+    set<string> rkpNames = manifest->getAidlInstances(kPackage, kInterface);
+    for (const auto& name : rkpNames) {  // const ref: avoid copying each instance name
+        string fullName = kFormattedName + name;
+        if (!AServiceManager_isDeclared(fullName.c_str())) {
+            ALOGE("Could not find the following instance declared in the manifest: %s\n",
+                  fullName.c_str());
+            return errorMsg(name);
+        }
+        AIBinder* rkpAiBinder = AServiceManager_getService(fullName.c_str());
+        ::ndk::SpAIBinder rkp_binder(rkpAiBinder);
+        auto rkp_service = IRemotelyProvisionedComponent::fromBinder(rkp_binder);
+        std::vector<uint8_t> keysToSignMac;
+        std::vector<MacedPublicKey> emptyKeys;
+
+        // Replace this eek chain generation with the actual production GEEK
+        std::vector<uint8_t> eekId(10);  // replace with real KID later (EEK fingerprint)
+        auto eekOrErr = generateEekChain(3 /* chainlength */, eekId);
+        if (!eekOrErr) {
+            ALOGE("Failed to generate test EEK somehow: %s", eekOrErr.message().c_str());
+            return errorMsg(name);
+        }
+
+        std::vector<uint8_t> eek = eekOrErr.moveValue();
+        DeviceInfo deviceInfo;
+        ProtectedData protectedData;
+        if (rkp_service) {
+            ALOGI("extracting bundle");  // informational, not an error
+            ::ndk::ScopedAStatus status = rkp_service->generateCertificateRequest(
+                true /* testMode */, emptyKeys, eek, getChallenge(), &deviceInfo, &protectedData,
+                &keysToSignMac);
+            if (!status.isOk()) {
+                ALOGE("Bundle extraction failed. Error code: %d", status.getServiceSpecificError());
+                return errorMsg(name);
+            }
+            std::cout << "\n";
+            std::vector<uint8_t> certificateRequest =
+                composeCertificateRequest(std::move(protectedData), std::move(deviceInfo));
+            std::cout.write(reinterpret_cast<const char*>(certificateRequest.data()),
+                            certificateRequest.size());
+        }
+    }
+}