Merge "verityutils: fix a memory leak"
diff --git a/Android.bp b/Android.bp
index b44c296..4a0253c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1 +1,31 @@
+package {
+ default_applicable_licenses: ["system_security_license"],
+}
+
+// Added automatically by a large-scale-change that took the approach of
+// 'apply every license found to every target'. While this makes sure we respect
+// every license restriction, it may not be entirely correct.
+//
+// e.g. GPL in an MIT project might only apply to the contrib/ directory.
+//
+// Please consider splitting the single license below into multiple licenses,
+// taking care not to lose any license_kind information, and overriding the
+// default license using the 'licenses: [...]' property on targets as needed.
+//
+// For unused files, consider creating a 'fileGroup' with "//visibility:private"
+// to attach the license to, and including a comment whether the files may be
+// used in the current project.
+// See: http://go/android-license-faq
+license {
+ name: "system_security_license",
+ visibility: [":__subpackages__"],
+ license_kinds: [
+ "SPDX-license-identifier-Apache-2.0",
+ "SPDX-license-identifier-BSD",
+ ],
+ license_text: [
+ "NOTICE",
+ ],
+}
+
subdirs = ["*"]
diff --git a/fsverity_init/Android.bp b/fsverity_init/Android.bp
index 3c9ade0..39d4e6b 100644
--- a/fsverity_init/Android.bp
+++ b/fsverity_init/Android.bp
@@ -1,3 +1,12 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_binary {
name: "fsverity_init",
srcs: [
diff --git a/identity/Android.bp b/identity/Android.bp
index e6d77c8..ed8ff2f 100644
--- a/identity/Android.bp
+++ b/identity/Android.bp
@@ -1,3 +1,12 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_defaults {
name: "identity_defaults",
cflags: [
diff --git a/keystore-engine/Android.bp b/keystore-engine/Android.bp
index 6512c66..b8127d3 100644
--- a/keystore-engine/Android.bp
+++ b/keystore-engine/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-BSD
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_library_shared {
name: "libkeystore-engine",
diff --git a/keystore/Android.bp b/keystore/Android.bp
index 45b721b..7278cee 100644
--- a/keystore/Android.bp
+++ b/keystore/Android.bp
@@ -1,3 +1,13 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ // SPDX-license-identifier-BSD
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_defaults {
name: "keystore_defaults",
diff --git a/keystore/tests/Android.bp b/keystore/tests/Android.bp
index 883e020..327eb93 100644
--- a/keystore/tests/Android.bp
+++ b/keystore/tests/Android.bp
@@ -1,5 +1,14 @@
// Unit test for AuthTokenTable
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_test {
cflags: [
"-Wall",
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index 0a5fb29..812d5e6 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2",
crate_name: "keystore2",
@@ -24,6 +33,7 @@
"android.security.authorization-rust",
"android.security.compat-rust",
"android.security.remoteprovisioning-rust",
+ "android.security.usermanager-rust",
"android.system.keystore2-V1-rust",
"libanyhow",
"libbinder_rs",
@@ -66,6 +76,7 @@
"android.security.authorization-rust",
"android.security.compat-rust",
"android.security.remoteprovisioning-rust",
+ "android.security.usermanager-rust",
"android.system.keystore2-V1-rust",
"libandroid_logger",
"libanyhow",
diff --git a/keystore2/TEST_MAPPING b/keystore2/TEST_MAPPING
index 33d157e..d4e20de 100644
--- a/keystore2/TEST_MAPPING
+++ b/keystore2/TEST_MAPPING
@@ -4,9 +4,6 @@
"name": "keystore2_certificate_test"
},
{
- "name": "keystore2_km_compat_test"
- },
- {
"name": "keystore2_test"
}
]
diff --git a/keystore2/aaid/Android.bp b/keystore2/aaid/Android.bp
index 2329400..d27fdf6 100644
--- a/keystore2/aaid/Android.bp
+++ b/keystore2/aaid/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_library {
name: "libkeystore2_aaid",
srcs: [
diff --git a/keystore2/aidl/Android.bp b/keystore2/aidl/Android.bp
index 36cff16..d529fa9 100644
--- a/keystore2/aidl/Android.bp
+++ b/keystore2/aidl/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
aidl_interface {
name: "android.security.attestationmanager",
srcs: [ "android/security/attestationmanager/*.aidl", ],
@@ -105,3 +114,20 @@
},
},
}
+
+aidl_interface {
+ name: "android.security.usermanager",
+ srcs: [ "android/security/usermanager/*.aidl" ],
+ unstable: true,
+ backend: {
+ java: {
+ sdk_version: "module_current",
+ },
+ rust: {
+ enabled: true,
+ },
+ ndk: {
+ enabled: true,
+ }
+ },
+}
diff --git a/keystore2/aidl/android/security/usermanager/IKeystoreUserManager.aidl b/keystore2/aidl/android/security/usermanager/IKeystoreUserManager.aidl
new file mode 100644
index 0000000..3690b1c
--- /dev/null
+++ b/keystore2/aidl/android/security/usermanager/IKeystoreUserManager.aidl
@@ -0,0 +1,61 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package android.security.usermanager;
+
+// TODO: mark the interface with @SensitiveData when the annotation is ready (b/176110256).
+
+/**
+ * IKeystoreUserManager interface exposes the methods for adding/removing users and changing the
+ * user's password.
+ */
+interface IKeystoreUserManager {
+
+ /**
+ * Allows LockSettingsService to inform keystore about adding a new user.
+ * Callers require 'AddUser' permission.
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'AddUser' permission.
+ * `ResponseCode::SYSTEM_ERROR` - if failed to delete the keys of an existing user with the same
+ * user id.
+ *
+ * @param userId - Android user id
+ */
+ void onUserAdded(in int userId);
+
+ /**
+ * Allows LockSettingsService to inform keystore about removing a user.
+ * Callers require 'RemoveUser' permission.
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'RemoveUser' permission.
+ * `ResponseCode::SYSTEM_ERROR` - if failed to delete the keys of the user being deleted.
+ *
+ * @param userId - Android user id
+ */
+ void onUserRemoved(in int userId);
+
+ /**
+ * Allows LockSettingsService to inform keystore about password change of a user.
+ * Callers require 'ChangePassword' permission.
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'ChangePassword'
+ * permission.
+ * `ResponseCode::SYSTEM_ERROR` - if failed to delete the super encrypted keys of the user.
+ * `ResponseCode::Locked` - if the keystore is locked for the given user.
+ *
+ * @param userId - Android user id
+ * @param password - a secret derived from the synthetic password of the user
+ */
+ void onUserPasswordChanged(in int userId, in @nullable byte[] password);
+}
diff --git a/keystore2/apc_compat/Android.bp b/keystore2/apc_compat/Android.bp
index 405e9b8..9519c8e 100644
--- a/keystore2/apc_compat/Android.bp
+++ b/keystore2/apc_compat/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_library {
name: "libkeystore2_apc_compat",
srcs: [
diff --git a/keystore2/selinux/Android.bp b/keystore2/selinux/Android.bp
index acbf5ef..18063d3 100644
--- a/keystore2/selinux/Android.bp
+++ b/keystore2/selinux/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2_selinux",
crate_name: "keystore2_selinux",
diff --git a/keystore2/src/async_task.rs b/keystore2/src/async_task.rs
index 6edd760..9732e79 100644
--- a/keystore2/src/async_task.rs
+++ b/keystore2/src/async_task.rs
@@ -19,9 +19,9 @@
//! processed all tasks before it terminates.
//! Note that low priority tasks are processed only when the high priority queue is empty.
-use std::time::Duration;
+use std::{any::Any, any::TypeId, time::Duration};
use std::{
- collections::VecDeque,
+ collections::{HashMap, VecDeque},
sync::Arc,
sync::{Condvar, Mutex, MutexGuard},
thread,
@@ -33,15 +33,74 @@
Running,
}
+/// The Shelf allows async tasks to store state across invocations.
+/// Note: Store elves at your own peril ;-).
+#[derive(Debug, Default)]
+pub struct Shelf(HashMap<TypeId, Box<dyn Any + Send>>);
+
+impl Shelf {
+ /// Get a reference to the shelved data of type T. Returns Some if the data exists.
+ pub fn get_downcast_ref<T: Any + Send>(&self) -> Option<&T> {
+ self.0.get(&TypeId::of::<T>()).and_then(|v| v.downcast_ref::<T>())
+ }
+
+ /// Get a mutable reference to the shelved data of type T. Returns Some if a T was inserted
+ /// using put, get_mut, or get_or_put_with.
+ pub fn get_downcast_mut<T: Any + Send>(&mut self) -> Option<&mut T> {
+ self.0.get_mut(&TypeId::of::<T>()).and_then(|v| v.downcast_mut::<T>())
+ }
+
+ /// Removes the entry of the given type and returns the stored data if it existed.
+ pub fn remove_downcast_ref<T: Any + Send>(&mut self) -> Option<T> {
+ self.0.remove(&TypeId::of::<T>()).and_then(|v| v.downcast::<T>().ok().map(|b| *b))
+ }
+
+ /// Puts data `v` on the shelf. If there already was an entry of type T it is returned.
+ pub fn put<T: Any + Send>(&mut self, v: T) -> Option<T> {
+ self.0
+ .insert(TypeId::of::<T>(), Box::new(v) as Box<dyn Any + Send>)
+ .and_then(|v| v.downcast::<T>().ok().map(|b| *b))
+ }
+
+ /// Gets a mutable reference to the entry of the given type and default creates it if necessary.
+ /// The type must implement Default.
+ pub fn get_mut<T: Any + Send + Default>(&mut self) -> &mut T {
+ self.0
+ .entry(TypeId::of::<T>())
+ .or_insert_with(|| Box::new(T::default()) as Box<dyn Any + Send>)
+ .downcast_mut::<T>()
+ .unwrap()
+ }
+
+ /// Gets a mutable reference to the entry of the given type or creates it using the init
+ /// function. Init is not executed if the entry already existed.
+ pub fn get_or_put_with<T: Any + Send, F>(&mut self, init: F) -> &mut T
+ where
+ F: FnOnce() -> T,
+ {
+ self.0
+ .entry(TypeId::of::<T>())
+ .or_insert_with(|| Box::new(init()) as Box<dyn Any + Send>)
+ .downcast_mut::<T>()
+ .unwrap()
+ }
+}
+
struct AsyncTaskState {
state: State,
thread: Option<thread::JoinHandle<()>>,
- hi_prio_req: VecDeque<Box<dyn FnOnce() + Send>>,
- lo_prio_req: VecDeque<Box<dyn FnOnce() + Send>>,
+ hi_prio_req: VecDeque<Box<dyn FnOnce(&mut Shelf) + Send>>,
+ lo_prio_req: VecDeque<Box<dyn FnOnce(&mut Shelf) + Send>>,
+ /// The shelf allows tasks to store state across invocations. It is passed to each invocation
+ /// of each task. Tasks need to cooperate on the ids they use for storing state.
+ shelf: Option<Shelf>,
}
/// AsyncTask spawns one worker thread on demand to process jobs inserted into
-/// a low and a high priority work queue.
+/// a low and a high priority work queue. The queues are processed FIFO, and the low
+/// priority queue is processed only if the high priority queue is empty.
+/// Note: Because there is only one worker thread at a time for a given AsyncTask instance,
+/// all scheduled requests are guaranteed to be serialized with respect to one another.
pub struct AsyncTask {
state: Arc<(Condvar, Mutex<AsyncTaskState>)>,
}
@@ -56,6 +115,7 @@
thread: None,
hi_prio_req: VecDeque::new(),
lo_prio_req: VecDeque::new(),
+ shelf: None,
}),
)),
}
@@ -68,7 +128,7 @@
/// preempt them.
pub fn queue_hi<F>(&self, f: F)
where
- F: FnOnce() + Send + 'static,
+ F: for<'r> FnOnce(&'r mut Shelf) + Send + 'static,
{
self.queue(f, true)
}
@@ -79,14 +139,14 @@
/// priority jobs.
pub fn queue_lo<F>(&self, f: F)
where
- F: FnOnce() + Send + 'static,
+ F: FnOnce(&mut Shelf) + Send + 'static,
{
self.queue(f, false)
}
fn queue<F>(&self, f: F, hi_prio: bool)
where
- F: FnOnce() + Send + 'static,
+ F: for<'r> FnOnce(&'r mut Shelf) + Send + 'static,
{
let (ref condvar, ref state) = *self.state;
let mut state = state.lock().unwrap();
@@ -112,6 +172,8 @@
state.thread = Some(thread::spawn(move || {
let (ref condvar, ref state) = *cloned_state;
+ // When the worker starts, it takes the shelf and puts it on the stack.
+ let mut shelf = state.lock().unwrap().shelf.take().unwrap_or_default();
loop {
if let Some(f) = {
let (mut state, timeout) = condvar
@@ -129,13 +191,17 @@
(Some(f), _, _) => Some(f),
(None, false, _) => state.lo_prio_req.pop_front(),
(None, true, true) => {
+ // When the worker exits it puts the shelf back into the shared
+ // state for the next worker to use. So state is preserved not
+ // only across invocations but also across worker thread shut down.
+ state.shelf = Some(shelf);
state.state = State::Exiting;
break;
}
(None, true, false) => None,
}
} {
- f()
+ f(&mut shelf)
}
}
}));
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index e446e78..02b19c4 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -12,18 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! This module implements IKeyAuthorization AIDL interface.
+//! This module implements IKeystoreAuthorization AIDL interface.
use crate::error::Error as KeystoreError;
use crate::error::map_or_log_err;
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_BLOB_LOADER, SUPER_KEY};
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
use crate::permission::KeystorePerm;
+use crate::super_key::UserState;
use crate::utils::check_keystore_permission;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- HardwareAuthToken::HardwareAuthToken, HardwareAuthenticatorType::HardwareAuthenticatorType,
-};
-use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
- Timestamp::Timestamp,
+ HardwareAuthToken::HardwareAuthToken,
};
use android_security_authorization::binder::{Interface, Result as BinderResult, Strong};
use android_security_authorization::aidl::android::security::authorization::IKeystoreAuthorization::{
@@ -50,16 +48,7 @@
//check keystore permission
check_keystore_permission(KeystorePerm::add_auth()).context("In add_auth_token.")?;
- //TODO: Keymint's HardwareAuthToken aidl needs to implement Copy/Clone
- let auth_token_copy = HardwareAuthToken {
- challenge: auth_token.challenge,
- userId: auth_token.userId,
- authenticatorId: auth_token.authenticatorId,
- authenticatorType: HardwareAuthenticatorType(auth_token.authenticatorType.0),
- timestamp: Timestamp { milliSeconds: auth_token.timestamp.milliSeconds },
- mac: auth_token.mac.clone(),
- };
- ENFORCEMENTS.add_auth_token(auth_token_copy)?;
+ ENFORCEMENTS.add_auth_token(auth_token.clone())?;
Ok(())
}
@@ -77,22 +66,22 @@
.context("In on_lock_screen_event: Unlock with password.")?;
ENFORCEMENTS.set_device_locked(user_id, false);
// Unlock super key.
- DB.with::<_, Result<()>>(|db| {
- let mut db = db.borrow_mut();
- //TODO - b/176123105 - Once the user management API is implemented, unlock is
- //allowed only if the user is added. Then the two tasks handled by the
- //unlock_user_key will be split into two methods. For now, unlock_user_key
- //method is used as it is, which created a super key for the user if one does
- //not exists, in addition to unlocking the existing super key of the user/
- SUPER_KEY.unlock_user_key(
- user_id as u32,
- user_password,
- &mut db,
- &LEGACY_BLOB_LOADER,
- )?;
- Ok(())
- })
- .context("In on_lock_screen_event.")?;
+ if let UserState::Uninitialized = DB
+ .with(|db| {
+ UserState::get_with_password_unlock(
+ &mut db.borrow_mut(),
+ &LEGACY_MIGRATOR,
+ &SUPER_KEY,
+ user_id as u32,
+ user_password,
+ )
+ })
+ .context("In on_lock_screen_event: Unlock with password.")?
+ {
+ log::info!(
+ "In on_lock_screen_event. Trying to unlock when LSKF is uninitialized."
+ );
+ }
Ok(())
}
diff --git a/keystore2/src/crypto/Android.bp b/keystore2/src/crypto/Android.bp
index 9ecd823..e386735 100644
--- a/keystore2/src/crypto/Android.bp
+++ b/keystore2/src/crypto/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2_crypto_rust",
crate_name: "keystore2_crypto",
@@ -65,6 +74,7 @@
"--whitelist-function", "ECPOINTOct2Point",
"--whitelist-function", "EC_KEY_free",
"--whitelist-function", "EC_POINT_free",
+ "--whitelist-function", "extractSubjectFromCertificate",
"--whitelist-type", "EC_KEY",
"--whitelist-type", "EC_POINT",
"--whitelist-var", "EC_MAX_BYTES",
diff --git a/keystore2/src/crypto/crypto.cpp b/keystore2/src/crypto/crypto.cpp
index 3cc19c5..2e613fd 100644
--- a/keystore2/src/crypto/crypto.cpp
+++ b/keystore2/src/crypto/crypto.cpp
@@ -26,6 +26,7 @@
#include <openssl/evp.h>
#include <openssl/hkdf.h>
#include <openssl/rand.h>
+#include <openssl/x509.h>
#include <vector>
@@ -261,3 +262,42 @@
}
return point;
}
+
+int extractSubjectFromCertificate(const uint8_t* cert_buf, size_t cert_len, uint8_t* subject_buf,
+ size_t subject_buf_len) {
+ if (!cert_buf || !subject_buf) {
+ ALOGE("extractSubjectFromCertificate: received null pointer");
+ return 0;
+ }
+
+ const uint8_t* p = cert_buf;
+ bssl::UniquePtr<X509> cert(d2i_X509(nullptr /* Allocate X509 struct */, &p, cert_len));
+ if (!cert) {
+ ALOGE("extractSubjectFromCertificate: failed to parse certificate");
+ return 0;
+ }
+
+ X509_NAME* subject = X509_get_subject_name(cert.get());
+ if (!subject) {
+ ALOGE("extractSubjectFromCertificate: failed to retrieve subject name");
+ return 0;
+ }
+
+ int subject_len = i2d_X509_NAME(subject, nullptr /* Don't copy the data */);
+ if (subject_len < 0) {
+ ALOGE("extractSubjectFromCertificate: error obtaining encoded subject name length");
+ return 0;
+ }
+
+ if (subject_len > subject_buf_len) {
+ // Return the subject length, negated, so the caller knows how much
+ // buffer space is required.
+ ALOGI("extractSubjectFromCertificate: needed %d bytes for subject, caller provided %zu",
+ subject_len, subject_buf_len);
+ return -subject_len;
+ }
+
+ // subject_buf has enough space.
+ uint8_t* tmp = subject_buf;
+ return i2d_X509_NAME(subject, &tmp);
+}
diff --git a/keystore2/src/crypto/crypto.hpp b/keystore2/src/crypto/crypto.hpp
index 9bd7758..1b8971f 100644
--- a/keystore2/src/crypto/crypto.hpp
+++ b/keystore2/src/crypto/crypto.hpp
@@ -60,6 +60,29 @@
size_t ECPOINTPoint2Oct(const EC_POINT *point, uint8_t *buf, size_t len);
EC_POINT* ECPOINTOct2Point(const uint8_t *buf, size_t len);
+
}
+// Parse a DER-encoded X.509 certificate contained in cert_buf, with length
+// cert_len, extract the subject, DER-encode it and write the result to
+// subject_buf, which has subject_buf_len capacity.
+//
+// Because the length of the subject is unknown, and because we'd like to (a) be
+// able to handle subjects of any size and (b) avoid parsing the certificate
+// twice most of the time, once to discover the length and once to parse it, the
+// return value is overloaded.
+//
+// If the return value > 0 it specifies the number of bytes written into
+// subject_buf; the operation was successful.
+//
+// If the return value == 0, certificate parsing failed unrecoverably. The
+// reason will be logged.
+//
+// If the return value < 0, the operation failed because the subject size >
+// subject_buf_len. The return value is -(subject_size), where subject_size is
+// the size of the extracted DER-encoded subject field. Call
+// extractSubjectFromCertificate again with a sufficiently-large buffer.
+int extractSubjectFromCertificate(const uint8_t* cert_buf, size_t cert_len,
+ uint8_t* subject_buf, size_t subject_buf_len);
+
#endif // __CRYPTO_H__
diff --git a/keystore2/src/crypto/error.rs b/keystore2/src/crypto/error.rs
index 1e84fc6..1eec321 100644
--- a/keystore2/src/crypto/error.rs
+++ b/keystore2/src/crypto/error.rs
@@ -85,4 +85,8 @@
/// This is returned if the C implementation of ECPOINTOct2Point returned null.
#[error("Failed to convert oct to point.")]
ECOct2PointFailed,
+
+ /// This is returned if the C implementation of extractSubjectFromCertificate failed.
+ #[error("Failed to extract certificate subject.")]
+ ExtractSubjectFailed,
}
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index 92b257c..f23778c 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -19,11 +19,12 @@
mod zvec;
pub use error::Error;
use keystore2_crypto_bindgen::{
- generateKeyFromPassword, randomBytes, AES_gcm_decrypt, AES_gcm_encrypt, ECDHComputeKey,
- ECKEYDeriveFromSecret, ECKEYGenerateKey, ECPOINTOct2Point, ECPOINTPoint2Oct, EC_KEY_free,
- EC_KEY_get0_public_key, EC_POINT_free, HKDFExpand, HKDFExtract, EC_KEY, EC_MAX_BYTES, EC_POINT,
- EVP_MAX_MD_SIZE,
+ extractSubjectFromCertificate, generateKeyFromPassword, randomBytes, AES_gcm_decrypt,
+ AES_gcm_encrypt, ECDHComputeKey, ECKEYDeriveFromSecret, ECKEYGenerateKey, ECPOINTOct2Point,
+ ECPOINTPoint2Oct, EC_KEY_free, EC_KEY_get0_public_key, EC_POINT_free, HKDFExpand, HKDFExtract,
+ EC_KEY, EC_MAX_BYTES, EC_POINT, EVP_MAX_MD_SIZE,
};
+use std::convert::TryFrom;
use std::convert::TryInto;
use std::marker::PhantomData;
pub use zvec::ZVec;
@@ -353,6 +354,59 @@
Ok(OwnedECPoint(result))
}
+/// Uses BoringSSL to extract the DER-encoded issuer subject from a
+/// DER-encoded X.509 certificate.
+pub fn parse_issuer_subject_from_certificate(cert_buf: &[u8]) -> Result<Vec<u8>, Error> {
+ // Try with a 200-byte output buffer, should be enough in all but bizarre cases.
+ let mut retval = vec![0; 200];
+ let mut size = unsafe {
+ extractSubjectFromCertificate(
+ cert_buf.as_ptr(),
+ cert_buf.len(),
+ retval.as_mut_ptr(),
+ retval.len(),
+ )
+ };
+
+ if size == 0 {
+ return Err(Error::ExtractSubjectFailed);
+ }
+
+ if size < 0 {
+ // Our buffer wasn't big enough. Make one that is just the right size and try again.
+ let negated_size = usize::try_from(-size);
+ retval = match negated_size.ok() {
+ None => return Err(Error::ExtractSubjectFailed),
+ Some(size) => vec![0; size],
+ };
+
+ size = unsafe {
+ extractSubjectFromCertificate(
+ cert_buf.as_ptr(),
+ cert_buf.len(),
+ retval.as_mut_ptr(),
+ retval.len(),
+ )
+ };
+
+ if size <= 0 {
+ return Err(Error::ExtractSubjectFailed);
+ }
+ }
+
+ // Reduce buffer size to the amount written.
+ let safe_size = usize::try_from(size);
+ retval.resize(
+ match safe_size.ok() {
+ None => return Err(Error::ExtractSubjectFailed),
+ Some(size) => size,
+ },
+ 0,
+ );
+
+ Ok(retval)
+}
+
#[cfg(test)]
mod tests {
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 3789d28..3217857 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -41,12 +41,15 @@
//! from the database module these functions take permission check
//! callbacks.
-use crate::db_utils::{self, SqlField};
use crate::error::{Error as KsError, ErrorCode, ResponseCode};
use crate::impl_metadata; // This is in db_utils.rs
use crate::key_parameter::{KeyParameter, Tag};
use crate::permission::KeyPermSet;
-use crate::utils::get_current_time_in_seconds;
+use crate::utils::{get_current_time_in_seconds, AID_USER_OFFSET};
+use crate::{
+ db_utils::{self, SqlField},
+ gc::Gc,
+};
use anyhow::{anyhow, Context, Result};
use std::{convert::TryFrom, convert::TryInto, ops::Deref, time::SystemTimeError};
@@ -95,17 +98,7 @@
/// A metadata entry for key entries.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum KeyMetaEntry {
- /// If present, indicates that the sensitive part of key
- /// is encrypted with another key or a key derived from a password.
- EncryptedBy(EncryptedBy) with accessor encrypted_by,
- /// If the blob is password encrypted this field is set to the
- /// salt used for the key derivation.
- Salt(Vec<u8>) with accessor salt,
- /// If the blob is encrypted, this field is set to the initialization vector.
- Iv(Vec<u8>) with accessor iv,
- /// If the blob is encrypted, this field holds the AEAD TAG.
- AeadTag(Vec<u8>) with accessor aead_tag,
- /// Creation date of a the key entry.
+ /// Date of the creation of the key entry.
CreationDate(DateTime) with accessor creation_date,
/// Expiration date for attestation keys.
AttestationExpirationDate(DateTime) with accessor attestation_expiration_date,
@@ -151,7 +144,7 @@
fn store_in_db(&self, key_id: i64, tx: &Transaction) -> Result<()> {
let mut stmt = tx
.prepare(
- "INSERT into persistent.keymetadata (keyentryid, tag, data)
+ "INSERT or REPLACE INTO persistent.keymetadata (keyentryid, tag, data)
VALUES (?, ?, ?);",
)
.context("In KeyMetaData::store_in_db: Failed to prepare statement.")?;
@@ -166,6 +159,76 @@
}
}
+impl_metadata!(
+ /// A set of metadata for key blobs.
+ #[derive(Debug, Default, Eq, PartialEq)]
+ pub struct BlobMetaData;
+ /// A metadata entry for key blobs.
+ #[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
+ pub enum BlobMetaEntry {
+ /// If present, indicates that the blob is encrypted with another key or a key derived
+ /// from a password.
+ EncryptedBy(EncryptedBy) with accessor encrypted_by,
+ /// If the blob is password encrypted this field is set to the
+ /// salt used for the key derivation.
+ Salt(Vec<u8>) with accessor salt,
+ /// If the blob is encrypted, this field is set to the initialization vector.
+ Iv(Vec<u8>) with accessor iv,
+ /// If the blob is encrypted, this field holds the AEAD TAG.
+ AeadTag(Vec<u8>) with accessor aead_tag,
+ /// The uuid of the owning KeyMint instance.
+ KmUuid(Uuid) with accessor km_uuid,
+ // --- ADD NEW META DATA FIELDS HERE ---
+ // For backwards compatibility add new entries only to
+ // end of this list and above this comment.
+ };
+);
+
+impl BlobMetaData {
+ fn load_from_db(blob_id: i64, tx: &Transaction) -> Result<Self> {
+ let mut stmt = tx
+ .prepare(
+ "SELECT tag, data from persistent.blobmetadata
+ WHERE blobentryid = ?;",
+ )
+ .context("In BlobMetaData::load_from_db: prepare statement failed.")?;
+
+ let mut metadata: HashMap<i64, BlobMetaEntry> = Default::default();
+
+ let mut rows =
+ stmt.query(params![blob_id]).context("In BlobMetaData::load_from_db: query failed.")?;
+ db_utils::with_rows_extract_all(&mut rows, |row| {
+ let db_tag: i64 = row.get(0).context("Failed to read tag.")?;
+ metadata.insert(
+ db_tag,
+ BlobMetaEntry::new_from_sql(db_tag, &SqlField::new(1, &row))
+ .context("Failed to read BlobMetaEntry.")?,
+ );
+ Ok(())
+ })
+ .context("In BlobMetaData::load_from_db.")?;
+
+ Ok(Self { data: metadata })
+ }
+
+ fn store_in_db(&self, blob_id: i64, tx: &Transaction) -> Result<()> {
+ let mut stmt = tx
+ .prepare(
+ "INSERT or REPLACE INTO persistent.blobmetadata (blobentryid, tag, data)
+ VALUES (?, ?, ?);",
+ )
+ .context("In BlobMetaData::store_in_db: Failed to prepare statement.")?;
+
+ let iter = self.data.iter();
+ for (tag, entry) in iter {
+ stmt.insert(params![blob_id, tag, entry,]).with_context(|| {
+ format!("In BlobMetaData::store_in_db: Failed to insert {:?}", entry)
+ })?;
+ }
+ Ok(())
+ }
+}
+
/// Indicates the type of the keyentry.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum KeyType {
@@ -531,7 +594,7 @@
#[derive(Debug, Default, Eq, PartialEq)]
pub struct KeyEntry {
id: i64,
- km_blob: Option<Vec<u8>>,
+ key_blob_info: Option<(Vec<u8>, BlobMetaData)>,
cert: Option<Vec<u8>>,
cert_chain: Option<Vec<u8>>,
km_uuid: Uuid,
@@ -546,12 +609,12 @@
self.id
}
/// Exposes the optional KeyMint blob.
- pub fn km_blob(&self) -> &Option<Vec<u8>> {
- &self.km_blob
+ pub fn key_blob_info(&self) -> &Option<(Vec<u8>, BlobMetaData)> {
+ &self.key_blob_info
}
- /// Extracts the Optional KeyMint blob.
- pub fn take_km_blob(&mut self) -> Option<Vec<u8>> {
- self.km_blob.take()
+ /// Extracts the Optional KeyMint blob including its metadata.
+ pub fn take_key_blob_info(&mut self) -> Option<(Vec<u8>, BlobMetaData)> {
+ self.key_blob_info.take()
}
/// Exposes the optional public certificate.
pub fn cert(&self) -> &Option<Vec<u8>> {
@@ -590,6 +653,10 @@
pub fn pure_cert(&self) -> bool {
self.pure_cert
}
+ /// Consumes this key entry and extracts the keyparameters and metadata from it.
+ pub fn into_key_parameters_and_metadata(self) -> (Vec<KeyParameter>, KeyMetaData) {
+ (self.parameters, self.metadata)
+ }
}
/// Indicates the sub component of a key entry for persistent storage.
@@ -616,10 +683,39 @@
}
}
+/// This trait is private to the database module. It is used to convey whether or not the garbage
+/// collector shall be invoked after a database access. All closures passed to
+/// `KeystoreDB::with_transaction` return a tuple (bool, T) where the bool indicates if the
+/// gc needs to be triggered. These convenience methods make it possible to turn any anyhow::Result<T>
+/// into anyhow::Result<(bool, T)> by simply appending one of `.do_gc(bool)`, `.no_gc()`, or
+/// `.need_gc()`.
+trait DoGc<T> {
+ fn do_gc(self, need_gc: bool) -> Result<(bool, T)>;
+
+ fn no_gc(self) -> Result<(bool, T)>;
+
+ fn need_gc(self) -> Result<(bool, T)>;
+}
+
+impl<T> DoGc<T> for Result<T> {
+ fn do_gc(self, need_gc: bool) -> Result<(bool, T)> {
+ self.map(|r| (need_gc, r))
+ }
+
+ fn no_gc(self) -> Result<(bool, T)> {
+ self.do_gc(false)
+ }
+
+ fn need_gc(self) -> Result<(bool, T)> {
+ self.do_gc(true)
+ }
+}
+
/// KeystoreDB wraps a connection to an SQLite database and tracks its
/// ownership. It also implements all of Keystore 2.0's database functionality.
pub struct KeystoreDB {
conn: Connection,
+ gc: Option<Gc>,
}
/// Database representation of the monotonic time retrieved from the system call clock_gettime with
@@ -700,6 +796,9 @@
impl KeystoreDB {
const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
+ /// The alias of the user super key.
+ pub const USER_SUPER_KEY_ALIAS: &'static str = &"USER_SUPER_KEY";
+
/// This creates a PerBootDbKeepAlive object to keep the per boot database alive.
pub fn keep_perboot_db_alive() -> Result<PerBootDbKeepAlive> {
let conn = Connection::open_in_memory()
@@ -715,7 +814,7 @@
/// It also attempts to initialize all of the tables.
/// KeystoreDB cannot be used by multiple threads.
/// Each thread should open their own connection using `thread_local!`.
- pub fn new(db_root: &Path) -> Result<Self> {
+ pub fn new(db_root: &Path, gc: Option<Gc>) -> Result<Self> {
// Build the path to the sqlite file.
let mut persistent_path = db_root.to_path_buf();
persistent_path.push("persistent.sqlite");
@@ -729,9 +828,9 @@
// On busy fail Immediately. It is unlikely to succeed given a bug in sqlite.
conn.busy_handler(None).context("In KeystoreDB::new: Failed to set busy handler.")?;
- let mut db = Self { conn };
+ let mut db = Self { conn, gc };
db.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::init_tables(tx).context("Trying to initialize tables.")
+ Self::init_tables(tx).context("Trying to initialize tables.").no_gc()
})?;
Ok(db)
}
@@ -782,6 +881,24 @@
.context("Failed to create index blobentry_keyentryid_index.")?;
tx.execute(
+ "CREATE TABLE IF NOT EXISTS persistent.blobmetadata (
+ id INTEGER PRIMARY KEY,
+ blobentryid INTEGER,
+ tag INTEGER,
+ data ANY,
+ UNIQUE (blobentryid, tag));",
+ NO_PARAMS,
+ )
+ .context("Failed to initialize \"blobmetadata\" table.")?;
+
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.blobmetadata_blobentryid_index
+ ON blobmetadata(blobentryid);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index blobmetadata_blobentryid_index.")?;
+
+ tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.keyparameter (
keyentryid INTEGER,
tag INTEGER,
@@ -802,7 +919,8 @@
"CREATE TABLE IF NOT EXISTS persistent.keymetadata (
keyentryid INTEGER,
tag INTEGER,
- data ANY);",
+ data ANY,
+ UNIQUE (keyentryid, tag));",
NO_PARAMS,
)
.context("Failed to initialize \"keymetadata\" table.")?;
@@ -894,77 +1012,73 @@
Ok(conn)
}
- /// Get one unreferenced key. There is no particular order in which the keys are returned.
- fn get_unreferenced_key_id(tx: &Transaction) -> Result<Option<i64>> {
- tx.query_row(
- "SELECT id FROM persistent.keyentry WHERE state = ?",
- params![KeyLifeCycle::Unreferenced],
- |row| row.get(0),
- )
- .optional()
- .context("In get_unreferenced_key_id: Trying to get unreferenced key id.")
- }
-
- /// Returns a key id guard and key entry for one unreferenced key entry. Of the optional
- /// fields of the key entry only the km_blob field will be populated. This is required
- /// to subject the blob to its KeyMint instance for deletion.
- pub fn get_unreferenced_key(&mut self) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let key_id = match Self::get_unreferenced_key_id(tx)
- .context("Trying to get unreferenced key id")?
- {
- None => return Ok(None),
- Some(id) => KEY_ID_LOCK.try_get(id).ok_or_else(KsError::sys).context(concat!(
- "A key id lock was held for an unreferenced key. ",
- "This should never happen."
- ))?,
- };
- let key_entry = Self::load_key_components(tx, KeyEntryLoadBits::KM, key_id.id())
- .context("Trying to get key components.")?;
- Ok(Some((key_id, key_entry)))
- })
- .context("In get_unreferenced_key.")
- }
-
- /// This function purges all remnants of a key entry from the database.
- /// Important: This does not check if the key was unreferenced, nor does it
- /// subject the key to its KeyMint instance for permanent invalidation.
- /// This function should only be called by the garbage collector.
- /// To delete a key call `mark_unreferenced`, which transitions the key to the unreferenced
- /// state, deletes all grants to the key, and notifies the garbage collector.
- /// The garbage collector will:
- /// 1. Call get_unreferenced_key.
- /// 2. Determine the proper way to dispose of sensitive key material, e.g., call
- /// `KeyMintDevice::delete()`.
- /// 3. Call `purge_key_entry`.
- pub fn purge_key_entry(&mut self, key_id: KeyIdGuard) -> Result<()> {
+ /// This function is intended to be used by the garbage collector.
+ /// It deletes the blob given by `blob_id_to_delete`. It then tries to find a superseded
+ /// key blob that might need special handling by the garbage collector.
+ /// If no further superseded blobs can be found, it deletes all other superseded blobs that don't
+ /// need special handling and returns None.
+ pub fn handle_next_superseded_blob(
+ &mut self,
+ blob_id_to_delete: Option<i64>,
+ ) -> Result<Option<(i64, Vec<u8>, BlobMetaData)>> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute("DELETE FROM persistent.keyentry WHERE id = ?;", params![key_id.id()])
- .context("Trying to delete keyentry.")?;
- tx.execute(
- "DELETE FROM persistent.blobentry WHERE keyentryid = ?;",
- params![key_id.id()],
- )
- .context("Trying to delete blobentries.")?;
- tx.execute(
- "DELETE FROM persistent.keymetadata WHERE keyentryid = ?;",
- params![key_id.id()],
- )
- .context("Trying to delete keymetadata.")?;
- tx.execute(
- "DELETE FROM persistent.keyparameter WHERE keyentryid = ?;",
- params![key_id.id()],
- )
- .context("Trying to delete keyparameters.")?;
- let grants_deleted = tx
- .execute("DELETE FROM persistent.grant WHERE keyentryid = ?;", params![key_id.id()])
- .context("Trying to delete grants.")?;
- if grants_deleted != 0 {
- log::error!("Purged key that still had grants. This should not happen.");
+ // Delete the given blob if one was given.
+ if let Some(blob_id_to_delete) = blob_id_to_delete {
+ tx.execute(
+ "DELETE FROM persistent.blobmetadata WHERE blobentryid = ?;",
+ params![blob_id_to_delete],
+ )
+ .context("Trying to delete blob metadata.")?;
+ tx.execute(
+ "DELETE FROM persistent.blobentry WHERE id = ?;",
+ params![blob_id_to_delete],
+ )
+ .context("Trying to delete blob.")?;
}
- Ok(())
+
+ // Find another superseded key blob, load its metadata, and return it.
+ if let Some((blob_id, blob)) = tx
+ .query_row(
+ "SELECT id, blob FROM persistent.blobentry
+ WHERE subcomponent_type = ?
+ AND (
+ id NOT IN (
+ SELECT MAX(id) FROM persistent.blobentry
+ WHERE subcomponent_type = ?
+ GROUP BY keyentryid, subcomponent_type
+ )
+ OR keyentryid NOT IN (SELECT id FROM persistent.keyentry)
+ );",
+ params![SubComponentType::KEY_BLOB, SubComponentType::KEY_BLOB],
+ |row| Ok((row.get(0)?, row.get(1)?)),
+ )
+ .optional()
+ .context("Trying to query superseded blob.")?
+ {
+ let blob_metadata = BlobMetaData::load_from_db(blob_id, tx)
+ .context("Trying to load blob metadata.")?;
+ return Ok(Some((blob_id, blob, blob_metadata))).no_gc();
+ }
+
+ // We did not find any superseded key blob, so let's remove all other superseded
+ // blobs in one transaction.
+ tx.execute(
+ "DELETE FROM persistent.blobentry
+ WHERE NOT subcomponent_type = ?
+ AND (
+ id NOT IN (
+ SELECT MAX(id) FROM persistent.blobentry
+ WHERE NOT subcomponent_type = ?
+ GROUP BY keyentryid, subcomponent_type
+ ) OR keyentryid NOT IN (SELECT id FROM persistent.keyentry)
+ );",
+ params![SubComponentType::KEY_BLOB, SubComponentType::KEY_BLOB],
+ )
+ .context("Trying to purge superseded blobs.")?;
+
+ Ok(None).no_gc()
})
- .context("In purge_key_entry.")
+ .context("In handle_next_superseded_blob.")
}
/// This maintenance function should be called only once before the database is used for the
@@ -982,10 +1096,103 @@
params![KeyLifeCycle::Unreferenced, KeyLifeCycle::Existing],
)
.context("Failed to execute query.")
+ .need_gc()
})
.context("In cleanup_leftovers.")
}
+ /// Checks if a key exists with given key type and key descriptor properties.
+ pub fn key_exists(
+ &mut self,
+ domain: Domain,
+ nspace: i64,
+ alias: &str,
+ key_type: KeyType,
+ ) -> Result<bool> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_descriptor =
+ KeyDescriptor { domain, nspace, alias: Some(alias.to_string()), blob: None };
+ let result = Self::load_key_entry_id(&tx, &key_descriptor, key_type);
+ match result {
+ Ok(_) => Ok(true),
+ Err(error) => match error.root_cause().downcast_ref::<KsError>() {
+ Some(KsError::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(false),
+ _ => Err(error).context("In key_exists: Failed to find if the key exists."),
+ },
+ }
+ .no_gc()
+ })
+ .context("In key_exists.")
+ }
+
+ /// Stores a super key in the database.
+ pub fn store_super_key(
+ &mut self,
+ user_id: u32,
+ blob_info: &(&[u8], &BlobMetaData),
+ ) -> Result<KeyEntry> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_id = Self::insert_with_retry(|id| {
+ tx.execute(
+ "INSERT into persistent.keyentry
+ (id, key_type, domain, namespace, alias, state, km_uuid)
+ VALUES(?, ?, ?, ?, ?, ?, ?);",
+ params![
+ id,
+ KeyType::Super,
+ Domain::APP.0,
+ user_id as i64,
+ Self::USER_SUPER_KEY_ALIAS,
+ KeyLifeCycle::Live,
+ &KEYSTORE_UUID,
+ ],
+ )
+ })
+ .context("Failed to insert into keyentry table.")?;
+
+ let (blob, blob_metadata) = *blob_info;
+ Self::set_blob_internal(
+ &tx,
+ key_id,
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(blob_metadata),
+ )
+ .context("Failed to store key blob.")?;
+
+ Self::load_key_components(tx, KeyEntryLoadBits::KM, key_id)
+ .context("Trying to load key components.")
+ .no_gc()
+ })
+ .context("In store_super_key.")
+ }
+
+ /// Loads super key of a given user, if exists
+ pub fn load_super_key(&mut self, user_id: u32) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_descriptor = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: user_id as i64,
+ alias: Some(String::from(Self::USER_SUPER_KEY_ALIAS)),
+ blob: None,
+ };
+ let id = Self::load_key_entry_id(&tx, &key_descriptor, KeyType::Super);
+ match id {
+ Ok(id) => {
+ let key_entry = Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ .context("In load_super_key. Failed to load key entry.")?;
+ Ok(Some((KEY_ID_LOCK.get(id), key_entry)))
+ }
+ Err(error) => match error.root_cause().downcast_ref::<KsError>() {
+ Some(KsError::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
+ _ => Err(error).context("In load_super_key."),
+ },
+ }
+ .no_gc()
+ })
+ .context("In load_super_key.")
+ }
+
/// Atomically loads a key entry and associated metadata or creates it using the
/// callback create_new_key callback. The callback is called during a database
/// transaction. This means that implementers should be mindful about using
@@ -999,7 +1206,7 @@
create_new_key: F,
) -> Result<(KeyIdGuard, KeyEntry)>
where
- F: Fn() -> Result<(Vec<u8>, KeyMetaData)>,
+ F: Fn() -> Result<(Vec<u8>, BlobMetaData)>,
{
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let id = {
@@ -1055,22 +1262,26 @@
let (blob, metadata) =
create_new_key().context("In get_or_create_key_with.")?;
- Self::set_blob_internal(&tx, id, SubComponentType::KEY_BLOB, Some(&blob))
- .context("In get_of_create_key_with.")?;
- metadata.store_in_db(id, &tx).context("In get_or_create_key_with.")?;
+ Self::set_blob_internal(
+ &tx,
+ id,
+ SubComponentType::KEY_BLOB,
+ Some(&blob),
+ Some(&metadata),
+ )
+ .context("In get_or_create_key_with.")?;
(
id,
KeyEntry {
id,
- km_blob: Some(blob),
- metadata,
+ key_blob_info: Some((blob, metadata)),
pure_cert: false,
..Default::default()
},
)
}
};
- Ok((KEY_ID_LOCK.get(id), entry))
+ Ok((KEY_ID_LOCK.get(id), entry)).no_gc()
})
.context("In get_or_create_key_with.")
}
@@ -1092,7 +1303,7 @@
/// or DatabaseLocked is encountered.
fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
where
- F: Fn(&Transaction) -> Result<T>,
+ F: Fn(&Transaction) -> Result<(bool, T)>,
{
loop {
match self
@@ -1115,6 +1326,14 @@
}
}
}
+ .map(|(need_gc, result)| {
+ if need_gc {
+ if let Some(ref gc) = self.gc {
+ gc.notify_gc();
+ }
+ }
+ result
+ })
}
fn is_locked_error(e: &anyhow::Error) -> bool {
@@ -1142,7 +1361,7 @@
km_uuid: &Uuid,
) -> Result<KeyIdGuard> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::create_key_entry_internal(tx, domain, namespace, km_uuid)
+ Self::create_key_entry_internal(tx, domain, namespace, km_uuid).no_gc()
})
.context("In create_key_entry.")
}
@@ -1204,12 +1423,18 @@
})
.context("In create_key_entry")?,
);
- Self::set_blob_internal(&tx, key_id.0, SubComponentType::KEY_BLOB, Some(private_key))?;
+ Self::set_blob_internal(
+ &tx,
+ key_id.0,
+ SubComponentType::KEY_BLOB,
+ Some(private_key),
+ None,
+ )?;
let mut metadata = KeyMetaData::new();
metadata.add(KeyMetaEntry::AttestationMacedPublicKey(maced_public_key.to_vec()));
metadata.add(KeyMetaEntry::AttestationRawPubKey(raw_public_key.to_vec()));
metadata.store_in_db(key_id.0, &tx)?;
- Ok(())
+ Ok(()).no_gc()
})
.context("In create_attestation_key_entry")
}
@@ -1226,9 +1451,10 @@
key_id: &KeyIdGuard,
sc_type: SubComponentType,
blob: Option<&[u8]>,
+ blob_metadata: Option<&BlobMetaData>,
) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::set_blob_internal(&tx, key_id.0, sc_type, blob)
+ Self::set_blob_internal(&tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
})
.context("In set_blob.")
}
@@ -1238,6 +1464,7 @@
key_id: i64,
sc_type: SubComponentType,
blob: Option<&[u8]>,
+ blob_metadata: Option<&BlobMetaData>,
) -> Result<()> {
match (blob, sc_type) {
(Some(blob), _) => {
@@ -1247,6 +1474,16 @@
params![sc_type, key_id, blob],
)
.context("In set_blob_internal: Failed to insert blob.")?;
+ if let Some(blob_metadata) = blob_metadata {
+ let blob_id = tx
+ .query_row("SELECT MAX(id) FROM persistent.blobentry;", NO_PARAMS, |row| {
+ row.get(0)
+ })
+ .context("In set_blob_internal: Failed to get new blob id.")?;
+ blob_metadata
+ .store_in_db(blob_id, tx)
+ .context("In set_blob_internal: Trying to store blob metadata.")?;
+ }
}
(None, SubComponentType::CERT) | (None, SubComponentType::CERT_CHAIN) => {
tx.execute(
@@ -1266,13 +1503,10 @@
/// Inserts a collection of key parameters into the `persistent.keyparameter` table
/// and associates them with the given `key_id`.
- pub fn insert_keyparameter(
- &mut self,
- key_id: &KeyIdGuard,
- params: &[KeyParameter],
- ) -> Result<()> {
+ #[cfg(test)]
+ fn insert_keyparameter(&mut self, key_id: &KeyIdGuard, params: &[KeyParameter]) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::insert_keyparameter_internal(tx, key_id, params)
+ Self::insert_keyparameter_internal(tx, key_id, params).no_gc()
})
.context("In insert_keyparameter.")
}
@@ -1304,13 +1538,10 @@
}
/// Insert a set of key entry specific metadata into the database.
- pub fn insert_key_metadata(
- &mut self,
- key_id: &KeyIdGuard,
- metadata: &KeyMetaData,
- ) -> Result<()> {
+ #[cfg(test)]
+ fn insert_key_metadata(&mut self, key_id: &KeyIdGuard, metadata: &KeyMetaData) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- metadata.store_in_db(key_id.0, &tx)
+ metadata.store_in_db(key_id.0, &tx).no_gc()
})
.context("In insert_key_metadata.")
}
@@ -1370,9 +1601,15 @@
expiration_date,
)));
metadata.store_in_db(key_id, &tx).context("Failed to insert key metadata.")?;
- Self::set_blob_internal(&tx, key_id, SubComponentType::CERT_CHAIN, Some(cert_chain))
- .context("Failed to insert cert chain")?;
- Ok(())
+ Self::set_blob_internal(
+ &tx,
+ key_id,
+ SubComponentType::CERT_CHAIN,
+ Some(cert_chain),
+ None,
+ )
+ .context("Failed to insert cert chain")?;
+ Ok(()).no_gc()
})
.context("In store_signed_attestation_certificate_chain: ")
}
@@ -1434,7 +1671,7 @@
result
));
}
- Ok(())
+ Ok(()).no_gc()
})
.context("In assign_attestation_key: ")
}
@@ -1476,7 +1713,7 @@
)?
.collect::<rusqlite::Result<Vec<Vec<u8>>>>()
.context("Failed to execute statement")?;
- Ok(rows)
+ Ok(rows).no_gc()
})
.context("In fetch_unsigned_attestation_keys")
}
@@ -1511,7 +1748,7 @@
num_deleted += 1;
}
}
- Ok(num_deleted)
+ Ok(num_deleted).do_gc(num_deleted != 0)
})
.context("In delete_expired_attestation_keys: ")
}
@@ -1576,7 +1813,7 @@
_ => {}
}
}
- Ok(AttestationPoolStatus { expiring, unassigned, attested, total })
+ Ok(AttestationPoolStatus { expiring, unassigned, attested, total }).no_gc()
})
.context("In get_attestation_pool_status: ")
}
@@ -1597,8 +1834,9 @@
.context(format!("Domain {:?} must be either App or SELinux.", domain));
}
}
- let mut stmt = self.conn.prepare(
- "SELECT subcomponent_type, blob
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ let mut stmt = tx.prepare(
+ "SELECT subcomponent_type, blob
FROM persistent.blobentry
WHERE keyentryid IN
(SELECT id
@@ -1608,48 +1846,50 @@
AND namespace = ?
AND state = ?
AND km_uuid = ?);",
- )?;
- let rows = stmt
- .query_map(
- params![
- KeyType::Attestation,
- domain.0 as u32,
- namespace,
- KeyLifeCycle::Live,
- km_uuid
- ],
- |row| Ok((row.get(0)?, row.get(1)?)),
- )?
- .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
- .context("In retrieve_attestation_key_and_cert_chain: query failed.")?;
- if rows.is_empty() {
- return Ok(None);
- } else if rows.len() != 2 {
- return Err(KsError::sys()).context(format!(
- concat!(
+ )?;
+ let rows = stmt
+ .query_map(
+ params![
+ KeyType::Attestation,
+ domain.0 as u32,
+ namespace,
+ KeyLifeCycle::Live,
+ km_uuid
+ ],
+ |row| Ok((row.get(0)?, row.get(1)?)),
+ )?
+ .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
+ .context("In retrieve_attestation_key_and_cert_chain: query failed.")?;
+ if rows.is_empty() {
+ return Ok(None).no_gc();
+ } else if rows.len() != 2 {
+ return Err(KsError::sys()).context(format!(
+ concat!(
"In retrieve_attestation_key_and_cert_chain: Expected to get a single attestation",
"key chain but instead got {}."),
- rows.len()
- ));
- }
- let mut km_blob: Vec<u8> = Vec::new();
- let mut cert_chain_blob: Vec<u8> = Vec::new();
- for row in rows {
- let sub_type: SubComponentType = row.0;
- match sub_type {
- SubComponentType::KEY_BLOB => {
- km_blob = row.1;
- }
- SubComponentType::CERT_CHAIN => {
- cert_chain_blob = row.1;
- }
- _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
+ rows.len()
+ ));
}
- }
- Ok(Some(CertificateChain {
- private_key: ZVec::try_from(km_blob)?,
- cert_chain: ZVec::try_from(cert_chain_blob)?,
- }))
+ let mut km_blob: Vec<u8> = Vec::new();
+ let mut cert_chain_blob: Vec<u8> = Vec::new();
+ for row in rows {
+ let sub_type: SubComponentType = row.0;
+ match sub_type {
+ SubComponentType::KEY_BLOB => {
+ km_blob = row.1;
+ }
+ SubComponentType::CERT_CHAIN => {
+ cert_chain_blob = row.1;
+ }
+ _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
+ }
+ }
+ Ok(Some(CertificateChain {
+ private_key: ZVec::try_from(km_blob)?,
+ cert_chain: ZVec::try_from(cert_chain_blob)?,
+ }))
+ .no_gc()
+ })
}
/// Updates the alias column of the given key id `newid` with the given alias,
@@ -1714,11 +1954,11 @@
&mut self,
key: &KeyDescriptor,
params: &[KeyParameter],
- blob: &[u8],
+ blob_info: &(&[u8], &BlobMetaData),
cert_info: &CertificateInfo,
metadata: &KeyMetaData,
km_uuid: &Uuid,
- ) -> Result<(bool, KeyIdGuard)> {
+ ) -> Result<KeyIdGuard> {
let (alias, domain, namespace) = match key {
KeyDescriptor { alias: Some(alias), domain: Domain::APP, nspace, blob: None }
| KeyDescriptor { alias: Some(alias), domain: Domain::SELINUX, nspace, blob: None } => {
@@ -1732,10 +1972,17 @@
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_id = Self::create_key_entry_internal(tx, &domain, namespace, km_uuid)
.context("Trying to create new key entry.")?;
- Self::set_blob_internal(tx, key_id.id(), SubComponentType::KEY_BLOB, Some(blob))
- .context("Trying to insert the key blob.")?;
+ let (blob, blob_metadata) = *blob_info;
+ Self::set_blob_internal(
+ tx,
+ key_id.id(),
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(&blob_metadata),
+ )
+ .context("Trying to insert the key blob.")?;
if let Some(cert) = &cert_info.cert {
- Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(&cert))
+ Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(&cert), None)
.context("Trying to insert the certificate.")?;
}
if let Some(cert_chain) = &cert_info.cert_chain {
@@ -1744,6 +1991,7 @@
key_id.id(),
SubComponentType::CERT_CHAIN,
Some(&cert_chain),
+ None,
)
.context("Trying to insert the certificate chain.")?;
}
@@ -1752,7 +2000,7 @@
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
.context("Trying to rebind alias.")?;
- Ok((need_gc, key_id))
+ Ok(key_id).do_gc(need_gc)
})
.context("In store_new_key.")
}
@@ -1781,8 +2029,14 @@
let key_id = Self::create_key_entry_internal(tx, &domain, namespace, km_uuid)
.context("Trying to create new key entry.")?;
- Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT_CHAIN, Some(cert))
- .context("Trying to insert certificate.")?;
+ Self::set_blob_internal(
+ tx,
+ key_id.id(),
+ SubComponentType::CERT_CHAIN,
+ Some(cert),
+ None,
+ )
+ .context("Trying to insert certificate.")?;
let mut metadata = KeyMetaData::new();
metadata.add(KeyMetaEntry::CreationDate(
@@ -1791,9 +2045,9 @@
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
- Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
+ let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
.context("Trying to rebind alias.")?;
- Ok(key_id)
+ Ok(key_id).do_gc(need_gc)
})
.context("In store_new_certificate.")
}
@@ -1811,7 +2065,7 @@
.prepare(
"SELECT id FROM persistent.keyentry
WHERE
- key_type = ?
+ key_type = ?
AND domain = ?
AND namespace = ?
AND alias = ?
@@ -1954,7 +2208,7 @@
key_id: i64,
load_bits: KeyEntryLoadBits,
tx: &Transaction,
- ) -> Result<(bool, Option<Vec<u8>>, Option<Vec<u8>>, Option<Vec<u8>>)> {
+ ) -> Result<(bool, Option<(Vec<u8>, BlobMetaData)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
let mut stmt = tx
.prepare(
"SELECT MAX(id), subcomponent_type, blob FROM persistent.blobentry
@@ -1965,7 +2219,7 @@
let mut rows =
stmt.query(params![key_id]).context("In load_blob_components: query failed.")?;
- let mut km_blob: Option<Vec<u8>> = None;
+ let mut key_blob: Option<(i64, Vec<u8>)> = None;
let mut cert_blob: Option<Vec<u8>> = None;
let mut cert_chain_blob: Option<Vec<u8>> = None;
let mut has_km_blob: bool = false;
@@ -1975,7 +2229,10 @@
has_km_blob = has_km_blob || sub_type == SubComponentType::KEY_BLOB;
match (sub_type, load_bits.load_public(), load_bits.load_km()) {
(SubComponentType::KEY_BLOB, _, true) => {
- km_blob = Some(row.get(2).context("Failed to extract KM blob.")?);
+ key_blob = Some((
+ row.get(0).context("Failed to extract key blob id.")?,
+ row.get(2).context("Failed to extract key blob.")?,
+ ));
}
(SubComponentType::CERT, true, _) => {
cert_blob =
@@ -1994,7 +2251,15 @@
})
.context("In load_blob_components.")?;
- Ok((has_km_blob, km_blob, cert_blob, cert_chain_blob))
+ let blob_info = key_blob.map_or::<Result<_>, _>(Ok(None), |(blob_id, blob)| {
+ Ok(Some((
+ blob,
+ BlobMetaData::load_from_db(blob_id, tx)
+ .context("In load_blob_components: Trying to load blob_metadata.")?,
+ )))
+ })?;
+
+ Ok((has_km_blob, blob_info, cert_blob, cert_chain_blob))
}
fn load_key_parameters(key_id: i64, tx: &Transaction) -> Result<Vec<KeyParameter>> {
@@ -2027,7 +2292,7 @@
/// usage has been exhausted, if not, decreases the usage count. If the usage count reaches
/// zero, the key also gets marked unreferenced and scheduled for deletion.
/// Returns Ok(true) if the key was marked unreferenced as a hint to the garbage collector.
- pub fn check_and_update_key_usage_count(&mut self, key_id: i64) -> Result<bool> {
+ pub fn check_and_update_key_usage_count(&mut self, key_id: i64) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let limit: Option<i32> = tx
.query_row(
@@ -2052,9 +2317,10 @@
match limit {
1 => Self::mark_unreferenced(tx, key_id)
+ .map(|need_gc| (need_gc, ()))
.context("Trying to mark limited use key for deletion."),
0 => Err(KsError::Km(ErrorCode::INVALID_KEY_BLOB)).context("Key is exhausted."),
- _ => Ok(false),
+ _ => Ok(()).no_gc(),
}
})
.context("In check_and_update_key_usage_count.")
@@ -2180,13 +2446,14 @@
fn mark_unreferenced(tx: &Transaction, key_id: i64) -> Result<bool> {
let updated = tx
- .execute(
- "UPDATE persistent.keyentry SET state = ? WHERE id = ?;",
- params![KeyLifeCycle::Unreferenced, key_id],
- )
- .context("In mark_unreferenced: Failed to update state of key entry.")?;
- tx.execute("DELETE from persistent.grant WHERE keyentryid = ?;", params![key_id])
- .context("In mark_unreferenced: Failed to drop grants.")?;
+ .execute("DELETE FROM persistent.keyentry WHERE id = ?;", params![key_id])
+ .context("Trying to delete keyentry.")?;
+ tx.execute("DELETE FROM persistent.keymetadata WHERE keyentryid = ?;", params![key_id])
+ .context("Trying to delete keymetadata.")?;
+ tx.execute("DELETE FROM persistent.keyparameter WHERE keyentryid = ?;", params![key_id])
+ .context("Trying to delete keyparameters.")?;
+ tx.execute("DELETE FROM persistent.grant WHERE keyentryid = ?;", params![key_id])
+ .context("Trying to delete grants.")?;
Ok(updated != 0)
}
@@ -2198,7 +2465,7 @@
key_type: KeyType,
caller_uid: u32,
check_permission: impl Fn(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
- ) -> Result<bool> {
+ ) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let (key_id, access_key_descriptor, access_vector) =
Self::load_access_tuple(tx, key, key_type, caller_uid)
@@ -2209,7 +2476,9 @@
check_permission(&access_key_descriptor, access_vector)
.context("While checking permission.")?;
- Self::mark_unreferenced(tx, key_id).context("Trying to mark the key unreferenced.")
+ Self::mark_unreferenced(tx, key_id)
+ .map(|need_gc| (need_gc, ()))
+ .context("Trying to mark the key unreferenced.")
})
.context("In unbind_key.")
}
@@ -2223,6 +2492,82 @@
.context("In get_key_km_uuid.")
}
+ /// Delete the keys created on behalf of the user, denoted by the user id.
+ /// All the keys are deleted unless 'keep_non_super_encrypted_keys' is set to true.
+ /// The garbage collector is notified automatically when any unbound keys need to be
+ /// cleaned up, so callers do not need to take any further action.
+ pub fn unbind_keys_for_user(
+ &mut self,
+ user_id: u32,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx
+ .prepare(&format!(
+ "SELECT id from persistent.keyentry
+ WHERE (
+ key_type = ?
+ AND domain = ?
+ AND cast ( (namespace/{aid_user_offset}) as int) = ?
+ AND state = ?
+ ) OR (
+ key_type = ?
+ AND namespace = ?
+ AND alias = ?
+ AND state = ?
+ );",
+ aid_user_offset = AID_USER_OFFSET
+ ))
+ .context(concat!(
+ "In unbind_keys_for_user. ",
+ "Failed to prepare the query to find the keys created by apps."
+ ))?;
+
+ let mut rows = stmt
+ .query(params![
+ // WHERE client key:
+ KeyType::Client,
+ Domain::APP.0 as u32,
+ user_id,
+ KeyLifeCycle::Live,
+ // OR super key:
+ KeyType::Super,
+ user_id,
+ Self::USER_SUPER_KEY_ALIAS,
+ KeyLifeCycle::Live
+ ])
+ .context("In unbind_keys_for_user. Failed to query the keys created by apps.")?;
+
+ let mut key_ids: Vec<i64> = Vec::new();
+ db_utils::with_rows_extract_all(&mut rows, |row| {
+ key_ids
+ .push(row.get(0).context("Failed to read key id of a key created by an app.")?);
+ Ok(())
+ })
+ .context("In unbind_keys_for_user.")?;
+
+ let mut notify_gc = false;
+ for key_id in key_ids {
+ if keep_non_super_encrypted_keys {
+ // Load metadata and filter out non-super-encrypted keys.
+ if let (_, Some((_, blob_metadata)), _, _) =
+ Self::load_blob_components(key_id, KeyEntryLoadBits::KM, tx)
+ .context("In unbind_keys_for_user: Trying to load blob info.")?
+ {
+ if blob_metadata.encrypted_by().is_none() {
+ continue;
+ }
+ }
+ }
+ notify_gc = Self::mark_unreferenced(&tx, key_id)
+ .context("In unbind_keys_for_user.")?
+ || notify_gc;
+ }
+ Ok(()).do_gc(notify_gc)
+ })
+ .context("In unbind_keys_for_user.")
+ }
+
fn load_key_components(
tx: &Transaction,
load_bits: KeyEntryLoadBits,
@@ -2230,7 +2575,7 @@
) -> Result<KeyEntry> {
let metadata = KeyMetaData::load_from_db(key_id, &tx).context("In load_key_components.")?;
- let (has_km_blob, km_blob, cert_blob, cert_chain_blob) =
+ let (has_km_blob, key_blob_info, cert_blob, cert_chain_blob) =
Self::load_blob_components(key_id, load_bits, &tx)
.context("In load_key_components.")?;
@@ -2242,7 +2587,7 @@
Ok(KeyEntry {
id: key_id,
- km_blob,
+ key_blob_info,
cert: cert_blob,
cert_chain: cert_chain_blob,
km_uuid,
@@ -2279,7 +2624,7 @@
Ok(())
})
.context("In list: Failed to extract rows.")?;
- Ok(descriptors)
+ Ok(descriptors).no_gc()
})
}
@@ -2349,6 +2694,7 @@
};
Ok(KeyDescriptor { domain: Domain::GRANT, nspace: grant_id, alias: None, blob: None })
+ .no_gc()
})
}
@@ -2380,7 +2726,7 @@
)
.context("Failed to delete grant.")?;
- Ok(())
+ Ok(()).no_gc()
})
}
@@ -2424,7 +2770,7 @@
],
)
.context("In insert_auth_token: failed to insert auth token into the database")?;
- Ok(())
+ Ok(()).no_gc()
})
}
@@ -2460,10 +2806,11 @@
entry,
Self::get_last_off_body(tx)
.context("In find_auth_token_entry: Trying to get last off body")?,
- )));
+ )))
+ .no_gc();
}
}
- Ok(None)
+ Ok(None).no_gc()
})
.context("In find_auth_token_entry.")
}
@@ -2476,7 +2823,7 @@
params!["last_off_body", last_off_body],
)
.context("In insert_last_off_body: failed to insert.")?;
- Ok(())
+ Ok(()).no_gc()
})
}
@@ -2488,7 +2835,7 @@
params![last_off_body, "last_off_body"],
)
.context("In update_last_off_body: failed to update.")?;
- Ok(())
+ Ok(()).no_gc()
})
}
@@ -2513,6 +2860,7 @@
};
use crate::key_perm_set;
use crate::permission::{KeyPerm, KeyPermSet};
+ use crate::super_key::SuperKeyManager;
use keystore2_test_utils::TempDir;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
HardwareAuthToken::HardwareAuthToken,
@@ -2534,13 +2882,25 @@
fn new_test_db() -> Result<KeystoreDB> {
let conn = KeystoreDB::make_connection("file::memory:", "file::memory:")?;
- let mut db = KeystoreDB { conn };
+ let mut db = KeystoreDB { conn, gc: None };
db.with_transaction(TransactionBehavior::Immediate, |tx| {
- KeystoreDB::init_tables(tx).context("Failed to initialize tables.")
+ KeystoreDB::init_tables(tx).context("Failed to initialize tables.").no_gc()
})?;
Ok(db)
}
+ fn new_test_db_with_gc<F>(path: &Path, cb: F) -> Result<KeystoreDB>
+ where
+ F: Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static,
+ {
+ let super_key = Arc::new(SuperKeyManager::new());
+
+ let gc_db = KeystoreDB::new(path, None).expect("Failed to open test gc db_connection.");
+ let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db, super_key));
+
+ KeystoreDB::new(path, Some(gc))
+ }
+
fn rebind_alias(
db: &mut KeystoreDB,
newid: &KeyIdGuard,
@@ -2549,7 +2909,7 @@
namespace: i64,
) -> Result<bool> {
db.with_transaction(TransactionBehavior::Immediate, |tx| {
- KeystoreDB::rebind_alias(tx, newid, alias, &domain, &namespace)
+ KeystoreDB::rebind_alias(tx, newid, alias, &domain, &namespace).no_gc()
})
.context("In rebind_alias.")
}
@@ -2601,12 +2961,13 @@
.prepare("SELECT name from persistent.sqlite_master WHERE type='table' ORDER BY name;")?
.query_map(params![], |row| row.get(0))?
.collect::<rusqlite::Result<Vec<String>>>()?;
- assert_eq!(tables.len(), 5);
+ assert_eq!(tables.len(), 6);
assert_eq!(tables[0], "blobentry");
- assert_eq!(tables[1], "grant");
- assert_eq!(tables[2], "keyentry");
- assert_eq!(tables[3], "keymetadata");
- assert_eq!(tables[4], "keyparameter");
+ assert_eq!(tables[1], "blobmetadata");
+ assert_eq!(tables[2], "grant");
+ assert_eq!(tables[3], "keyentry");
+ assert_eq!(tables[4], "keymetadata");
+ assert_eq!(tables[5], "keyparameter");
let tables = db
.conn
.prepare("SELECT name from perboot.sqlite_master WHERE type='table' ORDER BY name;")?
@@ -2696,13 +3057,13 @@
#[test]
fn test_persistence_for_files() -> Result<()> {
let temp_dir = TempDir::new("persistent_db_test")?;
- let mut db = KeystoreDB::new(temp_dir.path())?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
db.create_key_entry(&Domain::APP, &100, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 1);
- let db = KeystoreDB::new(temp_dir.path())?;
+ let db = KeystoreDB::new(temp_dir.path(), None)?;
let entries_new = get_keyentry(&db)?;
assert_eq!(entries, entries_new);
@@ -2833,7 +3194,9 @@
#[test]
fn test_remove_expired_certs() -> Result<()> {
- let mut db = new_test_db()?;
+ let temp_dir =
+ TempDir::new("test_remove_expired_certs_").expect("Failed to create temp dir.");
+ let mut db = new_test_db_with_gc(temp_dir.path(), |_, _| Ok(()))?;
let expiration_date: i64 =
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64 + 10000;
let namespace: i64 = 30;
@@ -2847,11 +3210,20 @@
)?;
load_attestation_key_pool(&mut db, 45, namespace_del1, 0x02)?;
load_attestation_key_pool(&mut db, 60, namespace_del2, 0x03)?;
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // We expect 6 rows here because there are two blobs per attestation key, i.e.,
+ // one key and one certificate.
+ assert_eq!(blob_entry_row_count, 6);
+
assert_eq!(db.delete_expired_attestation_keys()?, 2);
let mut cert_chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
- assert_eq!(true, cert_chain.is_some());
+ assert!(cert_chain.is_some());
let value = cert_chain.unwrap();
assert_eq!(entry_values[1], value.cert_chain.to_vec());
assert_eq!(entry_values[2], value.private_key.to_vec());
@@ -2861,26 +3233,25 @@
namespace_del1,
&KEYSTORE_UUID,
)?;
- assert_eq!(false, cert_chain.is_some());
+ assert!(!cert_chain.is_some());
cert_chain = db.retrieve_attestation_key_and_cert_chain(
Domain::APP,
namespace_del2,
&KEYSTORE_UUID,
)?;
- assert_eq!(false, cert_chain.is_some());
+ assert!(!cert_chain.is_some());
- let mut option_entry = db.get_unreferenced_key()?;
- assert_eq!(true, option_entry.is_some());
- let (key_guard, _) = option_entry.unwrap();
- db.purge_key_entry(key_guard)?;
+ // Give the garbage collector half a second to catch up.
+ std::thread::sleep(Duration::from_millis(500));
- option_entry = db.get_unreferenced_key()?;
- assert_eq!(true, option_entry.is_some());
- let (key_guard, _) = option_entry.unwrap();
- db.purge_key_entry(key_guard)?;
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // There should be 2 blob entries left, because we deleted two of the attestation
+ // key entries with two blobs each.
+ assert_eq!(blob_entry_row_count, 2);
- option_entry = db.get_unreferenced_key()?;
- assert_eq!(false, option_entry.is_some());
Ok(())
}
@@ -3120,26 +3491,43 @@
fn test_set_blob() -> Result<()> {
let key_id = KEY_ID_LOCK.get(3000);
let mut db = new_test_db()?;
- db.set_blob(&key_id, SubComponentType::KEY_BLOB, Some(TEST_KEY_BLOB))?;
- db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB))?;
- db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB))?;
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+ db.set_blob(
+ &key_id,
+ SubComponentType::KEY_BLOB,
+ Some(TEST_KEY_BLOB),
+ Some(&blob_metadata),
+ )?;
+ db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB), None)?;
+ db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB), None)?;
drop(key_id);
let mut stmt = db.conn.prepare(
- "SELECT subcomponent_type, keyentryid, blob FROM persistent.blobentry
+ "SELECT subcomponent_type, keyentryid, blob, id FROM persistent.blobentry
ORDER BY subcomponent_type ASC;",
)?;
let mut rows = stmt
- .query_map::<(SubComponentType, i64, Vec<u8>), _, _>(NO_PARAMS, |row| {
- Ok((row.get(0)?, row.get(1)?, row.get(2)?))
+ .query_map::<((SubComponentType, i64, Vec<u8>), i64), _, _>(NO_PARAMS, |row| {
+ Ok(((row.get(0)?, row.get(1)?, row.get(2)?), row.get(3)?))
})?;
- let r = rows.next().unwrap().unwrap();
+ let (r, id) = rows.next().unwrap().unwrap();
assert_eq!(r, (SubComponentType::KEY_BLOB, 3000, TEST_KEY_BLOB.to_vec()));
- let r = rows.next().unwrap().unwrap();
+ let (r, _) = rows.next().unwrap().unwrap();
assert_eq!(r, (SubComponentType::CERT, 3000, TEST_CERT_BLOB.to_vec()));
- let r = rows.next().unwrap().unwrap();
+ let (r, _) = rows.next().unwrap().unwrap();
assert_eq!(r, (SubComponentType::CERT_CHAIN, 3000, TEST_CERT_CHAIN_BLOB.to_vec()));
+ drop(rows);
+ drop(stmt);
+
+ assert_eq!(
+ db.with_transaction(TransactionBehavior::Immediate, |tx| {
+ BlobMetaData::load_from_db(id, tx).no_gc()
+ })
+ .expect("Should find blob metadata."),
+ blob_metadata
+ );
Ok(())
}
@@ -3564,7 +3952,7 @@
let handle = {
let temp_dir = Arc::new(TempDir::new("id_lock_test")?);
let temp_dir_clone = temp_dir.clone();
- let mut db = KeystoreDB::new(temp_dir.path())?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
let key_id = make_test_key_entry(&mut db, Domain::APP, 33, KEY_LOCK_TEST_ALIAS, None)
.context("test_insert_and_load_full_keyentry_domain_app")?
.0;
@@ -3595,7 +3983,7 @@
// the primary thread.
let handle = thread::spawn(move || {
let temp_dir = temp_dir_clone;
- let mut db = KeystoreDB::new(temp_dir.path()).unwrap();
+ let mut db = KeystoreDB::new(temp_dir.path(), None).unwrap();
assert!(db
.load_key_entry(
&KeyDescriptor {
@@ -3638,8 +4026,8 @@
let temp_dir =
TempDir::new("test_database_busy_error_code_").expect("Failed to create temp dir.");
- let mut db1 = KeystoreDB::new(temp_dir.path()).expect("Failed to open database1.");
- let mut db2 = KeystoreDB::new(temp_dir.path()).expect("Failed to open database2.");
+ let mut db1 = KeystoreDB::new(temp_dir.path(), None).expect("Failed to open database1.");
+ let mut db2 = KeystoreDB::new(temp_dir.path(), None).expect("Failed to open database2.");
let _tx1 = db1
.conn
@@ -3799,7 +4187,7 @@
#[test]
fn list() -> Result<()> {
let temp_dir = TempDir::new("list_test")?;
- let mut db = KeystoreDB::new(temp_dir.path())?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
static LIST_O_ENTRIES: &[(Domain, i64, &str)] = &[
(Domain::APP, 1, "test1"),
(Domain::APP, 1, "test2"),
@@ -4198,18 +4586,27 @@
max_usage_count: Option<i32>,
) -> Result<KeyIdGuard> {
let key_id = db.create_key_entry(&domain, &namespace, &KEYSTORE_UUID)?;
- db.set_blob(&key_id, SubComponentType::KEY_BLOB, Some(TEST_KEY_BLOB))?;
- db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB))?;
- db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB))?;
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ blob_metadata.add(BlobMetaEntry::Salt(vec![1, 2, 3]));
+ blob_metadata.add(BlobMetaEntry::Iv(vec![2, 3, 1]));
+ blob_metadata.add(BlobMetaEntry::AeadTag(vec![3, 1, 2]));
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+
+ db.set_blob(
+ &key_id,
+ SubComponentType::KEY_BLOB,
+ Some(TEST_KEY_BLOB),
+ Some(&blob_metadata),
+ )?;
+ db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB), None)?;
+ db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB), None)?;
let params = make_test_params(max_usage_count);
db.insert_keyparameter(&key_id, ¶ms)?;
let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::EncryptedBy(EncryptedBy::Password));
- metadata.add(KeyMetaEntry::Salt(vec![1, 2, 3]));
- metadata.add(KeyMetaEntry::Iv(vec![2, 3, 1]));
- metadata.add(KeyMetaEntry::AeadTag(vec![3, 1, 2]));
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
db.insert_key_metadata(&key_id, &metadata)?;
rebind_alias(db, &key_id, alias, domain, namespace)?;
Ok(key_id)
@@ -4218,15 +4615,19 @@
fn make_test_key_entry_test_vector(key_id: i64, max_usage_count: Option<i32>) -> KeyEntry {
let params = make_test_params(max_usage_count);
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ blob_metadata.add(BlobMetaEntry::Salt(vec![1, 2, 3]));
+ blob_metadata.add(BlobMetaEntry::Iv(vec![2, 3, 1]));
+ blob_metadata.add(BlobMetaEntry::AeadTag(vec![3, 1, 2]));
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+
let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::EncryptedBy(EncryptedBy::Password));
- metadata.add(KeyMetaEntry::Salt(vec![1, 2, 3]));
- metadata.add(KeyMetaEntry::Iv(vec![2, 3, 1]));
- metadata.add(KeyMetaEntry::AeadTag(vec![3, 1, 2]));
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
KeyEntry {
id: key_id,
- km_blob: Some(TEST_KEY_BLOB.to_vec()),
+ key_blob_info: Some((TEST_KEY_BLOB.to_vec(), blob_metadata)),
cert: Some(TEST_CERT_BLOB.to_vec()),
cert_chain: Some(TEST_CERT_CHAIN_BLOB.to_vec()),
km_uuid: KEYSTORE_UUID,
@@ -4319,4 +4720,53 @@
assert!(last_off_body_1.seconds() < last_off_body_2.seconds());
Ok(())
}
+
+ #[test]
+ fn test_unbind_keys_for_user() -> Result<()> {
+ let mut db = new_test_db()?;
+ db.unbind_keys_for_user(1, false)?;
+
+ make_test_key_entry(&mut db, Domain::APP, 210000, TEST_ALIAS, None)?;
+ make_test_key_entry(&mut db, Domain::APP, 110000, TEST_ALIAS, None)?;
+ db.unbind_keys_for_user(2, false)?;
+
+ assert_eq!(1, db.list(Domain::APP, 110000)?.len());
+ assert_eq!(0, db.list(Domain::APP, 210000)?.len());
+
+ db.unbind_keys_for_user(1, true)?;
+ assert_eq!(0, db.list(Domain::APP, 110000)?.len());
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_store_super_key() -> Result<()> {
+ let mut db = new_test_db()?;
+ let pw = "xyzabc".as_bytes();
+ let super_key = keystore2_crypto::generate_aes256_key()?;
+ let secret = String::from("keystore2 is great.");
+ let secret_bytes = secret.into_bytes();
+ let (encrypted_secret, iv, tag) =
+ keystore2_crypto::aes_gcm_encrypt(&secret_bytes, &super_key)?;
+
+ let (encrypted_super_key, metadata) =
+ SuperKeyManager::encrypt_with_password(&super_key, &pw)?;
+ db.store_super_key(1, &(&encrypted_super_key, &metadata))?;
+
+ // Check if the super key exists.
+ assert!(db.key_exists(Domain::APP, 1, "USER_SUPER_KEY", KeyType::Super)?);
+
+ let (_, key_entry) = db.load_super_key(1)?.unwrap();
+ let loaded_super_key = SuperKeyManager::extract_super_key_from_key_entry(key_entry, &pw)?;
+
+ let decrypted_secret_bytes = keystore2_crypto::aes_gcm_decrypt(
+ &encrypted_secret,
+ &iv,
+ &tag,
+ &loaded_super_key.get_key(),
+ )?;
+ let decrypted_secret = String::from_utf8((&decrypted_secret_bytes).to_vec())?;
+ assert_eq!(String::from("keystore2 is great."), decrypted_secret);
+ Ok(())
+ }
}
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 13068c5..cc59c32 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -14,13 +14,10 @@
//! This is the Keystore 2.0 Enforcements module.
// TODO: more description to follow.
+use crate::database::{AuthTokenEntry, MonotonicRawTime};
use crate::error::{map_binder_status, Error, ErrorCode};
use crate::globals::{get_timestamp_service, ASYNC_TASK, DB, ENFORCEMENTS};
use crate::key_parameter::{KeyParameter, KeyParameterValue};
-use crate::{
- database::{AuthTokenEntry, MonotonicRawTime},
- gc::Gc,
-};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
Algorithm::Algorithm, ErrorCode::ErrorCode as Ec, HardwareAuthToken::HardwareAuthToken,
HardwareAuthenticatorType::HardwareAuthenticatorType,
@@ -29,7 +26,10 @@
use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
ISecureClock::ISecureClock, TimeStampToken::TimeStampToken,
};
-use android_system_keystore2::aidl::android::system::keystore2::OperationChallenge::OperationChallenge;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ IKeystoreSecurityLevel::KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING,
+ OperationChallenge::OperationChallenge,
+};
use android_system_keystore2::binder::Strong;
use anyhow::{Context, Result};
use std::sync::{
@@ -245,7 +245,7 @@
let token_receiver = TokenReceiver(Arc::downgrade(&auth_request));
ENFORCEMENTS.register_op_auth_receiver(challenge, token_receiver);
- ASYNC_TASK.queue_hi(move || timestamp_token_request(challenge, sender));
+ ASYNC_TASK.queue_hi(move |_| timestamp_token_request(challenge, sender));
self.state = DeferredAuthState::Waiting(auth_request);
Some(OperationChallenge { challenge })
}
@@ -253,7 +253,7 @@
let hat = (*hat).clone();
let (sender, receiver) = channel::<Result<TimeStampToken, Error>>();
let auth_request = AuthRequest::timestamp(hat, receiver);
- ASYNC_TASK.queue_hi(move || timestamp_token_request(challenge, sender));
+ ASYNC_TASK.queue_hi(move |_| timestamp_token_request(challenge, sender));
self.state = DeferredAuthState::Waiting(auth_request);
None
}
@@ -305,16 +305,12 @@
if let Some(key_id) = self.key_usage_limited {
// On the last successful use, the key gets deleted. In this case we
// have to notify the garbage collector.
- let need_gc = DB
- .with(|db| {
- db.borrow_mut()
- .check_and_update_key_usage_count(key_id)
- .context("Trying to update key usage count.")
- })
- .context("In after_finish.")?;
- if need_gc {
- Gc::notify_gc();
- }
+ DB.with(|db| {
+ db.borrow_mut()
+ .check_and_update_key_usage_count(key_id)
+ .context("Trying to update key usage count.")
+ })
+ .context("In after_finish.")?;
}
Ok(())
}
@@ -751,6 +747,19 @@
fn register_op_auth_receiver(&self, challenge: i64, recv: TokenReceiver) {
self.op_auth_map.add_receiver(challenge, recv);
}
+
+ /// Given the set of key parameters and flags, check if super encryption is required.
+ pub fn super_encryption_required(key_parameters: &[KeyParameter], flags: Option<i32>) -> bool {
+ let auth_bound = key_parameters.iter().any(|kp| kp.get_tag() == Tag::USER_SECURE_ID);
+
+ let skip_lskf_binding = if let Some(flags) = flags {
+ (flags & KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING) != 0
+ } else {
+ false
+ };
+
+ auth_bound && !skip_lskf_binding
+ }
}
impl Default for Enforcements {
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index fbb1cf6..6cc0f27 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -18,80 +18,106 @@
//! optionally dispose of sensitive key material appropriately, and then delete
//! the key entry from the database.
-use crate::globals::{get_keymint_dev_by_uuid, DB};
-use crate::{error::map_km_error, globals::ASYNC_TASK};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::IKeyMintDevice::IKeyMintDevice;
-use android_hardware_security_keymint::binder::Strong;
-use anyhow::Result;
+use crate::{
+ async_task,
+ database::{KeystoreDB, Uuid},
+ super_key::SuperKeyManager,
+};
+use anyhow::{Context, Result};
+use async_task::AsyncTask;
+use std::sync::Arc;
-#[derive(Clone, Copy)]
pub struct Gc {
- remaining_tries: u32,
+ async_task: Arc<AsyncTask>,
}
impl Gc {
- const MAX_ERROR_RETRIES: u32 = 3u32;
+ /// Creates a garbage collector using the given async_task.
+ /// The garbage collector needs a function to invalidate key blobs and a database connection.
+ /// Both are obtained from the init function. The function is only called if this is the
+ /// first time a garbage collector was initialized with the given AsyncTask instance.
+ pub fn new_init_with<F>(async_task: Arc<AsyncTask>, init: F) -> Self
+ where
+ F: FnOnce() -> (
+ Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
+ KeystoreDB,
+ Arc<SuperKeyManager>,
+ ) + Send
+ + 'static,
+ {
+ let weak_at = Arc::downgrade(&async_task);
+ // Initialize the task's shelf.
+ async_task.queue_hi(move |shelf| {
+ let (invalidate_key, db, super_key) = init();
+ shelf.get_or_put_with(|| GcInternal {
+ blob_id_to_delete: None,
+ invalidate_key,
+ db,
+ async_task: weak_at,
+ super_key,
+ });
+ });
+ Self { async_task }
+ }
- /// Attempts to process one unreferenced key from the database.
- /// Returns Ok(true) if a key was deleted and Ok(false) if there were no more keys to process.
+ /// Notifies the key garbage collector to iterate through orphaned and superseded blobs and
+ /// attempts their deletion. We only process one key at a time and then schedule another
+ /// attempt by queueing it in the async_task (low priority) queue.
+ pub fn notify_gc(&self) {
+ self.async_task.queue_lo(|shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step())
+ }
+}
+
+struct GcInternal {
+ blob_id_to_delete: Option<i64>,
+ invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
+ db: KeystoreDB,
+ async_task: std::sync::Weak<AsyncTask>,
+ super_key: Arc<SuperKeyManager>,
+}
+
+impl GcInternal {
+ /// Attempts to process one blob from the database.
/// We process one key at a time, because deleting a key is a time consuming process which
/// may involve calling into the KeyMint backend and we don't want to hog neither the backend
/// nor the database for extended periods of time.
- fn process_one_key() -> Result<bool> {
- DB.with(|db| {
- let mut db = db.borrow_mut();
- if let Some((key_id, mut key_entry)) = db.get_unreferenced_key()? {
- if let Some(blob) = key_entry.take_km_blob() {
- let km_dev: Strong<dyn IKeyMintDevice> =
- get_keymint_dev_by_uuid(key_entry.km_uuid())
- .map(|(dev, _)| dev)?
- .get_interface()?;
- if let Err(e) = map_km_error(km_dev.deleteKey(&blob)) {
- // Log but ignore error.
- log::error!("Error trying to delete key. {:?}", e);
- }
- }
- db.purge_key_entry(key_id)?;
- return Ok(true);
- }
- Ok(false)
- })
- }
+ fn process_one_key(&mut self) -> Result<()> {
+ if let Some((blob_id, blob, blob_metadata)) = self
+ .db
+ .handle_next_superseded_blob(self.blob_id_to_delete.take())
+ .context("In process_one_key: Trying to handle superseded blob.")?
+ {
+ // Set the blob_id as the next to be deleted blob. So it will be
+ // removed from the database regardless of whether the following
+ // succeeds or not.
+ self.blob_id_to_delete = Some(blob_id);
- /// Processes one key and then schedules another attempt until it runs out of tries or keys
- /// to delete.
- fn process_all(mut self) {
- match Self::process_one_key() {
- // We successfully removed a key.
- Ok(true) => self.remaining_tries = Self::MAX_ERROR_RETRIES,
- // There were no more keys to remove. We may exit.
- Ok(false) => self.remaining_tries = 0,
- // An error occurred. We retry in case the error was transient, but
- // we also count down the number of tries so that we don't spin
- // indefinitely.
- Err(e) => {
- self.remaining_tries -= 1;
- log::error!(
- concat!(
- "Failed to delete key. Retrying in case this error was transient. ",
- "(Tries remaining {}) {:?}"
- ),
- self.remaining_tries,
- e
- )
+ // If the key has a km_uuid we try to get the corresponding device
+ // and delete the key, unwrapping if necessary and possible.
+ // (At this time keys may get deleted without having the super encryption
+ // key; in this case we can only delete the key from the database.)
+ if let Some(uuid) = blob_metadata.km_uuid() {
+ let blob = self
+ .super_key
+ .unwrap_key_if_required(&blob_metadata, &blob)
+ .context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
+ (self.invalidate_key)(&uuid, &*blob)
+ .context("In process_one_key: Trying to invalidate key.")?;
}
}
- if self.remaining_tries != 0 {
- ASYNC_TASK.queue_lo(move || {
- self.process_all();
- })
- }
+ Ok(())
}
- /// Notifies the key garbage collector to iterate through unreferenced keys and attempt
- /// their deletion. We only process one key at a time and then schedule another
- /// attempt by queueing it in the async_task (low priority) queue.
- pub fn notify_gc() {
- ASYNC_TASK.queue_lo(|| Self { remaining_tries: Self::MAX_ERROR_RETRIES }.process_all())
+ /// Processes one key and then schedules another attempt until it runs out of blobs to delete.
+ fn step(&mut self) {
+ if let Err(e) = self.process_one_key() {
+ log::error!("Error trying to delete blob entry. {:?}", e);
+ }
+ // Schedule the next step. This gives high priority requests a chance to interleave.
+ if self.blob_id_to_delete.is_some() {
+ if let Some(at) = self.async_task.upgrade() {
+ at.queue_lo(move |shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step());
+ }
+ }
}
}
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 2afabbe..8cc0106 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -18,6 +18,7 @@
use crate::gc::Gc;
use crate::legacy_blob::LegacyBlobLoader;
+use crate::legacy_migrator::LegacyMigrator;
use crate::super_key::SuperKeyManager;
use crate::utils::Asp;
use crate::{async_task::AsyncTask, database::MonotonicRawTime};
@@ -28,13 +29,14 @@
};
use crate::{enforcements::Enforcements, error::map_km_error};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- KeyMintHardwareInfo::KeyMintHardwareInfo, SecurityLevel::SecurityLevel,
+ IKeyMintDevice::IKeyMintDevice, KeyMintHardwareInfo::KeyMintHardwareInfo,
+ SecurityLevel::SecurityLevel,
};
use android_hardware_security_keymint::binder::{StatusCode, Strong};
use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
use anyhow::{Context, Result};
use lazy_static::lazy_static;
-use std::sync::Mutex;
+use std::sync::{Arc, Mutex};
use std::{cell::RefCell, sync::Once};
use std::{collections::HashMap, path::Path, path::PathBuf};
@@ -43,10 +45,29 @@
/// Open a connection to the Keystore 2.0 database. This is called during the initialization of
/// the thread local DB field. It should never be called directly. The first time this is called
/// we also call KeystoreDB::cleanup_leftovers to restore the key lifecycle invariant. See the
-/// documentation of cleanup_leftovers for more details.
-fn create_thread_local_db() -> KeystoreDB {
- let mut db = KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."))
- .expect("Failed to open database.");
+/// documentation of cleanup_leftovers for more details. The function also constructs a blob
+/// garbage collector. The initializing closure constructs another database connection without
+/// a gc. Although one GC is created for each thread local database connection, this closure
+/// is run only once, as long as the ASYNC_TASK instance is the same. So only one additional
+/// database connection is created for the garbage collector worker.
+pub fn create_thread_local_db() -> KeystoreDB {
+ let gc = Gc::new_init_with(ASYNC_TASK.clone(), || {
+ (
+ Box::new(|uuid, blob| {
+ let km_dev: Strong<dyn IKeyMintDevice> =
+ get_keymint_dev_by_uuid(uuid).map(|(dev, _)| dev)?.get_interface()?;
+ map_km_error(km_dev.deleteKey(&*blob))
+ .context("In invalidate key closure: Trying to invalidate key blob.")
+ }),
+ KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), None)
+ .expect("Failed to open database."),
+ SUPER_KEY.clone(),
+ )
+ });
+
+ let mut db =
+ KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), Some(gc))
+ .expect("Failed to open database.");
DB_INIT.call_once(|| {
log::info!("Touching Keystore 2.0 database for this first time since boot.");
db.insert_last_off_body(MonotonicRawTime::now())
@@ -62,7 +83,6 @@
n
);
}
- Gc::notify_gc();
});
db
}
@@ -113,20 +133,23 @@
pub static ref DB_PATH: Mutex<PathBuf> = Mutex::new(
Path::new("/data/misc/keystore").to_path_buf());
/// Runtime database of unwrapped super keys.
- pub static ref SUPER_KEY: SuperKeyManager = Default::default();
+ pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
/// Map of KeyMint devices.
static ref KEY_MINT_DEVICES: Mutex<DevicesMap> = Default::default();
/// Timestamp service.
static ref TIME_STAMP_DEVICE: Mutex<Option<Asp>> = Default::default();
/// A single on-demand worker thread that handles deferred tasks with two different
/// priorities.
- pub static ref ASYNC_TASK: AsyncTask = Default::default();
+ pub static ref ASYNC_TASK: Arc<AsyncTask> = Default::default();
/// Singleton for enforcements.
pub static ref ENFORCEMENTS: Enforcements = Enforcements::new();
/// LegacyBlobLoader is initialized and exists globally.
/// The same directory used by the database is used by the LegacyBlobLoader as well.
- pub static ref LEGACY_BLOB_LOADER: LegacyBlobLoader = LegacyBlobLoader::new(
- &DB_PATH.lock().expect("Could not get the database path for legacy blob loader."));
+ pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
+ &DB_PATH.lock().expect("Could not get the database path for legacy blob loader.")));
+ /// Legacy migrator. Atomically migrates legacy blobs to the database.
+ pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
+ Arc::new(LegacyMigrator::new(ASYNC_TASK.clone()));
}
static KEYMINT_SERVICE_NAME: &str = "android.hardware.security.keymint.IKeyMintDevice";
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 75475e1..30e3e22 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -18,12 +18,14 @@
use keystore2::authorization::AuthorizationManager;
use keystore2::globals::ENFORCEMENTS;
use keystore2::service::KeystoreService;
+use keystore2::user_manager::UserManager;
use log::{error, info};
use std::{panic, path::Path, sync::mpsc::channel};
static KS2_SERVICE_NAME: &str = "android.system.keystore2";
static APC_SERVICE_NAME: &str = "android.security.apc";
static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
+static USER_MANAGER_SERVICE_NAME: &str = "android.security.usermanager";
/// Keystore 2.0 takes one argument which is a path indicating its designated working directory.
fn main() {
@@ -87,6 +89,15 @@
panic!("Failed to register service {} because of {:?}.", AUTHORIZATION_SERVICE_NAME, e);
});
+ let usermanager_service = UserManager::new_native_binder().unwrap_or_else(|e| {
+ panic!("Failed to create service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
+ });
+ binder::add_service(USER_MANAGER_SERVICE_NAME, usermanager_service.as_binder()).unwrap_or_else(
+ |e| {
+ panic!("Failed to register service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
+ },
+ );
+
info!("Successfully registered Keystore 2.0 service.");
info!("Joining thread pool now.");
diff --git a/keystore2/src/km_compat/Android.bp b/keystore2/src/km_compat/Android.bp
index 2180935..fa214a7 100644
--- a/keystore2/src/km_compat/Android.bp
+++ b/keystore2/src/km_compat/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2_km_compat",
crate_name: "keystore2_km_compat",
diff --git a/keystore2/src/km_compat/certificate_test.cpp b/keystore2/src/km_compat/certificate_test.cpp
index d6bece7..de1d60a 100644
--- a/keystore2/src/km_compat/certificate_test.cpp
+++ b/keystore2/src/km_compat/certificate_test.cpp
@@ -50,7 +50,7 @@
return ScopedAStatus::fromStatus(STATUS_NAME_NOT_FOUND);
}
KeyCreationResult creationResult;
- auto status = device->generateKey(keyParams, &creationResult);
+ auto status = device->generateKey(keyParams, std::nullopt /* attest_key */, &creationResult);
if (!status.isOk()) {
return status;
}
diff --git a/keystore2/src/km_compat/km_compat.cpp b/keystore2/src/km_compat/km_compat.cpp
index 429a038..812f513 100644
--- a/keystore2/src/km_compat/km_compat.cpp
+++ b/keystore2/src/km_compat/km_compat.cpp
@@ -333,8 +333,10 @@
return convertErrorCode(result);
}
-ScopedAStatus KeyMintDevice::generateKey(const std::vector<KeyParameter>& inKeyParams,
- KeyCreationResult* out_creationResult) {
+ScopedAStatus
+KeyMintDevice::generateKey(const std::vector<KeyParameter>& inKeyParams,
+ const std::optional<AttestationKey>& /* in_attestationKey */,
+ KeyCreationResult* out_creationResult) {
auto legacyKeyGenParams = convertKeyParametersToLegacy(extractGenerationParams(inKeyParams));
KMV1::ErrorCode errorCode;
auto result = mDevice->generateKey(
@@ -368,6 +370,7 @@
ScopedAStatus KeyMintDevice::importKey(const std::vector<KeyParameter>& inKeyParams,
KeyFormat in_inKeyFormat,
const std::vector<uint8_t>& in_inKeyData,
+ const std::optional<AttestationKey>& /* in_attestationKey */,
KeyCreationResult* out_creationResult) {
auto legacyKeyGENParams = convertKeyParametersToLegacy(extractGenerationParams(inKeyParams));
auto legacyKeyFormat = convertKeyFormatToLegacy(in_inKeyFormat);
@@ -401,11 +404,13 @@
return convertErrorCode(errorCode);
}
-ScopedAStatus KeyMintDevice::importWrappedKey(
- const std::vector<uint8_t>& in_inWrappedKeyData,
- const std::vector<uint8_t>& in_inWrappingKeyBlob, const std::vector<uint8_t>& in_inMaskingKey,
- const std::vector<KeyParameter>& in_inUnwrappingParams, int64_t in_inPasswordSid,
- int64_t in_inBiometricSid, KeyCreationResult* out_creationResult) {
+ScopedAStatus
+KeyMintDevice::importWrappedKey(const std::vector<uint8_t>& in_inWrappedKeyData,
+ const std::vector<uint8_t>& in_inWrappingKeyBlob, //
+ const std::vector<uint8_t>& in_inMaskingKey,
+ const std::vector<KeyParameter>& in_inUnwrappingParams,
+ int64_t in_inPasswordSid, int64_t in_inBiometricSid,
+ KeyCreationResult* out_creationResult) {
auto legacyUnwrappingParams = convertKeyParametersToLegacy(in_inUnwrappingParams);
KMV1::ErrorCode errorCode;
auto result = mDevice->importWrappedKey(
@@ -500,6 +505,29 @@
return convertErrorCode(errorCode);
}
+ScopedAStatus KeyMintDevice::deviceLocked(bool passwordOnly,
+ const std::optional<TimeStampToken>& timestampToken) {
+ V4_0_VerificationToken token;
+ if (timestampToken.has_value()) {
+ token = convertTimestampTokenToLegacy(timestampToken.value());
+ }
+ auto ret = mDevice->deviceLocked(passwordOnly, token);
+ if (!ret.isOk()) {
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ } else {
+ return convertErrorCode(KMV1::ErrorCode::OK);
+ }
+}
+
+ScopedAStatus KeyMintDevice::earlyBootEnded() {
+ auto ret = mDevice->earlyBootEnded();
+ if (!ret.isOk()) {
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ } else {
+ return convertErrorCode(KMV1::ErrorCode::OK);
+ }
+}
+
ScopedAStatus KeyMintOperation::update(const std::optional<KeyParameterArray>& in_inParams,
const std::optional<std::vector<uint8_t>>& in_input,
const std::optional<HardwareAuthToken>& in_inAuthToken,
diff --git a/keystore2/src/km_compat/km_compat.h b/keystore2/src/km_compat/km_compat.h
index 5637b58..57a7bbd 100644
--- a/keystore2/src/km_compat/km_compat.h
+++ b/keystore2/src/km_compat/km_compat.h
@@ -28,6 +28,7 @@
#include "certificate_utils.h"
+using ::aidl::android::hardware::security::keymint::AttestationKey;
using ::aidl::android::hardware::security::keymint::BeginResult;
using ::aidl::android::hardware::security::keymint::ByteArray;
using ::aidl::android::hardware::security::keymint::Certificate;
@@ -90,9 +91,11 @@
ScopedAStatus getHardwareInfo(KeyMintHardwareInfo* _aidl_return) override;
ScopedAStatus addRngEntropy(const std::vector<uint8_t>& in_data) override;
ScopedAStatus generateKey(const std::vector<KeyParameter>& in_keyParams,
+ const std::optional<AttestationKey>& in_attestationKey,
KeyCreationResult* out_creationResult) override;
ScopedAStatus importKey(const std::vector<KeyParameter>& in_inKeyParams,
KeyFormat in_inKeyFormat, const std::vector<uint8_t>& in_inKeyData,
+ const std::optional<AttestationKey>& in_attestationKey,
KeyCreationResult* out_creationResult) override;
ScopedAStatus importWrappedKey(const std::vector<uint8_t>& in_inWrappedKeyData,
const std::vector<uint8_t>& in_inWrappingKeyBlob,
@@ -110,7 +113,9 @@
const std::vector<KeyParameter>& in_inParams,
const HardwareAuthToken& in_inAuthToken,
BeginResult* _aidl_return) override;
-
+ ScopedAStatus deviceLocked(bool passwordOnly,
+ const std::optional<TimeStampToken>& timestampToken) override;
+ ScopedAStatus earlyBootEnded() override;
// These are public to allow testing code to use them directly.
// This class should not be used publicly anyway.
diff --git a/keystore2/src/km_compat/lib.rs b/keystore2/src/km_compat/lib.rs
index eca0a85..9be329f 100644
--- a/keystore2/src/km_compat/lib.rs
+++ b/keystore2/src/km_compat/lib.rs
@@ -71,7 +71,8 @@
// TODO: If I only need the key itself, don't return the other things.
fn generate_key(legacy: &dyn IKeyMintDevice, kps: Vec<KeyParameter>) -> KeyCreationResult {
- let creation_result = legacy.generateKey(&kps).expect("Failed to generate key");
+ let creation_result =
+ legacy.generateKey(&kps, None /* attest_key */).expect("Failed to generate key");
assert_ne!(creation_result.keyBlob.len(), 0);
creation_result
}
@@ -163,7 +164,8 @@
}];
let kf = KeyFormat::RAW;
let kd = [0; 16];
- let creation_result = legacy.importKey(&kps, kf, &kd).expect("Failed to import key");
+ let creation_result =
+ legacy.importKey(&kps, kf, &kd, None /* attest_key */).expect("Failed to import key");
assert_ne!(creation_result.keyBlob.len(), 0);
assert_eq!(creation_result.certificateChain.len(), 0);
}
diff --git a/keystore2/src/km_compat/slot_test.cpp b/keystore2/src/km_compat/slot_test.cpp
index 0859ddf..72d944c 100644
--- a/keystore2/src/km_compat/slot_test.cpp
+++ b/keystore2/src/km_compat/slot_test.cpp
@@ -47,7 +47,7 @@
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::DECRYPT),
});
KeyCreationResult creationResult;
- auto status = device->generateKey(keyParams, &creationResult);
+ auto status = device->generateKey(keyParams, std::nullopt /* attest_key */, &creationResult);
if (!status.isOk()) {
return {};
}
@@ -150,13 +150,13 @@
KMV1::makeKeyParameter(KMV1::TAG_NO_AUTH_REQUIRED, true),
});
KeyCreationResult creationResult;
- status = device->generateKey(kps, &creationResult);
+ status = device->generateKey(kps, std::nullopt /* attest_key */, &creationResult);
ASSERT_TRUE(!status.isOk());
ASSERT_EQ(status.getServiceSpecificError(),
static_cast<int32_t>(ErrorCode::TOO_MANY_OPERATIONS));
// But generating a certificate with signCert does not use a slot.
kps.pop_back();
- status = device->generateKey(kps, &creationResult);
+ status = device->generateKey(kps, std::nullopt /* attest_key */, &creationResult);
ASSERT_TRUE(status.isOk());
// Destructing operations should free up their slots.
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 230a82c..1981022 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -17,7 +17,6 @@
//! This module implements methods to load legacy keystore key blob files.
use crate::{
- database::KeyMetaData,
error::{Error as KsError, ResponseCode},
key_parameter::{KeyParameter, KeyParameterValue},
super_key::SuperKeyManager,
@@ -28,8 +27,11 @@
};
use anyhow::{Context, Result};
use keystore2_crypto::{aes_gcm_decrypt, derive_key_from_password, ZVec};
-use std::io::{ErrorKind, Read};
use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
+use std::{
+ fs,
+ io::{ErrorKind, Read, Result as IoResult},
+};
const SUPPORTED_LEGACY_BLOB_VERSION: u8 = 3;
@@ -231,6 +233,7 @@
pub fn new(path: &Path) -> Self {
Self { path: path.to_owned() }
}
+
/// Encodes an alias string as ascii character sequence in the range
/// ['+' .. '.'] and ['0' .. '~'].
/// Bytes with values in the range ['0' .. '~'] are represented as they are.
@@ -587,7 +590,7 @@
let sw_list = Self::read_key_parameters(&mut stream)
.context("In read_characteristics_file.")?
.into_iter()
- .map(|value| KeyParameter::new(value, SecurityLevel::SOFTWARE));
+ .map(|value| KeyParameter::new(value, SecurityLevel::KEYSTORE));
Ok(hw_list.into_iter().flatten().chain(sw_list).collect())
}
@@ -600,7 +603,7 @@
// used this for user installed certificates without private key material.
fn read_km_blob_file(&self, uid: u32, alias: &str) -> Result<Option<(Blob, String)>> {
- let mut iter = ["USRPKEY", "USERSKEY"].iter();
+ let mut iter = ["USRPKEY", "USRSKEY"].iter();
let (blob, prefix) = loop {
if let Some(prefix) = iter.next() {
@@ -619,7 +622,7 @@
}
fn read_generic_blob(path: &Path) -> Result<Option<Blob>> {
- let mut file = match File::open(path) {
+ let mut file = match Self::with_retry_interrupted(|| File::open(path)) {
Ok(file) => file,
Err(e) => match e.kind() {
ErrorKind::NotFound => return Ok(None),
@@ -633,47 +636,215 @@
/// This function constructs the blob file name which has the form:
/// user_<android user id>/<uid>_<alias>.
fn make_blob_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
- let mut path = self.path.clone();
let user_id = uid_to_android_user(uid);
let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
- path.push(format!("user_{}", user_id));
+ let mut path = self.make_user_path_name(user_id);
path.push(format!("{}_{}", uid, encoded_alias));
path
}
/// This function constructs the characteristics file name which has the form:
- /// user_<android user id>/.<uid>_chr_<alias>.
+ /// user_<android user id>/.<uid>_chr_<prefix>_<alias>.
fn make_chr_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
- let mut path = self.path.clone();
let user_id = uid_to_android_user(uid);
let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
- path.push(format!("user_{}", user_id));
+ let mut path = self.make_user_path_name(user_id);
path.push(format!(".{}_chr_{}", uid, encoded_alias));
path
}
- fn load_by_uid_alias(
+ fn make_super_key_filename(&self, user_id: u32) -> PathBuf {
+ let mut path = self.make_user_path_name(user_id);
+ path.push(".masterkey");
+ path
+ }
+
+ fn make_user_path_name(&self, user_id: u32) -> PathBuf {
+ let mut path = self.path.clone();
+ path.push(&format!("user_{}", user_id));
+ path
+ }
+
+ /// Returns whether the legacy blob database is empty, i.e., there are no entries
+ /// matching "user_*" in the database dir.
+ pub fn is_empty(&self) -> Result<bool> {
+ let dir = Self::with_retry_interrupted(|| fs::read_dir(self.path.as_path()))
+ .context("In is_empty: Failed to open legacy blob database.")?;
+ for entry in dir {
+ if (*entry.context("In is_empty: Trying to access dir entry")?.file_name())
+ .to_str()
+ .map_or(false, |f| f.starts_with("user_"))
+ {
+ return Ok(false);
+ }
+ }
+ Ok(true)
+ }
+
+ /// Returns whether the legacy blob database is empty for the given user, i.e., the
+ /// user's "user_<user_id>" directory is missing or contains no entries.
+ pub fn is_empty_user(&self, user_id: u32) -> Result<bool> {
+ let mut user_path = self.path.clone();
+ user_path.push(format!("user_{}", user_id));
+ if !user_path.as_path().is_dir() {
+ return Ok(true);
+ }
+ Ok(Self::with_retry_interrupted(|| user_path.read_dir())
+ .context("In is_empty_user: Failed to open legacy user dir.")?
+ .next()
+ .is_none())
+ }
+
+ fn extract_alias(encoded_alias: &str) -> Option<String> {
+ // We can check the encoded alias because the prefixes we are interested
+ // in are all in the printable range that don't get mangled.
+ for prefix in &["USRPKEY_", "USRSKEY_", "USRCERT_", "CACERT_"] {
+ if let Some(alias) = encoded_alias.strip_prefix(prefix) {
+ return Self::decode_alias(&alias).ok();
+ }
+ }
+ None
+ }
+
+ /// List all entries for a given user. The strings are unchanged file names, i.e.,
+ /// encoded with UID prefix.
+ fn list_user(&self, user_id: u32) -> Result<Vec<String>> {
+ let path = self.make_user_path_name(user_id);
+ let dir =
+ Self::with_retry_interrupted(|| fs::read_dir(path.as_path())).with_context(|| {
+ format!("In list_user: Failed to open legacy blob database. {:?}", path)
+ })?;
+ let mut result: Vec<String> = Vec::new();
+ for entry in dir {
+ let file_name = entry.context("In list_user: Trying to access dir entry")?.file_name();
+ if let Some(f) = file_name.to_str() {
+ result.push(f.to_string())
+ }
+ }
+ Ok(result)
+ }
+
+ /// List all keystore entries belonging to the given uid.
+ pub fn list_keystore_entries_for_uid(&self, uid: u32) -> Result<Vec<String>> {
+ let user_id = uid_to_android_user(uid);
+
+ let user_entries = self
+ .list_user(user_id)
+ .context("In list_keystore_entries_for_uid: Trying to list user.")?;
+
+ let uid_str = format!("{}_", uid);
+
+ let mut result: Vec<String> = user_entries
+ .into_iter()
+ .filter_map(|v| {
+ if !v.starts_with(&uid_str) {
+ return None;
+ }
+ let encoded_alias = &v[uid_str.len()..];
+ Self::extract_alias(encoded_alias)
+ })
+ .collect();
+
+ result.sort_unstable();
+ result.dedup();
+ Ok(result)
+ }
+
+ fn with_retry_interrupted<F, T>(f: F) -> IoResult<T>
+ where
+ F: Fn() -> IoResult<T>,
+ {
+ loop {
+ match f() {
+ Ok(v) => return Ok(v),
+ Err(e) => match e.kind() {
+ ErrorKind::Interrupted => continue,
+ _ => return Err(e),
+ },
+ }
+ }
+ }
+
+ /// Deletes all files belonging to the given keystore entry. Also removes the
+ /// user_<uid> directory if it is empty after the deletion.
+ pub fn remove_keystore_entry(&self, uid: u32, alias: &str) -> Result<bool> {
+ let mut something_was_deleted = false;
+ let prefixes = ["USRPKEY", "USRSKEY"];
+ for prefix in &prefixes {
+ let path = self.make_blob_filename(uid, alias, prefix);
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ // Only a subset of keys are expected.
+ ErrorKind::NotFound => continue,
+ // Log error but ignore.
+ _ => log::error!("Error while deleting key blob entries. {:?}", e),
+ }
+ }
+ let path = self.make_chr_filename(uid, alias, prefix);
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ ErrorKind::NotFound => {
+ log::info!("No characteristics file found for legacy key blob.")
+ }
+ // Log error but ignore.
+ _ => log::error!("Error while deleting key blob entries. {:?}", e),
+ }
+ }
+ something_was_deleted = true;
+ // Only one of USRPKEY and USRSKEY can be present. So we can end the loop
+ // if we reach this point.
+ break;
+ }
+
+ let prefixes = ["USRCERT", "CACERT"];
+ for prefix in &prefixes {
+ let path = self.make_blob_filename(uid, alias, prefix);
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ // USRCERT and CACERT are optional; either, both, or neither may be present.
+ ErrorKind::NotFound => continue,
+ // Log error but ignore.
+ _ => log::error!("Error while deleting key blob entries. {:?}", e),
+ }
+ something_was_deleted = true;
+ }
+ }
+
+ if something_was_deleted {
+ let user_id = uid_to_android_user(uid);
+ if self
+ .is_empty_user(user_id)
+ .context("In remove_keystore_entry: Trying to check for empty user dir.")?
+ {
+ let user_path = self.make_user_path_name(user_id);
+ Self::with_retry_interrupted(|| fs::remove_dir(user_path.as_path())).ok();
+ }
+ }
+
+ Ok(something_was_deleted)
+ }
+
+ /// Load a legacy key blob entry by uid and alias.
+ pub fn load_by_uid_alias(
&self,
uid: u32,
alias: &str,
- key_manager: &SuperKeyManager,
- ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>, KeyMetaData)>
- {
- let metadata = KeyMetaData::new();
-
+ key_manager: Option<&SuperKeyManager>,
+ ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
let km_blob = self.read_km_blob_file(uid, alias).context("In load_by_uid_alias.")?;
let km_blob = match km_blob {
Some((km_blob, prefix)) => {
- let km_blob =
- match km_blob {
- Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
- // Unwrap the key blob if required.
- Blob { flags, value: BlobValue::Encrypted { iv, tag, data } } => {
+ let km_blob = match km_blob {
+ Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
+ // Unwrap the key blob if required and if we have key_manager.
+ Blob { flags, value: BlobValue::Encrypted { ref iv, ref tag, ref data } } => {
+ if let Some(key_manager) = key_manager {
let decrypted = match key_manager
.get_per_boot_key_by_user_id(uid_to_android_user(uid))
{
- Some(key) => aes_gcm_decrypt(&data, &iv, &tag, &key).context(
+ Some(key) => aes_gcm_decrypt(data, iv, tag, &(key.get_key()))
+ .context(
"In load_by_uid_alias: while trying to decrypt legacy blob.",
)?,
None => {
@@ -687,11 +858,16 @@
}
};
Blob { flags, value: BlobValue::Decrypted(decrypted) }
+ } else {
+ km_blob
}
- _ => return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
+ }
+ _ => {
+ return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
"In load_by_uid_alias: Found wrong blob type in legacy key blob file.",
- ),
- };
+ )
+ }
+ };
let hw_sec_level = match km_blob.is_strongbox() {
true => SecurityLevel::STRONGBOX,
@@ -729,14 +905,17 @@
}
};
- Ok((km_blob, user_cert, ca_cert, metadata))
+ Ok((km_blob, user_cert, ca_cert))
+ }
+
+ /// Returns true if the given user has a super key.
+ pub fn has_super_key(&self, user_id: u32) -> bool {
+ self.make_super_key_filename(user_id).is_file()
}
/// Load and decrypt legacy super key blob.
pub fn load_super_key(&self, user_id: u32, pw: &[u8]) -> Result<Option<ZVec>> {
- let mut path = self.path.clone();
- path.push(&format!("user_{}", user_id));
- path.push(".masterkey");
+ let path = self.make_super_key_filename(user_id);
let blob = Self::read_generic_blob(&path)
.context("In load_super_key: While loading super key.")?;
@@ -763,6 +942,18 @@
Ok(blob)
}
+
+ /// Removes the super key for the given user from the legacy database.
+ /// If this was the last entry in the user's database, this function removes
+ /// the user_<uid> directory as well.
+ pub fn remove_super_key(&self, user_id: u32) {
+ let path = self.make_super_key_filename(user_id);
+ Self::with_retry_interrupted(|| fs::remove_file(path.as_path())).ok();
+ if self.is_empty_user(user_id).ok().unwrap_or(false) {
+ let path = self.make_user_path_name(user_id);
+ Self::with_retry_interrupted(|| fs::remove_dir(path.as_path())).ok();
+ }
+ }
}
#[cfg(test)]
@@ -897,6 +1088,37 @@
}
#[test]
+ fn test_is_empty() {
+ let temp_dir = TempDir::new("test_is_empty").expect("Failed to create temp dir.");
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert!(legacy_blob_loader.is_empty().expect("Should succeed and be empty."));
+
+ let _db = crate::database::KeystoreDB::new(temp_dir.path(), None)
+ .expect("Failed to open database.");
+
+ assert!(legacy_blob_loader.is_empty().expect("Should succeed and still be empty."));
+
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).expect("Failed to create user_0.");
+
+ assert!(!legacy_blob_loader.is_empty().expect("Should succeed but not be empty."));
+
+ std::fs::create_dir(&*temp_dir.build().push("user_10")).expect("Failed to create user_10.");
+
+ assert!(!legacy_blob_loader.is_empty().expect("Should succeed but still not be empty."));
+
+ std::fs::remove_dir_all(&*temp_dir.build().push("user_0"))
+ .expect("Failed to remove user_0.");
+
+ assert!(!legacy_blob_loader.is_empty().expect("Should succeed but still not be empty."));
+
+ std::fs::remove_dir_all(&*temp_dir.build().push("user_10"))
+ .expect("Failed to remove user_10.");
+
+ assert!(legacy_blob_loader.is_empty().expect("Should succeed and be empty again."));
+ }
+
+ #[test]
fn test_legacy_blobs() -> anyhow::Result<()> {
let temp_dir = TempDir::new("legacy_blob_test")?;
std::fs::create_dir(&*temp_dir.build().push("user_0"))?;
@@ -938,32 +1160,32 @@
)?;
let key_manager = crate::super_key::SuperKeyManager::new();
- let mut db = crate::database::KeystoreDB::new(temp_dir.path())?;
+ let mut db = crate::database::KeystoreDB::new(temp_dir.path(), None)?;
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
assert_eq!(
legacy_blob_loader
- .load_by_uid_alias(10223, "authbound", &key_manager)
+ .load_by_uid_alias(10223, "authbound", Some(&key_manager))
.unwrap_err()
.root_cause()
.downcast_ref::<error::Error>(),
Some(&error::Error::Rc(ResponseCode::LOCKED))
);
- key_manager.unlock_user_key(0, PASSWORD, &mut db, &legacy_blob_loader)?;
+ key_manager.unlock_user_key(&mut db, 0, PASSWORD, &legacy_blob_loader)?;
- if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain), _kp) =
- legacy_blob_loader.load_by_uid_alias(10223, "authbound", &key_manager)?
+ if let (Some((Blob { flags, value: _ }, _params)), Some(cert), Some(chain)) =
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
{
assert_eq!(flags, 4);
- assert_eq!(value, BlobValue::Decrypted(DECRYPTED_USRPKEY_AUTHBOUND.try_into()?));
+ //assert_eq!(value, BlobValue::Encrypted(..));
assert_eq!(&cert[..], LOADED_CERT_AUTHBOUND);
assert_eq!(&chain[..], LOADED_CACERT_AUTHBOUND);
} else {
panic!("");
}
- if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain), _kp) =
- legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &key_manager)?
+ if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
{
assert_eq!(flags, 0);
assert_eq!(value, BlobValue::Decrypted(LOADED_USRPKEY_NON_AUTHBOUND.try_into()?));
@@ -973,6 +1195,33 @@
panic!("");
}
+ legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+ legacy_blob_loader
+ .remove_keystore_entry(10223, "non_authbound")
+ .expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+ );
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty()?);
+ assert!(!legacy_blob_loader.is_empty_user(0)?);
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1)?);
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0)?);
+ assert!(legacy_blob_loader.is_empty()?);
+
Ok(())
}
}
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
new file mode 100644
index 0000000..60c6bca
--- /dev/null
+++ b/keystore2/src/legacy_migrator.rs
@@ -0,0 +1,570 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module acts as a bridge between the legacy key database and the keystore2 database.
+
+use crate::database::{
+ BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData, KeyMetaEntry,
+ KeystoreDB, Uuid, KEYSTORE_UUID,
+};
+use crate::error::Error;
+use crate::legacy_blob::BlobValue;
+use crate::utils::uid_to_android_user;
+use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
+};
+use anyhow::{Context, Result};
+use core::ops::Deref;
+use keystore2_crypto::ZVec;
+use std::collections::{HashMap, HashSet};
+use std::convert::TryInto;
+use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::{Arc, Mutex};
+
+/// Represents LegacyMigrator.
+pub struct LegacyMigrator {
+ async_task: Arc<AsyncTask>,
+ initializer: Mutex<
+ Option<
+ Box<
+ dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+ + Send
+ + 'static,
+ >,
+ >,
+ >,
+ /// This atomic is used for cheap interior mutability. It is intended to prevent
+ /// expensive calls into the legacy migrator when the legacy database is empty.
+ /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
+ /// of time. This is tolerable in favor of the common case.
+ state: AtomicU8,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct RecentMigration {
+ uid: u32,
+ alias: String,
+}
+
+impl RecentMigration {
+ fn new(uid: u32, alias: String) -> Self {
+ Self { uid, alias }
+ }
+}
+
+struct LegacyMigratorState {
+ recently_migrated: HashSet<RecentMigration>,
+ recently_migrated_super_key: HashSet<u32>,
+ legacy_loader: Arc<LegacyBlobLoader>,
+ sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
+ db: KeystoreDB,
+}
+
+impl LegacyMigrator {
+ const WIFI_NAMESPACE: i64 = 102;
+ const AID_WIFI: u32 = 1010;
+
+ const STATE_UNINITIALIZED: u8 = 0;
+ const STATE_READY: u8 = 1;
+ const STATE_EMPTY: u8 = 2;
+
+ /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
+ /// worker.
+ pub fn new(async_task: Arc<AsyncTask>) -> Self {
+ Self {
+ async_task,
+ initializer: Default::default(),
+ state: AtomicU8::new(Self::STATE_UNINITIALIZED),
+ }
+ }
+
+ /// Initialization of the legacy migrator must be deferred, because keystore starts very early.
+ /// At this time the data partition may not be mounted. So we cannot open database connections
+ /// until we get actual key load requests. This sets the function that the legacy loader
+ /// uses to connect to the database.
+ pub fn set_init<F>(&self, f_init: F) -> Result<()>
+ where
+ F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+ + Send
+ + 'static,
+ {
+ let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");
+
+ // If we are not uninitialized we have no business setting the initializer.
+ if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
+ return Ok(());
+ }
+
+ // Only set the initializer if it hasn't been set before.
+ if initializer.is_none() {
+ *initializer = Some(Box::new(f_init))
+ }
+
+ Ok(())
+ }
+
+ /// This function is called by the migration requestor to check if it is worth
+ /// making a migration request. It also transitions the state from UNINITIALIZED
+ /// to READY or EMPTY on first use. The deferred initialization is necessary, because
+ /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
+ /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
+ /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
+ /// was not initialized and cannot be initialized.
+ fn check_state(&self) -> Result<u8> {
+ let mut first_try = true;
+ loop {
+ match (self.state.load(Ordering::Relaxed), first_try) {
+ (Self::STATE_EMPTY, _) => {
+ return Ok(Self::STATE_EMPTY);
+ }
+ (Self::STATE_UNINITIALIZED, true) => {
+ // If we find the legacy loader uninitialized, we grab the initializer lock,
+ // check if the legacy database is empty, and if not, schedule an initialization
+ // request. Coming out of the initializer lock, the state is either EMPTY or
+ // READY.
+ let mut initializer = self.initializer.lock().unwrap();
+
+ if let Some(initializer) = initializer.take() {
+ let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();
+
+ if legacy_loader.is_empty().context(
+ "In check_state: Trying to check if the legacy database is empty.",
+ )? {
+ self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
+ return Ok(Self::STATE_EMPTY);
+ }
+
+ self.async_task.queue_hi(move |shelf| {
+ shelf.get_or_put_with(|| LegacyMigratorState {
+ recently_migrated: Default::default(),
+ recently_migrated_super_key: Default::default(),
+ legacy_loader,
+ sec_level_to_km_uuid,
+ db,
+ });
+ });
+
+ // It is safe to set this here even though the async task may not yet have
+ // run because any thread observing this will not be able to schedule a
+ // task that can run before the initialization.
+ // Also we can only transition out of this state while having the
+ // initializer lock and having found an initializer.
+ self.state.store(Self::STATE_READY, Ordering::Relaxed);
+ return Ok(Self::STATE_READY);
+ } else {
+ // There is a chance that we just lost the race from state.load() to
+ // grabbing the initializer mutex. If that is the case the state must
+ // be EMPTY or READY after coming out of the lock. So we can give it
+ // one more try.
+ first_try = false;
+ continue;
+ }
+ }
+ (Self::STATE_UNINITIALIZED, false) => {
+ // Okay, tough luck. The legacy loader was really completely uninitialized.
+ return Err(Error::sys()).context(
+ "In check_state: Legacy loader should not be called uninitialized.",
+ );
+ }
+ (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
+ (s, _) => panic!("Unknown legacy migrator state. {} ", s),
+ }
+ }
+ }
+
+ /// List all aliases for uid in the legacy database.
+ pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+ let uid = match (domain, namespace) {
+ (Domain::APP, namespace) => namespace as u32,
+ (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
+ _ => return Ok(Vec::new()),
+ };
+ self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
+ |v| {
+ v.into_iter()
+ .map(|alias| KeyDescriptor {
+ domain,
+ nspace: namespace,
+ alias: Some(alias),
+ blob: None,
+ })
+ .collect()
+ },
+ )
+ }
+
+ /// Sends the given closure to the migrator thread for execution after calling check_state.
+ /// Returns None if the database was empty and the request was not executed.
+ /// Otherwise returns Some with the result produced by the migration request.
+ /// The loader state may transition to STATE_EMPTY during the execution of this function.
+ fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
+ where
+ F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
+ {
+ // Short circuit if the database is empty or not initialized (error case).
+ match self.check_state().context("In do_serialized: Checking state.") {
+ Ok(LegacyMigrator::STATE_EMPTY) => return None,
+ Ok(LegacyMigrator::STATE_READY) => {}
+ Err(e) => return Some(Err(e)),
+ Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
+ }
+
+ // We have established that there may be a key in the legacy database.
+ // Now we schedule a migration request.
+ let (sender, receiver) = channel();
+ self.async_task.queue_hi(move |shelf| {
+ // Get the migrator state from the shelf.
+ // There may not be a state. This can happen if this migration request was scheduled
+ // before a previous request established that the legacy database was empty
+ // and removed the state from the shelf. Since we know now that the database
+ // is empty, we can return None here.
+ let (new_state, result) = if let Some(legacy_migrator_state) =
+ shelf.get_downcast_mut::<LegacyMigratorState>()
+ {
+ let result = f(legacy_migrator_state);
+ (legacy_migrator_state.check_empty(), Some(result))
+ } else {
+ (Self::STATE_EMPTY, None)
+ };
+
+ // If the migration request determined that the database is now empty, we discard
+ // the state from the shelf to free up the resources we won't need any longer.
+ if result.is_some() && new_state == Self::STATE_EMPTY {
+ shelf.remove_downcast_ref::<LegacyMigratorState>();
+ }
+
+ // Send the result to the requester.
+ if let Err(e) = sender.send((new_state, result)) {
+ log::error!("In do_serialized. Error in sending the result. {:?}", e);
+ }
+ });
+
+ let (new_state, result) = match receiver.recv() {
+ Err(e) => {
+ return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
+ }
+ Ok(r) => r,
+ };
+
+ // We can only transition to EMPTY but never back.
+ // The migrator never creates any legacy blobs.
+ if new_state == Self::STATE_EMPTY {
+ self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
+ }
+
+ result
+ }
+
+ /// Runs the key_accessor function and returns its result. If it returns an error and the
+ /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
+ /// the legacy database to the new database and runs the key_accessor function again if
+ /// the migration request was successful.
+ pub fn with_try_migrate<F, T>(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ key_accessor: F,
+ ) -> Result<T>
+ where
+ F: Fn() -> Result<T>,
+ {
+ // Access the key and return on success.
+ match key_accessor() {
+ Ok(result) => return Ok(result),
+ Err(e) => match e.root_cause().downcast_ref::<Error>() {
+ Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
+ _ => return Err(e),
+ },
+ }
+
+ // Filter inputs. We can only load legacy app domain keys and some special rules due
+ // to which we migrate keys transparently to an SELINUX domain.
+ let uid = match key {
+ KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
+ KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
+ match *nspace {
+ Self::WIFI_NAMESPACE => Self::AID_WIFI,
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context(format!("No legacy keys for namespace {}", nspace))
+ }
+ }
+ }
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("No legacy keys for key descriptor.")
+ }
+ };
+
+ let key_clone = key.clone();
+ let result = self
+ .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
+
+ if let Some(result) = result {
+ result?;
+ // After successful migration try again.
+ key_accessor()
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
+ }
+ }
+
+ /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
+ /// this function makes a migration request and on success retries the key_accessor.
+ pub fn with_try_migrate_super_key<F, T>(
+ &self,
+ user_id: u32,
+ pw: &[u8],
+ mut key_accessor: F,
+ ) -> Result<Option<T>>
+ where
+ F: FnMut() -> Result<Option<T>>,
+ {
+ match key_accessor() {
+ Ok(Some(result)) => return Ok(Some(result)),
+ Ok(None) => {}
+ Err(e) => return Err(e),
+ }
+
+ let pw: ZVec = pw
+ .try_into()
+ .context("In with_try_migrate_super_key: copying the password into a zvec.")?;
+ let result = self.do_serialized(move |migrator_state| {
+ migrator_state.check_and_migrate_super_key(user_id, pw)
+ });
+
+ if let Some(result) = result {
+ result?;
+ // After successful migration try again.
+ key_accessor()
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Queries the legacy database for the presence of a super key for the given user.
+ pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
+ let result =
+ self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
+ result.unwrap_or(Ok(false))
+ }
+}
+
+impl LegacyMigratorState {
+ fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
+ let sec_level = if is_strongbox {
+ SecurityLevel::STRONGBOX
+ } else {
+ SecurityLevel::TRUSTED_ENVIRONMENT
+ };
+
+ self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
+ anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
+ })
+ }
+
+ fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
+ self.legacy_loader
+ .list_keystore_entries_for_uid(uid)
+ .context("In list_uid: Trying to list legacy entries.")
+ }
+
+ /// This is a key migration request that can run in the migrator thread. This should
+ /// be passed to do_serialized.
+ fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
+ let alias = key.alias.clone().ok_or_else(|| {
+ anyhow::anyhow!(Error::sys()).context(concat!(
+ "In check_and_migrate: Must be Some because ",
+ "our caller must not have called us otherwise."
+ ))
+ })?;
+
+ if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
+ return Ok(());
+ }
+
+ if key.domain == Domain::APP {
+ key.nspace = uid as i64;
+ }
+
+ // If the key is not found in the cache, try to load from the legacy database.
+ let (km_blob_params, user_cert, ca_cert) = self
+ .legacy_loader
+ .load_by_uid_alias(uid, &alias, None)
+ .context("In check_and_migrate: Trying to load legacy blob.")?;
+ let result = match km_blob_params {
+ Some((km_blob, params)) => {
+ let is_strongbox = km_blob.is_strongbox();
+ let (blob, mut blob_metadata) = match km_blob.take_value() {
+ BlobValue::Encrypted { iv, tag, data } => {
+ // Get super key id for user id.
+ let user_id = uid_to_android_user(uid as u32);
+
+ let super_key_id = match self
+ .db
+ .load_super_key(user_id)
+ .context("In check_and_migrate: Failed to load super key")?
+ {
+ Some((_, entry)) => entry.id(),
+ None => {
+ // This might be the first time we access the super key,
+ // and it may not have been migrated. We cannot import
+ // the legacy super key now, because we need to re-encrypt
+ // it which we cannot do if we are not unlocked, which we are
+ // not because otherwise the key would have been migrated.
+ // We can check, though, whether the key exists. If it does,
+ // we can return Locked. Otherwise, we can delete the
+ // key and return NotFound, because the key will never
+ // be unlocked again.
+ if self.legacy_loader.has_super_key(user_id) {
+ return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
+ "In check_and_migrate: Cannot migrate super key of this ",
+ "key while user is locked."
+ ));
+ } else {
+ self.legacy_loader.remove_keystore_entry(uid, &alias).context(
+ concat!(
+ "In check_and_migrate: ",
+ "Trying to remove obsolete key."
+ ),
+ )?;
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate: Obsolete key.");
+ }
+ }
+ };
+
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ blob_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ (LegacyBlob::Vec(data), blob_metadata)
+ }
+ BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate: Legacy key has unexpected type.")
+ }
+ };
+
+ let km_uuid = self
+ .get_km_uuid(is_strongbox)
+ .context("In check_and_migrate: Trying to get KM UUID")?;
+ blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+
+ let mut metadata = KeyMetaData::new();
+ let creation_date = DateTime::now()
+ .context("In check_and_migrate: Trying to make creation time.")?;
+ metadata.add(KeyMetaEntry::CreationDate(creation_date));
+
+ // Store legacy key in the database.
+ self.db
+ .store_new_key(
+ &key,
+ ¶ms,
+ &(&blob, &blob_metadata),
+ &CertificateInfo::new(user_cert, ca_cert),
+ &metadata,
+ &km_uuid,
+ )
+ .context("In check_and_migrate.")?;
+ Ok(())
+ }
+ None => {
+ if let Some(ca_cert) = ca_cert {
+ self.db
+ .store_new_certificate(&key, &ca_cert, &KEYSTORE_UUID)
+ .context("In check_and_migrate: Failed to insert new certificate.")?;
+ Ok(())
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate: Legacy key not found.")
+ }
+ }
+ };
+
+ match result {
+ Ok(()) => {
+ // Add the key to the migrated_keys list.
+ self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
+ // Delete legacy key from the file system
+ self.legacy_loader
+ .remove_keystore_entry(uid, &alias)
+ .context("In check_and_migrate: Trying to remove migrated key.")?;
+ Ok(())
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn check_and_migrate_super_key(&mut self, user_id: u32, pw: ZVec) -> Result<()> {
+ if self.recently_migrated_super_key.contains(&user_id) {
+ return Ok(());
+ }
+
+ if let Some(super_key) = self
+ .legacy_loader
+ .load_super_key(user_id, &pw)
+ .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
+ {
+ let (blob, blob_metadata) =
+ crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, &pw)
+ .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
+
+ self.db.store_super_key(user_id, &(&blob, &blob_metadata)).context(concat!(
+ "In check_and_migrate_super_key: ",
+ "Trying to insert legacy super_key into the database."
+ ))?;
+ self.legacy_loader.remove_super_key(user_id);
+ self.recently_migrated_super_key.insert(user_id);
+ Ok(())
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate_super_key: No key found do migrate.")
+ }
+ }
+
+ fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
+ Ok(self.recently_migrated_super_key.contains(&user_id)
+ || self.legacy_loader.has_super_key(user_id))
+ }
+
+ fn check_empty(&self) -> u8 {
+ if self.legacy_loader.is_empty().unwrap_or(false) {
+ LegacyMigrator::STATE_EMPTY
+ } else {
+ LegacyMigrator::STATE_READY
+ }
+ }
+}
+
+enum LegacyBlob {
+ Vec(Vec<u8>),
+ ZVec(ZVec),
+}
+
+impl Deref for LegacyBlob {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ Self::Vec(v) => &v,
+ Self::ZVec(v) => &v,
+ }
+ }
+}
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index f9554ea..358fce8 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -24,11 +24,13 @@
/// Internal Representation of Key Parameter and convenience functions.
pub mod key_parameter;
pub mod legacy_blob;
+pub mod legacy_migrator;
pub mod operation;
pub mod permission;
pub mod remote_provisioning;
pub mod security_level;
pub mod service;
+pub mod user_manager;
pub mod utils;
mod async_task;
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index 0f0ca04..576ac3f 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -299,9 +299,15 @@
/// Checked when Keystore 2.0 gets locked.
Lock = 0x10, selinux name: lock;
/// Checked when Keystore 2.0 shall be reset.
- Reset = 0x20, selinux name: reset;
+ Reset = 0x20, selinux name: reset;
/// Checked when Keystore 2.0 shall be unlocked.
- Unlock = 0x40, selinux name: unlock;
+ Unlock = 0x40, selinux name: unlock;
+ /// Checked when user is added or removed.
+ ChangeUser = 0x80, selinux name: change_user;
+ /// Checked when password of the user is changed.
+ ChangePassword = 0x100, selinux name: change_password;
+ /// Checked when a UID is cleared.
+ ClearUID = 0x200, selinux name: clear_uid;
}
);
@@ -659,6 +665,11 @@
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::lock()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::reset()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::unlock()).is_ok());
+ assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::change_user()).is_ok());
+ assert!(
+ check_keystore_permission(&system_server_ctx, KeystorePerm::change_password()).is_ok()
+ );
+ assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::clear_uid()).is_ok());
let shell_ctx = Context::new("u:r:shell:s0")?;
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::add_auth()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_ns()));
@@ -667,6 +678,9 @@
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::lock()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::reset()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::unlock()));
+ assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::change_user()));
+ assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::change_password()));
+ assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_uid()));
Ok(())
}
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 417e3c5..5e1ce84 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -16,10 +16,11 @@
//! This crate implements the IKeystoreSecurityLevel interface.
-use crate::{database::Uuid, gc::Gc, globals::get_keymint_device};
+use crate::globals::get_keymint_device;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- Algorithm::Algorithm, HardwareAuthenticatorType::HardwareAuthenticatorType,
- IKeyMintDevice::IKeyMintDevice, KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
+ Algorithm::Algorithm, AttestationKey::AttestationKey,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, IKeyMintDevice::IKeyMintDevice,
+ KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
KeyMintHardwareInfo::KeyMintHardwareInfo, KeyParameter::KeyParameter,
KeyParameterValue::KeyParameterValue, SecurityLevel::SecurityLevel, Tag::Tag,
};
@@ -31,22 +32,20 @@
KeyMetadata::KeyMetadata, KeyParameters::KeyParameters,
};
-use crate::globals::ENFORCEMENTS;
+use crate::database::{CertificateInfo, KeyIdGuard};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
use crate::key_parameter::KeyParameter as KsKeyParam;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
+use crate::super_key::{KeyBlob, SuperKeyManager};
use crate::utils::{check_key_permission, uid_to_android_user, Asp};
use crate::{
- database::{CertificateInfo, KeyIdGuard},
- globals::DB,
-};
-use crate::{
- database::{DateTime, KeyMetaData, KeyMetaEntry, KeyType},
- permission::KeyPerm,
-};
-use crate::{
- database::{KeyEntry, KeyEntryLoadBits, SubComponentType},
+ database::{
+ BlobMetaData, BlobMetaEntry, DateTime, KeyEntry, KeyEntryLoadBits, KeyMetaData,
+ KeyMetaEntry, KeyType, SubComponentType, Uuid,
+ },
operation::KeystoreOperation,
operation::OperationDb,
+ permission::KeyPerm,
};
use crate::{
error::{self, map_km_error, map_or_log_err, Error, ErrorCode},
@@ -54,6 +53,7 @@
};
use anyhow::{anyhow, Context, Result};
use binder::{IBinder, Strong, ThreadState};
+use keystore2_crypto::parse_issuer_subject_from_certificate;
/// Implementation of the IKeystoreSecurityLevel Interface.
pub struct KeystoreSecurityLevel {
@@ -98,6 +98,7 @@
key: KeyDescriptor,
creation_result: KeyCreationResult,
user_id: u32,
+ flags: Option<i32>,
) -> Result<KeyMetadata> {
let KeyCreationResult {
keyBlob: key_blob,
@@ -130,6 +131,20 @@
SecurityLevel::SOFTWARE,
));
+ let (key_blob, mut blob_metadata) = DB
+ .with(|db| {
+ SUPER_KEY.handle_super_encryption_on_key_init(
+ &mut db.borrow_mut(),
+ &LEGACY_MIGRATOR,
+ &(key.domain),
+ &key_parameters,
+ flags,
+ user_id,
+ &key_blob,
+ )
+ })
+ .context("In store_new_key. Failed to handle super encryption.")?;
+
let creation_date = DateTime::now().context("Trying to make creation time.")?;
let key = match key.domain {
@@ -138,23 +153,21 @@
}
_ => DB
.with::<_, Result<KeyDescriptor>>(|db| {
- let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::CreationDate(creation_date));
+ let mut key_metadata = KeyMetaData::new();
+ key_metadata.add(KeyMetaEntry::CreationDate(creation_date));
+ blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
let mut db = db.borrow_mut();
- let (need_gc, key_id) = db
+ let key_id = db
.store_new_key(
&key,
&key_parameters,
- &key_blob,
+ &(&key_blob, &blob_metadata),
&cert_info,
- &metadata,
+ &key_metadata,
&self.km_uuid,
)
.context("In store_new_key.")?;
- if need_gc {
- Gc::notify_gc();
- }
Ok(KeyDescriptor {
domain: Domain::KEY_ID,
nspace: key_id.id(),
@@ -185,7 +198,7 @@
// so that we can use it by reference like the blob provided by the key descriptor.
// Otherwise, we would have to clone the blob from the key descriptor.
let scoping_blob: Vec<u8>;
- let (km_blob, key_properties, key_id_guard) = match key.domain {
+ let (km_blob, key_properties, key_id_guard, blob_metadata) = match key.domain {
Domain::BLOB => {
check_key_permission(KeyPerm::use_(), key, &None)
.context("In create_operation: checking use permission for Domain::BLOB.")?;
@@ -201,33 +214,36 @@
},
None,
None,
+ BlobMetaData::new(),
)
}
_ => {
let (key_id_guard, mut key_entry) = DB
.with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
- db.borrow_mut().load_key_entry(
- &key,
- KeyType::Client,
- KeyEntryLoadBits::KM,
- caller_uid,
- |k, av| check_key_permission(KeyPerm::use_(), k, &av),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::KM,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+ )
+ })
})
.context("In create_operation: Failed to load key blob.")?;
- scoping_blob = match key_entry.take_km_blob() {
- Some(blob) => blob,
- None => {
- return Err(Error::sys()).context(concat!(
- "In create_operation: Successfully loaded key entry,",
- " but KM blob was missing."
- ))
- }
- };
+
+ let (blob, blob_metadata) =
+ key_entry.take_key_blob_info().ok_or_else(Error::sys).context(concat!(
+ "In create_operation: Successfully loaded key entry, ",
+ "but KM blob was missing."
+ ))?;
+ scoping_blob = blob;
+
(
&scoping_blob,
Some((key_id_guard.id(), key_entry.into_key_parameters())),
Some(key_id_guard),
+ blob_metadata,
)
}
};
@@ -256,6 +272,12 @@
let immediate_hat = immediate_hat.unwrap_or_default();
+ let user_id = uid_to_android_user(caller_uid);
+
+ let km_blob = SUPER_KEY
+ .unwrap_key_if_required(&blob_metadata, km_blob)
+ .context("In create_operation. Failed to handle super encryption.")?;
+
let km_dev: Strong<dyn IKeyMintDevice> = self
.keymint
.get_interface()
@@ -265,7 +287,7 @@
.upgrade_keyblob_if_required_with(
&*km_dev,
key_id_guard,
- &km_blob,
+ &(&km_blob, &blob_metadata),
&operation_parameters,
|blob| loop {
match map_km_error(km_dev.begin(
@@ -359,7 +381,7 @@
fn generate_key(
&self,
key: &KeyDescriptor,
- attestation_key: Option<&KeyDescriptor>,
+ attest_key_descriptor: Option<&KeyDescriptor>,
params: &[KeyParameter],
flags: i32,
entropy: &[u8],
@@ -383,17 +405,76 @@
// generate_key requires the rebind permission.
check_key_permission(KeyPerm::rebind(), &key, &None).context("In generate_key.")?;
+ let attest_key = match attest_key_descriptor {
+ None => None,
+ Some(key) => Some(
+ self.get_attest_key(key, caller_uid)
+ .context("In generate_key: Trying to load attest key")?,
+ ),
+ };
+
let params = Self::add_certificate_parameters(caller_uid, params, &key)
.context("In generate_key: Trying to get aaid.")?;
let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface()?;
map_km_error(km_dev.addRngEntropy(entropy))
.context("In generate_key: Trying to add entropy.")?;
- let creation_result = map_km_error(km_dev.generateKey(¶ms))
+ let creation_result = map_km_error(km_dev.generateKey(¶ms, attest_key.as_ref()))
.context("In generate_key: While generating Key")?;
let user_id = uid_to_android_user(caller_uid);
- self.store_new_key(key, creation_result, user_id).context("In generate_key.")
+ self.store_new_key(key, creation_result, user_id, Some(flags)).context("In generate_key.")
+ }
+
+ fn get_attest_key(&self, key: &KeyDescriptor, caller_uid: u32) -> Result<AttestationKey> {
+ let (km_blob, cert) = self
+ .load_attest_key_blob_and_cert(&key, caller_uid)
+ .context("In get_attest_key: Failed to load blob and cert")?;
+
+ let issuer_subject: Vec<u8> = parse_issuer_subject_from_certificate(&cert)
+ .context("In get_attest_key: Failed to parse subject from certificate.")?;
+
+ Ok(AttestationKey {
+ keyBlob: km_blob.to_vec(),
+ attestKeyParams: [].to_vec(),
+ issuerSubjectName: issuer_subject,
+ })
+ }
+
+ fn load_attest_key_blob_and_cert(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ ) -> Result<(Vec<u8>, Vec<u8>)> {
+ match key.domain {
+ Domain::BLOB => Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT)).context(
+ "In load_attest_key_blob_and_cert: Domain::BLOB attestation keys not supported",
+ ),
+ _ => {
+ let (key_id_guard, mut key_entry) = DB
+ .with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
+ db.borrow_mut().load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+ )
+ })
+ .context("In load_attest_key_blob_and_cert: Failed to load key.")?;
+
+ let (blob, _) =
+ key_entry.take_key_blob_info().ok_or_else(Error::sys).context(concat!(
+ "In load_attest_key_blob_and_cert: Successfully loaded key entry,",
+ " but KM blob was missing."
+ ))?;
+ let cert = key_entry.take_cert().ok_or_else(Error::sys).context(concat!(
+ "In load_attest_key_blob_and_cert: Successfully loaded key entry,",
+ " but cert was missing."
+ ))?;
+ Ok((blob, cert))
+ }
+ }
}
fn import_key(
@@ -444,11 +525,12 @@
let km_dev: Strong<dyn IKeyMintDevice> =
self.keymint.get_interface().context("In import_key: Trying to get the KM device")?;
- let creation_result = map_km_error(km_dev.importKey(¶ms, format, key_data))
- .context("In import_key: Trying to call importKey")?;
+ let creation_result =
+ map_km_error(km_dev.importKey(¶ms, format, key_data, None /* attestKey */))
+ .context("In import_key: Trying to call importKey")?;
let user_id = uid_to_android_user(caller_uid);
- self.store_new_key(key, creation_result, user_id).context("In import_key.")
+ self.store_new_key(key, creation_result, user_id, Some(flags)).context("In import_key.")
}
fn import_wrapped_key(
@@ -482,6 +564,8 @@
}
let caller_uid = ThreadState::get_calling_uid();
+ let user_id = uid_to_android_user(caller_uid);
+
let key = match key.domain {
Domain::APP => KeyDescriptor {
domain: key.domain,
@@ -498,29 +582,32 @@
_ => panic!("Unreachable."),
};
- // import_wrapped_key requires the rebind permission for the new key.
+ // Import_wrapped_key requires the rebind permission for the new key.
check_key_permission(KeyPerm::rebind(), &key, &None).context("In import_wrapped_key.")?;
- let (wrapping_key_id_guard, wrapping_key_entry) = DB
+ let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
.with(|db| {
- db.borrow_mut().load_key_entry(
- &wrapping_key,
- KeyType::Client,
- KeyEntryLoadBits::KM,
- caller_uid,
- |k, av| check_key_permission(KeyPerm::use_(), k, &av),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &wrapping_key,
+ KeyType::Client,
+ KeyEntryLoadBits::KM,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+ )
+ })
})
.context("Failed to load wrapping key.")?;
- let wrapping_key_blob = match wrapping_key_entry.km_blob() {
- Some(blob) => blob,
- None => {
- return Err(error::Error::sys()).context(concat!(
- "No km_blob after successfully loading key.",
- " This should never happen."
- ))
- }
- };
+
+ let (wrapping_key_blob, wrapping_blob_metadata) = wrapping_key_entry
+ .take_key_blob_info()
+ .ok_or_else(error::Error::sys)
+ .context("No km_blob after successfully loading key. This should never happen.")?;
+
+ let wrapping_key_blob =
+ SUPER_KEY.unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob).context(
+ "In import_wrapped_key. Failed to handle super encryption for wrapping key.",
+ )?;
// km_dev.importWrappedKey does not return a certificate chain.
// TODO Do we assume that all wrapped keys are symmetric?
@@ -549,12 +636,12 @@
.upgrade_keyblob_if_required_with(
&*km_dev,
Some(wrapping_key_id_guard),
- wrapping_key_blob,
+ &(&wrapping_key_blob, &wrapping_blob_metadata),
&[],
|wrapping_blob| {
let creation_result = map_km_error(km_dev.importWrappedKey(
wrapped_data,
- wrapping_key_blob,
+ wrapping_blob,
masking_key,
¶ms,
pw_sid,
@@ -565,8 +652,7 @@
)
.context("In import_wrapped_key.")?;
- let user_id = uid_to_android_user(caller_uid);
- self.store_new_key(key, creation_result, user_id)
+ self.store_new_key(key, creation_result, user_id, None)
.context("In import_wrapped_key: Trying to store the new key.")
}
@@ -574,23 +660,37 @@
&self,
km_dev: &dyn IKeyMintDevice,
key_id_guard: Option<KeyIdGuard>,
- blob: &[u8],
+ blob_info: &(&KeyBlob, &BlobMetaData),
params: &[KeyParameter],
f: F,
) -> Result<(T, Option<Vec<u8>>)>
where
F: Fn(&[u8]) -> Result<T, Error>,
{
- match f(blob) {
+ match f(blob_info.0) {
Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
- let upgraded_blob = map_km_error(km_dev.upgradeKey(blob, params))
+ let upgraded_blob = map_km_error(km_dev.upgradeKey(blob_info.0, params))
.context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
+
+ let (upgraded_blob_to_be_stored, blob_metadata) =
+ SuperKeyManager::reencrypt_on_upgrade_if_required(blob_info.0, &upgraded_blob)
+ .context(
+ "In upgrade_keyblob_if_required_with: Failed to handle super encryption.",
+ )?;
+
+ let mut blob_metadata = blob_metadata.unwrap_or_else(BlobMetaData::new);
+ if let Some(uuid) = blob_info.1.km_uuid() {
+ blob_metadata.add(BlobMetaEntry::KmUuid(*uuid));
+ }
+
key_id_guard.map_or(Ok(()), |key_id_guard| {
DB.with(|db| {
- db.borrow_mut().set_blob(
+ let mut db = db.borrow_mut();
+ db.set_blob(
&key_id_guard,
SubComponentType::KEY_BLOB,
- Some(&upgraded_blob),
+ Some(&upgraded_blob_to_be_stored),
+ Some(&blob_metadata),
)
})
.context(concat!(
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 6aa7ed5..3a4bf82 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -20,22 +20,22 @@
use std::collections::HashMap;
+use crate::error::{self, map_or_log_err, ErrorCode};
use crate::permission::{KeyPerm, KeystorePerm};
use crate::security_level::KeystoreSecurityLevel;
use crate::utils::{
check_grant_permission, check_key_permission, check_keystore_permission,
key_parameters_to_authorizations, Asp,
};
-use crate::{database::Uuid, globals::DB};
+use crate::{
+ database::Uuid,
+ globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+};
use crate::{database::KEYSTORE_UUID, permission};
use crate::{
database::{KeyEntryLoadBits, KeyType, SubComponentType},
error::ResponseCode,
};
-use crate::{
- error::{self, map_or_log_err, ErrorCode},
- gc::Gc,
-};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel,
@@ -76,6 +76,15 @@
result.uuid_by_sec_level.insert(SecurityLevel::STRONGBOX, uuid);
}
+ let uuid_by_sec_level = result.uuid_by_sec_level.clone();
+ LEGACY_MIGRATOR
+ .set_init(move || {
+ (create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
+ })
+ .context(
+ "In KeystoreService::new_native_binder: Trying to initialize the legacy migrator.",
+ )?;
+
let result = BnKeystoreService::new_binder(result);
result.as_binder().set_requesting_sid(true);
Ok(result)
@@ -115,15 +124,18 @@
}
fn get_key_entry(&self, key: &KeyDescriptor) -> Result<KeyEntryResponse> {
+ let caller_uid = ThreadState::get_calling_uid();
let (key_id_guard, mut key_entry) = DB
.with(|db| {
- db.borrow_mut().load_key_entry(
- &key,
- KeyType::Client,
- KeyEntryLoadBits::PUBLIC,
- ThreadState::get_calling_uid(),
- |k, av| check_key_permission(KeyPerm::get_info(), k, &av),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::PUBLIC,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::get_info(), k, &av),
+ )
+ })
})
.context("In get_key_entry, while trying to load key info.")?;
@@ -164,18 +176,20 @@
public_cert: Option<&[u8]>,
certificate_chain: Option<&[u8]>,
) -> Result<()> {
+ let caller_uid = ThreadState::get_calling_uid();
DB.with::<_, Result<()>>(|db| {
- let mut db = db.borrow_mut();
- let entry = match db.load_key_entry(
- &key,
- KeyType::Client,
- KeyEntryLoadBits::NONE,
- ThreadState::get_calling_uid(),
- |k, av| {
- check_key_permission(KeyPerm::update(), k, &av)
- .context("In update_subcomponent.")
- },
- ) {
+ let entry = match LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::NONE,
+ caller_uid,
+ |k, av| {
+ check_key_permission(KeyPerm::update(), k, &av)
+ .context("In update_subcomponent.")
+ },
+ )
+ }) {
Err(e) => match e.root_cause().downcast_ref::<Error>() {
Some(Error::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
_ => Err(e),
@@ -184,11 +198,12 @@
}
.context("Failed to load key entry.")?;
+ let mut db = db.borrow_mut();
if let Some((key_id_guard, key_entry)) = entry {
- db.set_blob(&key_id_guard, SubComponentType::CERT, public_cert)
+ db.set_blob(&key_id_guard, SubComponentType::CERT, public_cert, None)
.context("Failed to update cert subcomponent.")?;
- db.set_blob(&key_id_guard, SubComponentType::CERT_CHAIN, certificate_chain)
+ db.set_blob(&key_id_guard, SubComponentType::CERT_CHAIN, certificate_chain, None)
.context("Failed to update cert chain subcomponent.")?;
return Ok(());
}
@@ -261,24 +276,34 @@
Ok(()) => {}
};
- DB.with(|db| {
- let mut db = db.borrow_mut();
- db.list(k.domain, k.nspace)
- })
+ let mut result = LEGACY_MIGRATOR
+ .list_uid(k.domain, k.nspace)
+ .context("In list_entries: Trying to list legacy keys.")?;
+
+ result.append(
+ &mut DB
+ .with(|db| {
+ let mut db = db.borrow_mut();
+ db.list(k.domain, k.nspace)
+ })
+ .context("In list_entries: Trying to list keystore database.")?,
+ );
+
+ result.sort_unstable();
+ result.dedup();
+ Ok(result)
}
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
- let need_gc = DB
- .with(|db| {
+ DB.with(|db| {
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
})
})
- .context("In delete_key: Trying to unbind the key.")?;
- if need_gc {
- Gc::notify_gc();
- }
+ })
+ .context("In delete_key: Trying to unbind the key.")?;
Ok(())
}
@@ -288,14 +313,17 @@
grantee_uid: i32,
access_vector: permission::KeyPermSet,
) -> Result<KeyDescriptor> {
+ let caller_uid = ThreadState::get_calling_uid();
DB.with(|db| {
- db.borrow_mut().grant(
- &key,
- ThreadState::get_calling_uid(),
- grantee_uid as u32,
- access_vector,
- |k, av| check_grant_permission(*av, k).context("During grant."),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().grant(
+ &key,
+ caller_uid,
+ grantee_uid as u32,
+ access_vector,
+ |k, av| check_grant_permission(*av, k).context("During grant."),
+ )
+ })
})
.context("In KeystoreService::grant.")
}
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 9872513..156d20d 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -15,15 +15,18 @@
#![allow(dead_code)]
use crate::{
- database::EncryptedBy, database::KeyMetaData, database::KeyMetaEntry, database::KeystoreDB,
- error::Error, error::ResponseCode, legacy_blob::LegacyBlobLoader,
+ database::BlobMetaData, database::BlobMetaEntry, database::EncryptedBy, database::KeyEntry,
+ database::KeyType, database::KeystoreDB, enforcements::Enforcements, error::Error,
+ error::ResponseCode, key_parameter::KeyParameter, legacy_blob::LegacyBlobLoader,
+ legacy_migrator::LegacyMigrator,
};
use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
use anyhow::{Context, Result};
use keystore2_crypto::{
- aes_gcm_decrypt, aes_gcm_encrypt, derive_key_from_password, generate_salt, ZVec,
- AES_256_KEY_LENGTH,
+ aes_gcm_decrypt, aes_gcm_encrypt, derive_key_from_password, generate_aes256_key, generate_salt,
+ ZVec, AES_256_KEY_LENGTH,
};
+use std::ops::Deref;
use std::{
collections::HashMap,
sync::Arc,
@@ -39,13 +42,30 @@
/// secret, that is itself derived from the user's lock screen knowledge factor (LSKF).
/// When the user unlocks the device for the first time, this key is unlocked, i.e., decrypted,
/// and stays memory resident until the device reboots.
- per_boot: Option<Arc<ZVec>>,
+ per_boot: Option<SuperKey>,
/// The screen lock key works like the per boot key with the distinction that it is cleared
/// from memory when the screen lock is engaged.
/// TODO the life cycle is not fully implemented at this time.
screen_lock: Option<Arc<ZVec>>,
}
+#[derive(Default, Clone)]
+pub struct SuperKey {
+ key: Arc<ZVec>,
+ // id of the super key in the database.
+ id: i64,
+}
+
+impl SuperKey {
+ pub fn get_key(&self) -> &Arc<ZVec> {
+ &self.key
+ }
+
+ pub fn get_id(&self) -> i64 {
+ self.id
+ }
+}
+
#[derive(Default)]
struct SkmState {
user_keys: HashMap<UserId, UserSuperKeys>,
@@ -87,18 +107,17 @@
data.key_index.clear();
}
- fn install_per_boot_key_for_user(&self, user: UserId, key_id: i64, key: ZVec) {
+ fn install_per_boot_key_for_user(&self, user: UserId, super_key: SuperKey) {
let mut data = self.data.lock().unwrap();
- let key = Arc::new(key);
- data.key_index.insert(key_id, Arc::downgrade(&key));
- data.user_keys.entry(user).or_default().per_boot = Some(key);
+ data.key_index.insert(super_key.id, Arc::downgrade(&(super_key.key)));
+ data.user_keys.entry(user).or_default().per_boot = Some(super_key);
}
fn get_key(&self, key_id: &i64) -> Option<Arc<ZVec>> {
self.data.lock().unwrap().key_index.get(key_id).and_then(|k| k.upgrade())
}
- pub fn get_per_boot_key_by_user_id(&self, user_id: u32) -> Option<Arc<ZVec>> {
+ pub fn get_per_boot_key_by_user_id(&self, user_id: u32) -> Option<SuperKey> {
let data = self.data.lock().unwrap();
data.user_keys.get(&user_id).map(|e| e.per_boot.clone()).flatten()
}
@@ -109,16 +128,16 @@
/// a key derived from the given password and stored in the database.
pub fn unlock_user_key(
&self,
+ db: &mut KeystoreDB,
user: UserId,
pw: &[u8],
- db: &mut KeystoreDB,
legacy_blob_loader: &LegacyBlobLoader,
) -> Result<()> {
let (_, entry) = db
.get_or_create_key_with(
Domain::APP,
user as u64 as i64,
- &"USER_SUPER_KEY",
+ KeystoreDB::USER_SUPER_KEY_ALIAS,
crate::database::KEYSTORE_UUID,
|| {
// For backward compatibility we need to check if there is a super key present.
@@ -128,63 +147,22 @@
let super_key = match super_key {
None => {
// No legacy file was found. So we generate a new key.
- keystore2_crypto::generate_aes256_key()
+ generate_aes256_key()
.context("In create_new_key: Failed to generate AES 256 key.")?
}
Some(key) => key,
};
- // Regardless of whether we loaded an old AES128 key or a new AES256 key,
- // we derive a AES256 key and re-encrypt the key before we insert it in the
- // database. The length of the key is preserved by the encryption so we don't
- // need any extra flags to inform us which algorithm to use it with.
- let salt =
- generate_salt().context("In create_new_key: Failed to generate salt.")?;
- let derived_key = derive_key_from_password(pw, Some(&salt), AES_256_KEY_LENGTH)
- .context("In create_new_key: Failed to derive password.")?;
- let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::EncryptedBy(EncryptedBy::Password));
- metadata.add(KeyMetaEntry::Salt(salt));
- let (encrypted_key, iv, tag) = aes_gcm_encrypt(&super_key, &derived_key)
- .context("In create_new_key: Failed to encrypt new super key.")?;
- metadata.add(KeyMetaEntry::Iv(iv));
- metadata.add(KeyMetaEntry::AeadTag(tag));
- Ok((encrypted_key, metadata))
+ // Regardless of whether we loaded an old AES128 key or generated a new AES256
+ // key as the super key, we derive a AES256 key from the password and re-encrypt
+ // the super key before we insert it in the database. The length of the key is
+ // preserved by the encryption so we don't need any extra flags to inform us
+ // which algorithm to use it with.
+ Self::encrypt_with_password(&super_key, pw).context("In create_new_key.")
},
)
.context("In unlock_user_key: Failed to get key id.")?;
- let metadata = entry.metadata();
- let super_key = match (
- metadata.encrypted_by(),
- metadata.salt(),
- metadata.iv(),
- metadata.aead_tag(),
- entry.km_blob(),
- ) {
- (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag), Some(blob)) => {
- let key = derive_key_from_password(pw, Some(salt), AES_256_KEY_LENGTH)
- .context("In unlock_user_key: Failed to generate key from password.")?;
-
- aes_gcm_decrypt(blob, iv, tag, &key)
- .context("In unlock_user_key: Failed to decrypt key blob.")?
- }
- (enc_by, salt, iv, tag, blob) => {
- return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
- concat!(
- "In unlock_user_key: Super key has incomplete metadata.",
- "Present: encrypted_by: {}, salt: {}, iv: {}, aead_tag: {}, blob: {}."
- ),
- enc_by.is_some(),
- salt.is_some(),
- iv.is_some(),
- tag.is_some(),
- blob.is_some()
- ));
- }
- };
-
- self.install_per_boot_key_for_user(user, entry.id(), super_key);
-
+ self.populate_cache_from_super_key_blob(user, entry, pw).context("In unlock_user_key.")?;
Ok(())
}
@@ -192,12 +170,13 @@
/// The function queries `metadata.encrypted_by()` to determine the encryption key.
/// It then check if the required key is memory resident, and if so decrypts the
/// blob.
- pub fn unwrap_key(&self, blob: &[u8], metadata: &KeyMetaData) -> Result<ZVec> {
+ pub fn unwrap_key<'a>(&self, blob: &'a [u8], metadata: &BlobMetaData) -> Result<KeyBlob<'a>> {
match metadata.encrypted_by() {
Some(EncryptedBy::KeyId(key_id)) => match self.get_key(key_id) {
- Some(key) => {
- Self::unwrap_key_with_key(blob, metadata, &key).context("In unwrap_key.")
- }
+ Some(key) => Ok(KeyBlob::Sensitive(
+ Self::unwrap_key_with_key(blob, metadata, &key).context("In unwrap_key.")?,
+ SuperKey { key: key.clone(), id: *key_id },
+ )),
None => Err(Error::Rc(ResponseCode::LOCKED))
.context("In unwrap_key: Key is not usable until the user entered their LSKF."),
},
@@ -207,7 +186,7 @@
}
/// Unwraps an encrypted key blob given an encryption key.
- fn unwrap_key_with_key(blob: &[u8], metadata: &KeyMetaData, key: &[u8]) -> Result<ZVec> {
+ fn unwrap_key_with_key(blob: &[u8], metadata: &BlobMetaData, key: &[u8]) -> Result<ZVec> {
match (metadata.iv(), metadata.aead_tag()) {
(Some(iv), Some(tag)) => aes_gcm_decrypt(blob, iv, tag, key)
.context("In unwrap_key_with_key: Failed to decrypt the key blob."),
@@ -221,4 +200,409 @@
)),
}
}
+
+ /// Checks if user has setup LSKF, even when super key cache is empty for the user.
+ pub fn super_key_exists_in_db_for_user(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: u32,
+ ) -> Result<bool> {
+ let key_in_db = db
+ .key_exists(
+ Domain::APP,
+ user_id as u64 as i64,
+ KeystoreDB::USER_SUPER_KEY_ALIAS,
+ KeyType::Super,
+ )
+ .context("In super_key_exists_in_db_for_user.")?;
+
+ if key_in_db {
+ Ok(key_in_db)
+ } else {
+ legacy_migrator
+ .has_super_key(user_id)
+ .context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
+ }
+ }
+
+ /// Checks if user has already setup LSKF (i.e. a super key is persisted in the database or the
+ /// legacy database). If not, return Uninitialized state.
+ /// Otherwise, decrypt the super key from the password and return LskfUnlocked state.
+ pub fn check_and_unlock_super_key(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: u32,
+ pw: &[u8],
+ ) -> Result<UserState> {
+ let result = legacy_migrator
+ .with_try_migrate_super_key(user_id, pw, || db.load_super_key(user_id))
+ .context("In check_and_unlock_super_key. Failed to load super key")?;
+
+ match result {
+ Some((_, entry)) => {
+ let super_key = self
+ .populate_cache_from_super_key_blob(user_id, entry, pw)
+ .context("In check_and_unlock_super_key.")?;
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ None => Ok(UserState::Uninitialized),
+ }
+ }
+
+ /// Checks if user has already setup LSKF (i.e. a super key is persisted in the database or the
+ /// legacy database). If so, return LskfLocked state.
+ /// If the password is provided, generate a new super key, encrypt with the password,
+ /// store in the database and populate the super key cache for the new user
+ /// and return LskfUnlocked state.
+ /// If the password is not provided, return Uninitialized state.
+ pub fn check_and_initialize_super_key(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: u32,
+ pw: Option<&[u8]>,
+ ) -> Result<UserState> {
+ let super_key_exists_in_db =
+ Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
+ "In check_and_initialize_super_key. Failed to check if super key exists.",
+ )?;
+ if super_key_exists_in_db {
+ Ok(UserState::LskfLocked)
+ } else if let Some(pw) = pw {
+            // Generate a new super key.
+ let super_key = generate_aes256_key()
+ .context("In check_and_initialize_super_key: Failed to generate AES 256 key.")?;
+            // Derive an AES256 key from the password and re-encrypt the super key
+            // before inserting it in the database.
+ let (encrypted_super_key, blob_metadata) = Self::encrypt_with_password(&super_key, pw)
+ .context("In check_and_initialize_super_key.")?;
+
+ let key_entry = db
+ .store_super_key(user_id, &(&encrypted_super_key, &blob_metadata))
+ .context("In check_and_initialize_super_key. Failed to store super key.")?;
+
+ let super_key = self
+ .populate_cache_from_super_key_blob(user_id, key_entry, pw)
+ .context("In check_and_initialize_super_key.")?;
+ Ok(UserState::LskfUnlocked(super_key))
+ } else {
+ Ok(UserState::Uninitialized)
+ }
+ }
+
+    // Helper function to populate the super key cache from the super key blob loaded from the database.
+ fn populate_cache_from_super_key_blob(
+ &self,
+ user_id: u32,
+ entry: KeyEntry,
+ pw: &[u8],
+ ) -> Result<SuperKey> {
+ let super_key = Self::extract_super_key_from_key_entry(entry, pw).context(
+ "In populate_cache_from_super_key_blob. Failed to extract super key from key entry",
+ )?;
+ self.install_per_boot_key_for_user(user_id, super_key.clone());
+ Ok(super_key)
+ }
+
+ /// Extracts super key from the entry loaded from the database
+ pub fn extract_super_key_from_key_entry(entry: KeyEntry, pw: &[u8]) -> Result<SuperKey> {
+ if let Some((blob, metadata)) = entry.key_blob_info() {
+ let key = match (
+ metadata.encrypted_by(),
+ metadata.salt(),
+ metadata.iv(),
+ metadata.aead_tag(),
+ ) {
+ (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
+ let key = derive_key_from_password(pw, Some(salt), AES_256_KEY_LENGTH).context(
+ "In extract_super_key_from_key_entry: Failed to generate key from password.",
+ )?;
+
+ aes_gcm_decrypt(blob, iv, tag, &key).context(
+ "In extract_super_key_from_key_entry: Failed to decrypt key blob.",
+ )?
+ }
+ (enc_by, salt, iv, tag) => {
+ return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
+ concat!(
+ "In extract_super_key_from_key_entry: Super key has incomplete metadata.",
+ "Present: encrypted_by: {}, salt: {}, iv: {}, aead_tag: {}."
+ ),
+ enc_by.is_some(),
+ salt.is_some(),
+ iv.is_some(),
+ tag.is_some()
+ ));
+ }
+ };
+ Ok(SuperKey { key: Arc::new(key), id: entry.id() })
+ } else {
+ Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
+ .context("In extract_super_key_from_key_entry: No key blob info.")
+ }
+ }
+
+ /// Encrypts the super key from a key derived from the password, before storing in the database.
+ pub fn encrypt_with_password(super_key: &[u8], pw: &[u8]) -> Result<(Vec<u8>, BlobMetaData)> {
+ let salt = generate_salt().context("In encrypt_with_password: Failed to generate salt.")?;
+ let derived_key = derive_key_from_password(pw, Some(&salt), AES_256_KEY_LENGTH)
+ .context("In encrypt_with_password: Failed to derive password.")?;
+ let mut metadata = BlobMetaData::new();
+ metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ metadata.add(BlobMetaEntry::Salt(salt));
+ let (encrypted_key, iv, tag) = aes_gcm_encrypt(super_key, &derived_key)
+ .context("In encrypt_with_password: Failed to encrypt new super key.")?;
+ metadata.add(BlobMetaEntry::Iv(iv));
+ metadata.add(BlobMetaEntry::AeadTag(tag));
+ Ok((encrypted_key, metadata))
+ }
+
+    // Encrypt the given key blob with the user's super key, if the super key exists and the
+    // device is unlocked. If the super key exists and the device is locked, or LSKF is not
+    // setup, return error. Note that it is out of the scope of this function to check if super
+    // encryption is required. Such a check should be performed before calling this function.
+    fn super_encrypt_on_key_init(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        user_id: u32,
+        key_blob: &[u8],
+    ) -> Result<(Vec<u8>, BlobMetaData)> {
+        match UserState::get(db, legacy_migrator, self, user_id)
+            .context("In super_encrypt_on_key_init. Failed to get user state.")?
+        {
+            UserState::LskfUnlocked(super_key) => {
+                Self::encrypt_with_super_key(key_blob, &super_key)
+                    .context("In super_encrypt_on_key_init. Failed to encrypt the key.")
+            }
+            UserState::LskfLocked => Err(Error::Rc(ResponseCode::LOCKED))
+                .context("In super_encrypt_on_key_init. Device is locked."),
+            UserState::Uninitialized => Err(Error::Rc(ResponseCode::UNINITIALIZED))
+                .context("In super_encrypt_on_key_init. LSKF is not setup for the user."),
+        }
+    }
+
+    // Helper function to encrypt a key with the given super key. Callers should select which
+    // super key is to be used. This is called when a key is super encrypted at its creation
+    // as well as at its upgrade.
+ fn encrypt_with_super_key(
+ key_blob: &[u8],
+ super_key: &SuperKey,
+ ) -> Result<(Vec<u8>, BlobMetaData)> {
+ let mut metadata = BlobMetaData::new();
+ let (encrypted_key, iv, tag) = aes_gcm_encrypt(key_blob, &(super_key.key))
+ .context("In encrypt_with_super_key: Failed to encrypt new super key.")?;
+ metadata.add(BlobMetaEntry::Iv(iv));
+ metadata.add(BlobMetaEntry::AeadTag(tag));
+ metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key.id)));
+ Ok((encrypted_key, metadata))
+ }
+
+    /// Check if super encryption is required and if so, super-encrypt the key to be stored in
+    /// the database. Keys that do not require super encryption are returned unchanged, paired
+    /// with empty metadata.
+    #[allow(clippy::too_many_arguments)]
+    pub fn handle_super_encryption_on_key_init(
+        &self,
+        db: &mut KeystoreDB,
+        legacy_migrator: &LegacyMigrator,
+        domain: &Domain,
+        key_parameters: &[KeyParameter],
+        flags: Option<i32>,
+        user_id: u32,
+        key_blob: &[u8],
+    ) -> Result<(Vec<u8>, BlobMetaData)> {
+        match (*domain, Enforcements::super_encryption_required(key_parameters, flags)) {
+            (Domain::APP, true) => {
+                self.super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob).context(
+                    "In handle_super_encryption_on_key_init. Failed to super encrypt the key.",
+                )
+            }
+            _ => Ok((key_blob.to_vec(), BlobMetaData::new())),
+        }
+    }
+
+    /// Check if a given key is super-encrypted, from its metadata. If so, unwrap the key using
+    /// the relevant super key; otherwise return the blob as a plain borrowed reference.
+    pub fn unwrap_key_if_required<'a>(
+        &self,
+        metadata: &BlobMetaData,
+        key_blob: &'a [u8],
+    ) -> Result<KeyBlob<'a>> {
+        if Self::key_super_encrypted(metadata) {
+            let unwrapped_key = self
+                .unwrap_key(key_blob, metadata)
+                .context("In unwrap_key_if_required. Error in unwrapping the key.")?;
+            Ok(unwrapped_key)
+        } else {
+            Ok(KeyBlob::Ref(key_blob))
+        }
+    }
+
+ /// Check if a given key needs re-super-encryption, from its KeyBlob type.
+ /// If so, re-super-encrypt the key and return a new set of metadata,
+ /// containing the new super encryption information.
+ pub fn reencrypt_on_upgrade_if_required<'a>(
+ key_blob_before_upgrade: &KeyBlob,
+ key_after_upgrade: &'a [u8],
+ ) -> Result<(KeyBlob<'a>, Option<BlobMetaData>)> {
+ match key_blob_before_upgrade {
+ KeyBlob::Sensitive(_, super_key) => {
+ let (key, metadata) = Self::encrypt_with_super_key(key_after_upgrade, super_key)
+ .context(
+ "In reencrypt_on_upgrade_if_required. Failed to re-super-encrypt key on key upgrade.",
+ )?;
+ Ok((KeyBlob::NonSensitive(key), Some(metadata)))
+ }
+ _ => Ok((KeyBlob::Ref(key_after_upgrade), None)),
+ }
+ }
+
+ // Helper function to decide if a key is super encrypted, given metadata.
+ fn key_super_encrypted(metadata: &BlobMetaData) -> bool {
+ if let Some(&EncryptedBy::KeyId(_)) = metadata.encrypted_by() {
+ return true;
+ }
+ false
+ }
+}
+
+/// This enum represents different states of the user's life cycle in the device.
+/// For now, only three states are defined. More states may be added later.
+pub enum UserState {
+ // The user has registered LSKF and has unlocked the device by entering PIN/Password,
+ // and hence the per-boot super key is available in the cache.
+ LskfUnlocked(SuperKey),
+ // The user has registered LSKF, but has not unlocked the device using password, after reboot.
+ // Hence the per-boot super-key(s) is not available in the cache.
+ // However, the encrypted super key is available in the database.
+ LskfLocked,
+ // There's no user in the device for the given user id, or the user with the user id has not
+ // setup LSKF.
+ Uninitialized,
+}
+
+impl UserState {
+ pub fn get(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ skm: &SuperKeyManager,
+ user_id: u32,
+ ) -> Result<UserState> {
+ match skm.get_per_boot_key_by_user_id(user_id) {
+ Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
+ None => {
+ //Check if a super key exists in the database or legacy database.
+ //If so, return locked user state.
+ if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
+ .context("In get.")?
+ {
+ Ok(UserState::LskfLocked)
+ } else {
+ Ok(UserState::Uninitialized)
+ }
+ }
+ }
+ }
+
+ /// Queries user state when serving password change requests.
+ pub fn get_with_password_changed(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ skm: &SuperKeyManager,
+ user_id: u32,
+ password: Option<&[u8]>,
+ ) -> Result<UserState> {
+ match skm.get_per_boot_key_by_user_id(user_id) {
+ Some(super_key) => {
+ if password.is_none() {
+ //transitioning to swiping, delete only the super key in database and cache, and
+ //super-encrypted keys in database (and in KM)
+ Self::reset_user(db, skm, user_id, true)
+ .context("In get_with_password_changed.")?;
+ //Lskf is now removed in Keystore
+ Ok(UserState::Uninitialized)
+ } else {
+ //Keystore won't be notified when changing to a new password when LSKF is
+ //already setup. Therefore, ideally this path wouldn't be reached.
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ }
+ None => {
+ //Check if a super key exists in the database or legacy database.
+ //If so, return LskfLocked state.
+ //Otherwise, i) if the password is provided, initialize the super key and return
+ //LskfUnlocked state ii) if password is not provided, return Uninitialized state.
+ skm.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
+ }
+ }
+ }
+
+ /// Queries user state when serving password unlock requests.
+ pub fn get_with_password_unlock(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ skm: &SuperKeyManager,
+ user_id: u32,
+ password: &[u8],
+ ) -> Result<UserState> {
+ match skm.get_per_boot_key_by_user_id(user_id) {
+ Some(super_key) => {
+ log::info!("In get_with_password_unlock. Trying to unlock when already unlocked.");
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ None => {
+ //Check if a super key exists in the database or legacy database.
+ //If not, return Uninitialized state.
+ //Otherwise, try to unlock the super key and if successful,
+ //return LskfUnlocked state
+ skm.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
+ .context("In get_with_password_unlock. Failed to unlock super key.")
+ }
+ }
+ }
+
+    /// Delete all the keys created on behalf of the user.
+    /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
+    /// encrypted keys.
+    pub fn reset_user(
+        db: &mut KeystoreDB,
+        skm: &SuperKeyManager,
+        user_id: u32,
+        keep_non_super_encrypted_keys: bool,
+    ) -> Result<()> {
+        // Mark keys created on behalf of the user as unreferenced.
+        db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
+            .context("In reset_user. Error in unbinding keys.")?;
+
+        // Delete the user's super key from the cache, if it exists.
+        skm.forget_all_keys_for_user(user_id);
+        Ok(())
+    }
+}
+
+/// This enum represents two states a Keymint Blob can be in, w.r.t super encryption.
+/// Sensitive variant represents a Keymint blob that is supposed to be super encrypted,
+/// but unwrapped during usage. Therefore, it has the super key along with the unwrapped key.
+/// Ref variant represents a Keymint blob that is not required to super encrypt or that is
+/// already super encrypted.
+pub enum KeyBlob<'a> {
+ Sensitive(ZVec, SuperKey),
+ NonSensitive(Vec<u8>),
+ Ref(&'a [u8]),
+}
+
+/// Deref returns a reference to the key material in both variants.
+impl<'a> Deref for KeyBlob<'a> {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ Self::Sensitive(key, _) => &key,
+ Self::NonSensitive(key) => &key,
+ Self::Ref(key) => key,
+ }
+ }
}
diff --git a/keystore2/src/user_manager.rs b/keystore2/src/user_manager.rs
new file mode 100644
index 0000000..8b7aad9
--- /dev/null
+++ b/keystore2/src/user_manager.rs
@@ -0,0 +1,95 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements IKeystoreUserManager AIDL interface.
+
+use crate::error::map_or_log_err;
+use crate::error::Error as KeystoreError;
+use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::permission::KeystorePerm;
+use crate::super_key::UserState;
+use crate::utils::check_keystore_permission;
+use android_security_usermanager::aidl::android::security::usermanager::IKeystoreUserManager::{
+ BnKeystoreUserManager, IKeystoreUserManager,
+};
+use android_security_usermanager::binder::{Interface, Result as BinderResult};
+use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
+use anyhow::{Context, Result};
+use binder::{IBinder, Strong};
+
+/// This struct is defined to implement the aforementioned AIDL interface.
+/// As of now, it is an empty struct.
+pub struct UserManager;
+
+impl UserManager {
+ /// Create a new instance of Keystore User Manager service.
+ pub fn new_native_binder() -> Result<Strong<dyn IKeystoreUserManager>> {
+ let result = BnKeystoreUserManager::new_binder(Self);
+ result.as_binder().set_requesting_sid(true);
+ Ok(result)
+ }
+
+ fn on_user_password_changed(user_id: i32, password: Option<&[u8]>) -> Result<()> {
+ //Check permission. Function should return if this failed. Therefore having '?' at the end
+ //is very important.
+ check_keystore_permission(KeystorePerm::change_password())
+ .context("In on_user_password_changed.")?;
+
+ match DB
+ .with(|db| {
+ UserState::get_with_password_changed(
+ &mut db.borrow_mut(),
+ &LEGACY_MIGRATOR,
+ &SUPER_KEY,
+ user_id as u32,
+ password,
+ )
+ })
+ .context("In on_user_password_changed.")?
+ {
+ UserState::LskfLocked => {
+ //error - password can not be changed when the device is locked
+ Err(KeystoreError::Rc(ResponseCode::LOCKED))
+ .context("In on_user_password_changed. Device is locked.")
+ }
+ _ => {
+ //LskfLocked is the only error case for password change
+ Ok(())
+ }
+ }
+ }
+
+ fn add_or_remove_user(user_id: i32) -> Result<()> {
+ // Check permission. Function should return if this failed. Therefore having '?' at the end
+ // is very important.
+ check_keystore_permission(KeystorePerm::change_user()).context("In add_or_remove_user.")?;
+ DB.with(|db| UserState::reset_user(&mut db.borrow_mut(), &SUPER_KEY, user_id as u32, false))
+ }
+}
+
+impl Interface for UserManager {}
+
+impl IKeystoreUserManager for UserManager {
+ fn onUserPasswordChanged(&self, user_id: i32, password: Option<&[u8]>) -> BinderResult<()> {
+ map_or_log_err(Self::on_user_password_changed(user_id, password), Ok)
+ }
+
+ fn onUserAdded(&self, user_id: i32) -> BinderResult<()> {
+ map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+ }
+
+ fn onUserRemoved(&self, user_id: i32) -> BinderResult<()> {
+ map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+ }
+}
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index 8da28f2..5db19b7 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -14,6 +14,15 @@
// List of clang-tidy checks that are reported as errors.
// Please keep this list ordered lexicographically.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
tidy_errors = [
"cert-err34-c",
"google-default-arguments",
@@ -62,7 +71,7 @@
tidy_checks: tidy_errors,
tidy_checks_as_errors: tidy_errors,
tidy_flags: [
- "-format-style='file'",
+ "-format-style=file",
],
}
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index efe7d35..2ef9511 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -39,7 +39,7 @@
const std::string kSigningKeyBlob = "/data/misc/odsign/key.blob";
const std::string kSigningKeyCert = "/data/misc/odsign/key.cert";
-const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/system";
+const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/dalvik-cache";
static const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
diff --git a/provisioner/Android.bp b/provisioner/Android.bp
index c1c8d15..d3f06fe 100644
--- a/provisioner/Android.bp
+++ b/provisioner/Android.bp
@@ -14,6 +14,15 @@
// limitations under the License.
//
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
aidl_interface {
name: "android.security.provisioner",
unstable: true,