Merge "Turn on AFDO for keystore2"
diff --git a/diced/Android.bp b/diced/Android.bp
index 525828e..e13d863 100644
--- a/diced/Android.bp
+++ b/diced/Android.bp
@@ -138,6 +138,24 @@
init_rc: ["diced.rc"],
}
+rust_binary {
+ name: "diced.microdroid",
+ srcs: ["src/diced_main.rs"],
+ prefer_rlib: true,
+ rustlibs: [
+ "android.hardware.security.dice-V1-rust",
+ "libandroid_logger",
+ "libbinder_rs",
+ "libdiced",
+ "libdiced_open_dice_cbor",
+ "libdiced_sample_inputs",
+ "libdiced_utils",
+ "liblog_rust",
+ ],
+ init_rc: ["diced.microdroid.rc"],
+ bootstrap: true,
+}
+
rust_test {
name: "diced_test",
crate_name: "diced_test",
diff --git a/diced/aidl/Android.bp b/diced/aidl/Android.bp
index b8d0c7e..75c1856 100644
--- a/diced/aidl/Android.bp
+++ b/diced/aidl/Android.bp
@@ -41,6 +41,10 @@
ndk: {
enabled: true,
apps_enabled: false,
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.compos",
+ ],
}
},
}
diff --git a/diced/diced.microdroid.rc b/diced/diced.microdroid.rc
new file mode 100644
index 0000000..2226f47
--- /dev/null
+++ b/diced/diced.microdroid.rc
@@ -0,0 +1,13 @@
+# Start the Diced service.
+#
+# See system/core/init/README.md for information on the init.rc language.
+
+service diced /system/bin/diced.microdroid
+ class main
+ user diced
+ group diced
+ # The diced service must not be allowed to restart.
+ # If it crashes for any reason security critical state is lost.
+ # The only remedy is to restart the device.
+ oneshot
+ writepid /dev/cpuset/foreground/tasks
diff --git a/diced/src/error.rs b/diced/src/error.rs
index 92aa97c..3e230e4 100644
--- a/diced/src/error.rs
+++ b/diced/src/error.rs
@@ -14,9 +14,7 @@
use android_security_dice::aidl::android::security::dice::ResponseCode::ResponseCode;
use anyhow::Result;
-use binder::{
- public_api::Result as BinderResult, ExceptionCode, Status as BinderStatus, StatusCode,
-};
+use binder::{ExceptionCode, Result as BinderResult, Status as BinderStatus, StatusCode};
use keystore2_selinux as selinux;
use std::ffi::CString;
diff --git a/diced/src/error_vendor.rs b/diced/src/error_vendor.rs
index 10d50dd..e8657e0 100644
--- a/diced/src/error_vendor.rs
+++ b/diced/src/error_vendor.rs
@@ -14,9 +14,7 @@
use android_hardware_security_dice::aidl::android::hardware::security::dice::ResponseCode::ResponseCode;
use anyhow::Result;
-use binder::public_api::{
- ExceptionCode, Result as BinderResult, Status as BinderStatus, StatusCode,
-};
+use binder::{ExceptionCode, Result as BinderResult, Status as BinderStatus, StatusCode};
use std::ffi::CString;
/// This is the error type for DICE HAL implementations. It wraps
diff --git a/diced/src/hal_node.rs b/diced/src/hal_node.rs
index bac60b5..01a7577 100644
--- a/diced/src/hal_node.rs
+++ b/diced/src/hal_node.rs
@@ -33,7 +33,7 @@
InputValues::InputValues as BinderInputValues, Signature::Signature,
};
use anyhow::{Context, Result};
-use binder::public_api::{BinderFeatures, Result as BinderResult, Strong};
+use binder::{BinderFeatures, Result as BinderResult, Strong};
use dice::{ContextImpl, OpenDiceCborContext};
use diced_open_dice_cbor as dice;
use diced_utils as utils;
diff --git a/diced/src/lib.rs b/diced/src/lib.rs
index a663144..50e0e96 100644
--- a/diced/src/lib.rs
+++ b/diced/src/lib.rs
@@ -30,7 +30,7 @@
IDiceNode::IDiceNode, ResponseCode::ResponseCode,
};
use anyhow::{Context, Result};
-use binder::{public_api::Result as BinderResult, BinderFeatures, Strong, ThreadState};
+use binder::{BinderFeatures, Result as BinderResult, Strong, ThreadState};
pub use diced_open_dice_cbor as dice;
use error::{map_or_log_err, Error};
use keystore2_selinux as selinux;
@@ -100,7 +100,7 @@
Ok(BinderInputValues {
codeHash: [0; dice::HASH_SIZE],
config: BinderConfig {
- desc: dice::bcc::format_config_descriptor(Some(&format!("{}", uid)), None, true)
+ desc: dice::bcc::format_config_descriptor(Some(&format!("{}", uid)), None, false)
.context("In client_input_values: failed to format config descriptor")?,
},
authorityHash: [0; dice::HASH_SIZE],
diff --git a/diced/src/proxy_node_hal.rs b/diced/src/proxy_node_hal.rs
index 3f31419..8d883d2 100644
--- a/diced/src/proxy_node_hal.rs
+++ b/diced/src/proxy_node_hal.rs
@@ -23,7 +23,7 @@
InputValues::InputValues as BinderInputValues, Signature::Signature,
};
use anyhow::{Context, Result};
-use binder::public_api::Strong;
+use binder::Strong;
use std::collections::HashMap;
use std::sync::RwLock;
diff --git a/fsverity/Android.bp b/fsverity/Android.bp
index 5c3d6a0..2fc3c01 100644
--- a/fsverity/Android.bp
+++ b/fsverity/Android.bp
@@ -53,3 +53,14 @@
"com.android.compos",
],
}
+
+cc_library_static {
+ name: "libfsverity_digests_proto_cc",
+ proto: {
+ type: "lite",
+ static: true,
+ canonical_path_from_root: false,
+ export_proto_headers: true,
+ },
+ srcs: ["fsverity_digests.proto"],
+}
diff --git a/identity/Android.bp b/identity/Android.bp
index 7b0503a..c69ead1 100644
--- a/identity/Android.bp
+++ b/identity/Android.bp
@@ -27,6 +27,7 @@
defaults: [
"identity_defaults",
"keymint_use_latest_hal_aidl_ndk_shared",
+ "keymint_use_latest_hal_aidl_cpp_static",
],
srcs: [
@@ -53,6 +54,7 @@
"libkeymaster4support",
"libkeystore-attestation-application-id",
"android.security.authorization-ndk",
+ "android.security.remoteprovisioning-cpp",
"libutilscallstack",
],
static_libs: [
diff --git a/identity/CredentialStore.cpp b/identity/CredentialStore.cpp
index 61a9125..c5c429b 100644
--- a/identity/CredentialStore.cpp
+++ b/identity/CredentialStore.cpp
@@ -17,10 +17,15 @@
#define LOG_TAG "credstore"
#include <algorithm>
+#include <optional>
#include <android-base/logging.h>
-
+#include <android/hardware/security/keymint/IRemotelyProvisionedComponent.h>
+#include <android/hardware/security/keymint/RpcHardwareInfo.h>
+#include <android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.h>
+#include <android/security/remoteprovisioning/RemotelyProvisionedKey.h>
#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
#include "Credential.h"
#include "CredentialData.h"
@@ -32,6 +37,46 @@
namespace android {
namespace security {
namespace identity {
+namespace {
+
+using ::android::hardware::security::keymint::IRemotelyProvisionedComponent;
+using ::android::hardware::security::keymint::RpcHardwareInfo;
+using ::android::security::remoteprovisioning::IRemotelyProvisionedKeyPool;
+using ::android::security::remoteprovisioning::RemotelyProvisionedKey;
+
+std::optional<std::string>
+getRemotelyProvisionedComponentId(const sp<IIdentityCredentialStore>& hal) {
+ auto init = [](const sp<IIdentityCredentialStore>& hal) -> std::optional<std::string> {
+ sp<IRemotelyProvisionedComponent> remotelyProvisionedComponent;
+ Status status = hal->getRemotelyProvisionedComponent(&remotelyProvisionedComponent);
+ if (!status.isOk()) {
+ LOG(ERROR) << "Error getting remotely provisioned component: " << status;
+ return std::nullopt;
+ }
+
+ RpcHardwareInfo rpcHwInfo;
+ status = remotelyProvisionedComponent->getHardwareInfo(&rpcHwInfo);
+ if (!status.isOk()) {
+ LOG(ERROR) << "Error getting remotely provisioned component hardware info: " << status;
+ return std::nullopt;
+ }
+
+ if (!rpcHwInfo.uniqueId) {
+ LOG(ERROR) << "Remotely provisioned component is missing a unique id, which is "
+ << "required for credential key remotely provisioned attestation keys. "
+ << "This is a bug in the vendor implementation.";
+ return std::nullopt;
+ }
+
+ // This id is required to later fetch remotely provisioned attestation keys.
+ return *rpcHwInfo.uniqueId;
+ };
+
+ static std::optional<std::string> id = init(hal);
+ return id;
+}
+
+} // namespace
CredentialStore::CredentialStore(const std::string& dataPath, sp<IIdentityCredentialStore> hal)
: dataPath_(dataPath), hal_(hal) {}
@@ -44,6 +89,16 @@
}
halApiVersion_ = hal_->getInterfaceVersion();
+ if (hwInfo_.isRemoteKeyProvisioningSupported) {
+ keyPool_ = android::waitForService<IRemotelyProvisionedKeyPool>(
+ IRemotelyProvisionedKeyPool::descriptor);
+ if (keyPool_.get() == nullptr) {
+ LOG(ERROR) << "Error getting IRemotelyProvisionedKeyPool HAL with service name '"
+ << IRemotelyProvisionedKeyPool::descriptor << "'";
+ return false;
+ }
+ }
+
LOG(INFO) << "Connected to Identity Credential HAL with API version " << halApiVersion_
<< " and name '" << hwInfo_.credentialStoreName << "' authored by '"
<< hwInfo_.credentialStoreAuthorName << "' with chunk size " << hwInfo_.dataChunkSize
@@ -90,6 +145,13 @@
return halStatusToGenericError(status);
}
+ if (hwInfo_.isRemoteKeyProvisioningSupported) {
+ status = setRemotelyProvisionedAttestationKey(halWritableCredential.get());
+ if (!status.isOk()) {
+ return halStatusToGenericError(status);
+ }
+ }
+
sp<IWritableCredential> writableCredential = new WritableCredential(
dataPath_, credentialName, docType, false, hwInfo_, halWritableCredential);
*_aidl_return = writableCredential;
@@ -145,6 +207,33 @@
return Status::ok();
}
+Status CredentialStore::setRemotelyProvisionedAttestationKey(
+ IWritableIdentityCredential* halWritableCredential) {
+ std::optional<std::string> rpcId = getRemotelyProvisionedComponentId(hal_);
+ if (!rpcId) {
+ return Status::fromServiceSpecificError(ERROR_GENERIC,
+ "Error getting remotely provisioned component id");
+ }
+
+ uid_t callingUid = android::IPCThreadState::self()->getCallingUid();
+ RemotelyProvisionedKey key;
+ Status status = keyPool_->getAttestationKey(callingUid, *rpcId, &key);
+ if (!status.isOk()) {
+ LOG(WARNING) << "Unable to fetch remotely provisioned attestation key, falling back "
+ << "to the factory-provisioned attestation key.";
+ return Status::ok();
+ }
+
+ status = halWritableCredential->setRemotelyProvisionedAttestationKey(key.keyBlob,
+ key.encodedCertChain);
+ if (!status.isOk()) {
+ LOG(ERROR) << "Error setting remotely provisioned attestation key on credential";
+ return status;
+ }
+
+ return Status::ok();
+}
+
} // namespace identity
} // namespace security
} // namespace android
diff --git a/identity/CredentialStore.h b/identity/CredentialStore.h
index f2aa506..df7928e 100644
--- a/identity/CredentialStore.h
+++ b/identity/CredentialStore.h
@@ -21,8 +21,8 @@
#include <vector>
#include <android/hardware/identity/IIdentityCredentialStore.h>
-
#include <android/security/identity/BnCredentialStore.h>
+#include <android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.h>
namespace android {
namespace security {
@@ -38,6 +38,8 @@
using ::android::hardware::identity::HardwareInformation;
using ::android::hardware::identity::IIdentityCredentialStore;
using ::android::hardware::identity::IPresentationSession;
+using ::android::hardware::identity::IWritableIdentityCredential;
+using ::android::security::remoteprovisioning::IRemotelyProvisionedKeyPool;
class CredentialStore : public BnCredentialStore {
public:
@@ -64,11 +66,15 @@
Status createPresentationSession(int32_t cipherSuite, sp<ISession>* _aidl_return) override;
private:
+ Status setRemotelyProvisionedAttestationKey(IWritableIdentityCredential* halWritableCredential);
+
string dataPath_;
sp<IIdentityCredentialStore> hal_;
int halApiVersion_;
+ sp<IRemotelyProvisionedKeyPool> keyPool_;
+
HardwareInformation hwInfo_;
};
diff --git a/keystore/tests/confirmationui_invocation_test.cpp b/keystore/tests/confirmationui_invocation_test.cpp
index 7f8a373..822e6a4 100644
--- a/keystore/tests/confirmationui_invocation_test.cpp
+++ b/keystore/tests/confirmationui_invocation_test.cpp
@@ -55,7 +55,7 @@
std::string locale("en");
std::vector<uint8_t> extraData{0xaa, 0xff, 0x00, 0x55};
- auto listener = std::make_shared<ConfirmationListener>();
+ auto listener = ndk::SharedRefBase::make<ConfirmationListener>();
auto future = listener->get_future();
diff --git a/keystore/tests/fuzzer/Android.bp b/keystore/tests/fuzzer/Android.bp
index 589cef7..4116ae1 100644
--- a/keystore/tests/fuzzer/Android.bp
+++ b/keystore/tests/fuzzer/Android.bp
@@ -31,12 +31,12 @@
],
static_libs: [
"libkeystore-wifi-hidl",
- "libutils",
],
shared_libs: [
"android.system.wifi.keystore@1.0",
"libhidlbase",
"liblog",
+ "libutils",
],
fuzz_config: {
cc: [
@@ -51,13 +51,13 @@
static_libs: [
"libkeystore-attestation-application-id",
"liblog",
- "libutils",
"libbase",
"libhidlbase",
],
shared_libs: [
"libbinder",
"libcrypto",
+ "libutils",
],
fuzz_config: {
cc: [
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index 2a6ef7d..e6cb4fb 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -76,19 +76,13 @@
}
rust_library {
- name: "libkeystore2_noicu",
- defaults: ["libkeystore2_defaults"],
- rustlibs: [
- "liblibsqlite3_sys_noicu",
- "librusqlite_noicu",
- ],
-}
-
-rust_library {
name: "libkeystore2_test_utils",
crate_name: "keystore2_test_utils",
srcs: ["test_utils/lib.rs"],
+ defaults: ["keymint_use_latest_hal_aidl_rust"],
rustlibs: [
+ "android.system.keystore2-V2-rust",
+ "libbinder_rs",
"libkeystore2_selinux",
"liblog_rust",
"libnix",
@@ -98,14 +92,30 @@
],
}
+rust_library {
+ name: "libkeystore2_with_test_utils",
+ defaults: ["libkeystore2_defaults"],
+ features: [
+ "keystore2_blob_test_utils",
+ ],
+ rustlibs: [
+ "liblibsqlite3_sys",
+ "librusqlite",
+ "libkeystore2_test_utils",
+ ],
+}
+
rust_test {
name: "keystore2_test_utils_test",
srcs: ["test_utils/lib.rs"],
+ defaults: ["keymint_use_latest_hal_aidl_rust"],
test_suites: ["general-tests"],
require_root: true,
auto_gen_config: true,
compile_multilib: "first",
rustlibs: [
+ "android.system.keystore2-V2-rust",
+ "libbinder_rs",
"libkeystore2_selinux",
"liblog_rust",
"libnix",
@@ -128,10 +138,12 @@
"liblibsqlite3_sys",
"libnix",
"librusqlite",
+ "libkeystore2_with_test_utils",
],
// The test should always include watchdog.
features: [
"watchdog",
+ "keystore2_blob_test_utils",
],
}
@@ -168,18 +180,3 @@
],
afdo: true,
}
-
-// Variant of keystore2 for use in microdroid. It doesn't depend on the ICU-enabled sqlite.
-// This can be used also in Android, but we choose not to because it will bring two
-// variants of sqlite to the system causing more RAM usage and CPU cycles when loading.
-rust_binary {
- name: "keystore2_microdroid",
- stem: "keystore2",
- defaults: ["keystore2_defaults"],
- rustlibs: [
- "libkeystore2_noicu",
- "liblegacykeystore-rust_noicu",
- "librusqlite_noicu",
- ],
- installable: false, // don't install this to Android
-}
diff --git a/keystore2/TEST_MAPPING b/keystore2/TEST_MAPPING
index 049adc7..5d0a7dd 100644
--- a/keystore2/TEST_MAPPING
+++ b/keystore2/TEST_MAPPING
@@ -13,6 +13,9 @@
"name": "keystore2_test_utils_test"
},
{
+ "name": "keystore2_legacy_blobs_test"
+ },
+ {
"name": "CtsIdentityTestCases"
},
{
diff --git a/keystore2/aidl/Android.bp b/keystore2/aidl/Android.bp
index 7eb2b83..ae08567 100644
--- a/keystore2/aidl/Android.bp
+++ b/keystore2/aidl/Android.bp
@@ -29,7 +29,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
rust: {
enabled: true,
@@ -52,7 +51,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
rust: {
enabled: true,
@@ -71,7 +69,6 @@
backend: {
java: {
enabled: true,
- srcs_available: true,
},
rust: {
enabled: true,
@@ -94,7 +91,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
rust: {
enabled: true,
@@ -116,7 +112,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
ndk: {
enabled: true,
@@ -138,7 +133,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
rust: {
enabled: true,
@@ -157,7 +151,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
rust: {
enabled: true,
@@ -179,7 +172,6 @@
backend: {
java: {
platform_apis: true,
- srcs_available: true,
},
rust: {
enabled: true,
diff --git a/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl b/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl
index 50bfa19..8e347f0 100644
--- a/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl
+++ b/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl
@@ -29,8 +29,17 @@
*/
interface IKeystoreCompatService {
/**
- * Return an implementation of IKeyMintDevice, that it implemented by Keystore 2.0 itself
- * by means of Keymaster 4.1 or lower.
+ * Return an implementation of IKeyMintDevice, that is implemented by Keystore 2.0 itself.
+ * The underlying implementation depends on the requested securityLevel:
+ * - TRUSTED_ENVIRONMENT or STRONGBOX: implementation is by means of a hardware-backed
+ * Keymaster 4.x instance. In this case, the returned device supports version 1 of
+ * the IKeyMintDevice interface, with some small omissions:
+ * - KeyPurpose::ATTEST_KEY is not supported (b/216437537)
+ * - Specification of the MGF1 digest for RSA-OAEP is not supported (b/216436980)
+ * - Specification of CERTIFICATE_{SUBJECT,SERIAL} is not supported for keys attested
+ * by hardware (b/216468666).
+ * - SOFTWARE: implementation is entirely software based. In this case, the returned device
+ * supports the current version of the IKeyMintDevice interface.
*/
IKeyMintDevice getKeyMintDevice (SecurityLevel securityLevel);
diff --git a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
index 3df5936..6a37c78 100644
--- a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
+++ b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
@@ -23,15 +23,10 @@
* user's password.
* @hide
*/
-@SensitiveData
+@SensitiveData
interface IKeystoreMaintenance {
/**
- * Special value indicating the callers uid.
- */
- const int UID_SELF = -1;
-
- /**
* Allows LockSettingsService to inform keystore about adding a new user.
* Callers require 'AddUser' permission.
*
@@ -120,10 +115,6 @@
* The source may be specified by Domain::APP, Domain::SELINUX, or Domain::KEY_ID. The target
* may be specified by Domain::APP or Domain::SELINUX.
*
- * If Domain::APP is selected in either source or destination, nspace must be set to UID_SELF,
- * implying the caller's UID. If the caller has the MIGRATE_ANY_KEY permission, Domain::APP may
- * be used with other nspace values which then indicates the UID of a different application.
- *
* ## Error conditions:
* `ResponseCode::PERMISSION_DENIED` - If the caller lacks any of the required permissions.
* `ResponseCode::KEY_NOT_FOUND` - If the source did not exist.
@@ -140,22 +131,4 @@
* Tag::ROLLBACK_RESISTANCE may or may not be rendered unusable.
*/
void deleteAllKeys();
-
- /**
- * List all entries accessible by the caller in the given `domain` and `nspace`.
- *
- * Callers either has to have the `GET_INFO` permission for the requested namespace or `LIST`
- * permission to list all the entries.
- *
- * ## Error conditions
- * `ResponseCode::INVALID_ARGUMENT` if `domain` is other than `Domain::APP` or `Domain::SELINUX`
- * `ResponseCode::PERMISSION_DENIED` if the caller does not have the permission
- *
- * @param domain `Domain::APP` or `Domain::SELINUX`.
- *
- * @param nspace The SELinux keystore2_key namespace.
- *
- * @return List of KeyDescriptors.
- */
- KeyDescriptor[] listEntries(in Domain domain, in long nspace);
}
diff --git a/keystore2/aidl/android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.aidl b/keystore2/aidl/android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.aidl
new file mode 100644
index 0000000..7d45e52
--- /dev/null
+++ b/keystore2/aidl/android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.aidl
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.remoteprovisioning;
+
+import android.security.remoteprovisioning.RemotelyProvisionedKey;
+
+/**
+ * This is the interface providing access to remotely-provisioned attestation keys
+ * for an `IRemotelyProvisionedComponent`.
+ *
+ * @hide
+ */
+interface IRemotelyProvisionedKeyPool {
+
+ /**
+ * Fetches an attestation key for the given uid and `IRemotelyProvisionedComponent`, as
+ * identified by the given id.
+ *
+ * Callers require the keystore2::get_attestation_key permission.
+ *
+ * ## Error conditions
+ * `android.system.keystore2.ResponseCode::PERMISSION_DENIED` if the caller does not have the
+ * `keystore2::get_attestation_key` permission
+ *
+ * @param clientUid The client application for which an attestation key is needed.
+ *
+ * @param irpcId The unique identifier for the `IRemotelyProvisionedComponent` for which a key
+ * is requested. This id may be retrieved from a given component via the
+ * `IRemotelyProvisionedComponent::getHardwareInfo` function.
+ *
+ * @return A `RemotelyProvisionedKey` parcelable containing a key and certification chain for
+ * the given `IRemotelyProvisionedComponent`.
+ */
+ RemotelyProvisionedKey getAttestationKey(in int clientUid, in @utf8InCpp String irpcId);
+}
diff --git a/keystore2/aidl/android/security/remoteprovisioning/RemotelyProvisionedKey.aidl b/keystore2/aidl/android/security/remoteprovisioning/RemotelyProvisionedKey.aidl
new file mode 100644
index 0000000..ae21855
--- /dev/null
+++ b/keystore2/aidl/android/security/remoteprovisioning/RemotelyProvisionedKey.aidl
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.remoteprovisioning;
+
+/**
+ * A `RemotelyProvisionedKey` holds an attestation key and the corresponding remotely provisioned
+ * certificate chain.
+ *
+ * @hide
+ */
+@RustDerive(Eq=true, PartialEq=true)
+parcelable RemotelyProvisionedKey {
+ /**
+ * The remotely-provisioned key that may be used to sign attestations. The format of this key
+ * is opaque, and need only be understood by the IRemotelyProvisionedComponent that generated
+ * it.
+ *
+ * Any private key material contained within this blob must be encrypted.
+ */
+ byte[] keyBlob;
+
+ /**
+ * Sequence of DER-encoded X.509 certificates that make up the attestation key's certificate
+ * chain. This is the binary encoding for a chain that is supported by Java's
+ * CertificateFactory.generateCertificates API.
+ */
+ byte[] encodedCertChain;
+}
diff --git a/keystore2/legacykeystore/Android.bp b/keystore2/legacykeystore/Android.bp
index d407569..505b165 100644
--- a/keystore2/legacykeystore/Android.bp
+++ b/keystore2/legacykeystore/Android.bp
@@ -47,15 +47,6 @@
],
}
-rust_library {
- name: "liblegacykeystore-rust_noicu",
- defaults: ["liblegacykeystore-rust_defaults"],
- rustlibs: [
- "libkeystore2_noicu",
- "librusqlite_noicu",
- ],
-}
-
rust_test {
name: "legacykeystore_test",
crate_name: "legacykeystore",
diff --git a/keystore2/legacykeystore/lib.rs b/keystore2/legacykeystore/lib.rs
index da60297..e2d952d 100644
--- a/keystore2/legacykeystore/lib.rs
+++ b/keystore2/legacykeystore/lib.rs
@@ -25,8 +25,9 @@
};
use anyhow::{Context, Result};
use keystore2::{
- async_task::AsyncTask, legacy_blob::LegacyBlobLoader, maintenance::DeleteListener,
- maintenance::Domain, utils::watchdog as wd,
+ async_task::AsyncTask, error::anyhow_error_to_cstring, globals::SUPER_KEY,
+ legacy_blob::LegacyBlobLoader, maintenance::DeleteListener, maintenance::Domain,
+ utils::uid_to_android_user, utils::watchdog as wd,
};
use rusqlite::{
params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
@@ -226,7 +227,10 @@
if log_error {
log::error!("{:?}", e);
}
- Err(BinderStatus::new_service_specific_error(rc, None))
+ Err(BinderStatus::new_service_specific_error(
+ rc,
+ anyhow_error_to_cstring(&e).as_deref(),
+ ))
},
handle_ok,
)
@@ -312,8 +316,8 @@
if let Some(entry) = db.get(uid, alias).context("In get: Trying to load entry from DB.")? {
return Ok(entry);
}
- if self.get_legacy(uid, alias).context("In get: Trying to migrate legacy blob.")? {
- // If we were able to migrate a legacy blob try again.
+ if self.get_legacy(uid, alias).context("In get: Trying to import legacy blob.")? {
+ // If we were able to import a legacy blob try again.
if let Some(entry) =
db.get(uid, alias).context("In get: Trying to load entry from DB.")?
{
@@ -325,19 +329,20 @@
fn put(&self, alias: &str, uid: i32, entry: &[u8]) -> Result<()> {
let uid = Self::get_effective_uid(uid).context("In put.")?;
- // In order to make sure that we don't have stale legacy entries, make sure they are
- // migrated before replacing them.
- let _ = self.get_legacy(uid, alias);
let mut db = self.open_db().context("In put.")?;
- db.put(uid, alias, entry).context("In put: Trying to insert entry into DB.")
+ db.put(uid, alias, entry).context("In put: Trying to insert entry into DB.")?;
+ // When replacing an entry, make sure that there is no stale legacy file entry.
+ let _ = self.remove_legacy(uid, alias);
+ Ok(())
}
fn remove(&self, alias: &str, uid: i32) -> Result<()> {
let uid = Self::get_effective_uid(uid).context("In remove.")?;
let mut db = self.open_db().context("In remove.")?;
- // In order to make sure that we don't have stale legacy entries, make sure they are
- // migrated before removing them.
- let _ = self.get_legacy(uid, alias);
+
+ if self.remove_legacy(uid, alias).context("In remove: trying to remove legacy entry")? {
+ return Ok(());
+ }
let removed =
db.remove(uid, alias).context("In remove: Trying to remove entry from DB.")?;
if removed {
@@ -427,17 +432,30 @@
return Ok(true);
}
let mut db = DB::new(&state.db_path).context("In open_db: Failed to open db.")?;
- let migrated =
- Self::migrate_one_legacy_entry(uid, &alias, &state.legacy_loader, &mut db)
- .context("Trying to migrate legacy keystore entries.")?;
- if migrated {
+ let imported =
+ Self::import_one_legacy_entry(uid, &alias, &state.legacy_loader, &mut db)
+ .context("Trying to import legacy keystore entries.")?;
+ if imported {
state.recently_imported.insert((uid, alias));
}
- Ok(migrated)
+ Ok(imported)
})
.context("In get_legacy.")
}
+ fn remove_legacy(&self, uid: u32, alias: &str) -> Result<bool> {
+ let alias = alias.to_string();
+ self.do_serialized(move |state| {
+ if state.recently_imported.contains(&(uid, alias.clone())) {
+ return Ok(false);
+ }
+ state
+ .legacy_loader
+ .remove_legacy_keystore_entry(uid, &alias)
+ .context("Trying to remove legacy entry.")
+ })
+ }
+
fn bulk_delete_uid(&self, uid: u32) -> Result<()> {
self.do_serialized(move |state| {
let entries = state
@@ -470,21 +488,31 @@
})
}
- fn migrate_one_legacy_entry(
+ fn import_one_legacy_entry(
uid: u32,
alias: &str,
legacy_loader: &LegacyBlobLoader,
db: &mut DB,
) -> Result<bool> {
let blob = legacy_loader
- .read_legacy_keystore_entry(uid, alias)
- .context("In migrate_one_legacy_entry: Trying to read legacy keystore entry.")?;
+ .read_legacy_keystore_entry(uid, alias, |ciphertext, iv, tag, _salt, _key_size| {
+ if let Some(key) = SUPER_KEY
+ .read()
+ .unwrap()
+ .get_per_boot_key_by_user_id(uid_to_android_user(uid as u32))
+ {
+ key.decrypt(ciphertext, iv, tag)
+ } else {
+ Err(Error::sys()).context("No key found for user. Device may be locked.")
+ }
+ })
+ .context("In import_one_legacy_entry: Trying to read legacy keystore entry.")?;
if let Some(entry) = blob {
db.put(uid, alias, &entry)
- .context("In migrate_one_legacy_entry: Trying to insert entry into DB.")?;
+ .context("In import_one_legacy_entry: Trying to insert entry into DB.")?;
legacy_loader
.remove_legacy_keystore_entry(uid, alias)
- .context("In migrate_one_legacy_entry: Trying to delete legacy keystore entry.")?;
+ .context("In import_one_legacy_entry: Trying to delete legacy keystore entry.")?;
Ok(true)
} else {
Ok(false)
diff --git a/keystore2/src/apc.rs b/keystore2/src/apc.rs
index 0096686..7d56dc9 100644
--- a/keystore2/src/apc.rs
+++ b/keystore2/src/apc.rs
@@ -21,6 +21,7 @@
sync::{mpsc::Sender, Arc, Mutex},
};
+use crate::error::anyhow_error_to_cstring;
use crate::utils::{compat_2_response_code, ui_opts_2_compat, watchdog as wd};
use android_security_apc::aidl::android::security::apc::{
IConfirmationCallback::IConfirmationCallback,
@@ -110,7 +111,10 @@
_ => ResponseCode::SYSTEM_ERROR.0,
},
};
- Err(BinderStatus::new_service_specific_error(rc, None))
+ Err(BinderStatus::new_service_specific_error(
+ rc,
+ anyhow_error_to_cstring(&e).as_deref(),
+ ))
},
handle_ok,
)
diff --git a/keystore2/src/attestation_key_utils.rs b/keystore2/src/attestation_key_utils.rs
index a8c1ca9..8354ba5 100644
--- a/keystore2/src/attestation_key_utils.rs
+++ b/keystore2/src/attestation_key_utils.rs
@@ -35,6 +35,7 @@
/// handled quite differently, thus the different representations.
pub enum AttestationKeyInfo {
RemoteProvisioned {
+ key_id_guard: KeyIdGuard,
attestation_key: AttestationKey,
attestation_certs: Certificate,
},
@@ -66,8 +67,12 @@
"Trying to get remotely provisioned attestation key."
))
.map(|result| {
- result.map(|(attestation_key, attestation_certs)| {
- AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }
+ result.map(|(key_id_guard, attestation_key, attestation_certs)| {
+ AttestationKeyInfo::RemoteProvisioned {
+ key_id_guard,
+ attestation_key,
+ attestation_certs,
+ }
})
}),
None => Ok(None),
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index 04626bc..8265dd0 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -15,7 +15,8 @@
//! This module implements IKeystoreAuthorization AIDL interface.
use crate::error::Error as KeystoreError;
-use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
+use crate::error::anyhow_error_to_cstring;
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_IMPORTER};
use crate::permission::KeystorePerm;
use crate::super_key::UserState;
use crate::utils::{check_keystore_permission, watchdog as wd};
@@ -88,7 +89,10 @@
// as well.
_ => ResponseCode::SYSTEM_ERROR.0,
};
- return Err(BinderStatus::new_service_specific_error(rc, None));
+ return Err(BinderStatus::new_service_specific_error(
+ rc,
+ anyhow_error_to_cstring(&e).as_deref(),
+ ));
}
let rc = match root_cause.downcast_ref::<Error>() {
Some(Error::Rc(rcode)) => rcode.0,
@@ -98,7 +102,10 @@
_ => ResponseCode::SYSTEM_ERROR.0,
},
};
- Err(BinderStatus::new_service_specific_error(rc, None))
+ Err(BinderStatus::new_service_specific_error(
+ rc,
+ anyhow_error_to_cstring(&e).as_deref(),
+ ))
},
handle_ok,
)
@@ -147,8 +154,10 @@
.context("In on_lock_screen_event: Unlock with password.")?;
ENFORCEMENTS.set_device_locked(user_id, false);
+ let mut skm = SUPER_KEY.write().unwrap();
+
DB.with(|db| {
- SUPER_KEY.unlock_screen_lock_bound_key(
+ skm.unlock_screen_lock_bound_key(
&mut db.borrow_mut(),
user_id as u32,
&password,
@@ -159,10 +168,9 @@
// Unlock super key.
if let UserState::Uninitialized = DB
.with(|db| {
- UserState::get_with_password_unlock(
+ skm.unlock_and_get_user_state(
&mut db.borrow_mut(),
- &LEGACY_MIGRATOR,
- &SUPER_KEY,
+ &LEGACY_IMPORTER,
user_id as u32,
&password,
)
@@ -180,8 +188,9 @@
check_keystore_permission(KeystorePerm::Unlock)
.context("In on_lock_screen_event: Unlock.")?;
ENFORCEMENTS.set_device_locked(user_id, false);
+ let mut skm = SUPER_KEY.write().unwrap();
DB.with(|db| {
- SUPER_KEY.try_unlock_user_with_biometric(&mut db.borrow_mut(), user_id as u32)
+ skm.try_unlock_user_with_biometric(&mut db.borrow_mut(), user_id as u32)
})
.context("In on_lock_screen_event: try_unlock_user_with_biometric failed")?;
Ok(())
@@ -190,8 +199,9 @@
check_keystore_permission(KeystorePerm::Lock)
.context("In on_lock_screen_event: Lock")?;
ENFORCEMENTS.set_device_locked(user_id, true);
+ let mut skm = SUPER_KEY.write().unwrap();
DB.with(|db| {
- SUPER_KEY.lock_screen_lock_bound_key(
+ skm.lock_screen_lock_bound_key(
&mut db.borrow_mut(),
user_id as u32,
unlocking_sids.unwrap_or(&[]),
@@ -265,7 +275,7 @@
challenge: i64,
secure_user_id: i64,
auth_token_max_age_millis: i64,
- ) -> binder::public_api::Result<AuthorizationTokens> {
+ ) -> binder::Result<AuthorizationTokens> {
let _wp = wd::watch_millis("IKeystoreAuthorization::getAuthTokensForCredStore", 500);
map_or_log_err(
self.get_auth_tokens_for_credstore(
diff --git a/keystore2/src/crypto/Android.bp b/keystore2/src/crypto/Android.bp
index 76c02c5..c3f6f3c 100644
--- a/keystore2/src/crypto/Android.bp
+++ b/keystore2/src/crypto/Android.bp
@@ -62,6 +62,7 @@
shared_libs: ["libcrypto"],
bindgen_flags: [
"--size_t-is-usize",
+ "--allowlist-function", "hmacSha256",
"--allowlist-function", "randomBytes",
"--allowlist-function", "AES_gcm_encrypt",
"--allowlist-function", "AES_gcm_decrypt",
diff --git a/keystore2/src/crypto/crypto.cpp b/keystore2/src/crypto/crypto.cpp
index 5d360a1..34a9a40 100644
--- a/keystore2/src/crypto/crypto.cpp
+++ b/keystore2/src/crypto/crypto.cpp
@@ -25,6 +25,7 @@
#include <openssl/ecdh.h>
#include <openssl/evp.h>
#include <openssl/hkdf.h>
+#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/x509.h>
@@ -66,6 +67,14 @@
return cipher;
}
+bool hmacSha256(const uint8_t* key, size_t key_size, const uint8_t* msg, size_t msg_size,
+ uint8_t* out, size_t out_size) {
+ const EVP_MD* digest = EVP_sha256();
+ unsigned int actual_out_size = out_size;
+ uint8_t* p = HMAC(digest, key, key_size, msg, msg_size, out, &actual_out_size);
+ return (p != nullptr);
+}
+
bool randomBytes(uint8_t* out, size_t len) {
return RAND_bytes(out, len);
}
diff --git a/keystore2/src/crypto/crypto.hpp b/keystore2/src/crypto/crypto.hpp
index f841eb3..d66532f 100644
--- a/keystore2/src/crypto/crypto.hpp
+++ b/keystore2/src/crypto/crypto.hpp
@@ -22,6 +22,8 @@
#include <stddef.h>
extern "C" {
+ bool hmacSha256(const uint8_t* key, size_t key_size, const uint8_t* msg, size_t msg_size,
+ uint8_t* out, size_t out_size);
bool randomBytes(uint8_t* out, size_t len);
bool AES_gcm_encrypt(const uint8_t* in, uint8_t* out, size_t len,
const uint8_t* key, size_t key_size, const uint8_t* iv, uint8_t* tag);
diff --git a/keystore2/src/crypto/error.rs b/keystore2/src/crypto/error.rs
index c6476f9..48a2d4c 100644
--- a/keystore2/src/crypto/error.rs
+++ b/keystore2/src/crypto/error.rs
@@ -95,6 +95,10 @@
#[error("Failed to extract certificate subject.")]
ExtractSubjectFailed,
+ /// This is returned if the C implementation of hmacSha256 failed.
+ #[error("Failed to calculate HMAC-SHA256.")]
+ HmacSha256Failed,
+
/// Zvec error.
#[error(transparent)]
ZVec(#[from] zvec::Error),
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index 92da965..14bdf04 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -19,8 +19,8 @@
pub mod zvec;
pub use error::Error;
use keystore2_crypto_bindgen::{
- extractSubjectFromCertificate, generateKeyFromPassword, randomBytes, AES_gcm_decrypt,
- AES_gcm_encrypt, ECDHComputeKey, ECKEYGenerateKey, ECKEYMarshalPrivateKey,
+ extractSubjectFromCertificate, generateKeyFromPassword, hmacSha256, randomBytes,
+ AES_gcm_decrypt, AES_gcm_encrypt, ECDHComputeKey, ECKEYGenerateKey, ECKEYMarshalPrivateKey,
ECKEYParsePrivateKey, ECPOINTOct2Point, ECPOINTPoint2Oct, EC_KEY_free, EC_KEY_get0_public_key,
EC_POINT_free, HKDFExpand, HKDFExtract, EC_KEY, EC_MAX_BYTES, EC_POINT, EVP_MAX_MD_SIZE,
};
@@ -39,6 +39,8 @@
pub const AES_128_KEY_LENGTH: usize = 16;
/// Length of the expected salt for key from password generation.
pub const SALT_LENGTH: usize = 16;
+/// Length of an HMAC-SHA256 tag in bytes.
+pub const HMAC_SHA256_LEN: usize = 32;
/// Older versions of keystore produced IVs with four extra
/// ignored zero bytes at the end; recognise and trim those.
@@ -72,6 +74,21 @@
}
}
+/// Perform HMAC-SHA256.
+pub fn hmac_sha256(key: &[u8], msg: &[u8]) -> Result<Vec<u8>, Error> {
+ let mut tag = vec![0; HMAC_SHA256_LEN];
+ // Safety: The first two pairs of arguments must point to const buffers with
+ // size given by the second arg of the pair. The final pair of arguments
+ // must point to an output buffer with size given by the second arg of the
+ // pair.
+ match unsafe {
+ hmacSha256(key.as_ptr(), key.len(), msg.as_ptr(), msg.len(), tag.as_mut_ptr(), tag.len())
+ } {
+ true => Ok(tag),
+ false => Err(Error::HmacSha256Failed),
+ }
+}
+
/// Uses AES GCM to decipher a message given an initialization vector, aead tag, and key.
/// This function accepts 128 and 256-bit keys and uses AES128 and AES256 respectively based
/// on the key length.
@@ -565,4 +582,18 @@
assert_eq!(left_key, right_key);
Ok(())
}
+
+ #[test]
+ fn test_hmac_sha256() {
+ let key = b"This is the key";
+ let msg1 = b"This is a message";
+ let msg2 = b"This is another message";
+ let tag1a = hmac_sha256(key, msg1).unwrap();
+ assert_eq!(tag1a.len(), HMAC_SHA256_LEN);
+ let tag1b = hmac_sha256(key, msg1).unwrap();
+ assert_eq!(tag1a, tag1b);
+ let tag2 = hmac_sha256(key, msg2).unwrap();
+ assert_eq!(tag2.len(), HMAC_SHA256_LEN);
+ assert_ne!(tag1a, tag2);
+ }
}
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 7099f5a..7713618 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -323,6 +323,8 @@
0x41, 0xe3, 0xb9, 0xce, 0x27, 0x58, 0x4e, 0x91, 0xbc, 0xfd, 0xa5, 0x5d, 0x91, 0x85, 0xab, 0x11,
]);
+static EXPIRATION_BUFFER_MS: i64 = 20000;
+
/// Indicates how the sensitive part of this key blob is encrypted.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum EncryptedBy {
@@ -578,6 +580,36 @@
cert_chain: Option<Vec<u8>>,
}
+/// This type represents a Blob with its metadata and an optional superseded blob.
+#[derive(Debug)]
+pub struct BlobInfo<'a> {
+ blob: &'a [u8],
+ metadata: &'a BlobMetaData,
+ /// Superseded blobs are an artifact of legacy import. In some rare occasions
+ /// the key blob needs to be upgraded during import. In that case two
+ /// blobs are imported; the superseded one has to be imported first,
+ /// so that the garbage collector can reap it.
+ superseded_blob: Option<(&'a [u8], &'a BlobMetaData)>,
+}
+
+impl<'a> BlobInfo<'a> {
+ /// Create a new instance of blob info with blob and corresponding metadata
+ /// and no superseded blob info.
+ pub fn new(blob: &'a [u8], metadata: &'a BlobMetaData) -> Self {
+ Self { blob, metadata, superseded_blob: None }
+ }
+
+ /// Create a new instance of blob info with blob and corresponding metadata
+ /// as well as superseded blob info.
+ pub fn new_with_superseded(
+ blob: &'a [u8],
+ metadata: &'a BlobMetaData,
+ superseded_blob: Option<(&'a [u8], &'a BlobMetaData)>,
+ ) -> Self {
+ Self { blob, metadata, superseded_blob }
+ }
+}
+
impl CertificateInfo {
/// Constructs a new CertificateInfo object from `cert` and `cert_chain`
pub fn new(cert: Option<Vec<u8>>, cert_chain: Option<Vec<u8>>) -> Self {
@@ -1909,8 +1941,11 @@
)?
.collect::<rusqlite::Result<Vec<(i64, DateTime)>>>()
.context("Failed to get date metadata")?;
+ // Calculate curr_time with a discount factor to avoid a key that's milliseconds away
+ // from expiration dodging this delete call.
let curr_time = DateTime::from_millis_epoch(
- SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64,
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS,
);
let mut num_deleted = 0;
for id in key_ids_to_check.iter().filter(|kt| kt.1 < curr_time).map(|kt| kt.0) {
@@ -2019,6 +2054,41 @@
.context("In get_attestation_pool_status: ")
}
+ fn query_kid_for_attestation_key_and_cert_chain(
+ &self,
+ tx: &Transaction,
+ domain: Domain,
+ namespace: i64,
+ km_uuid: &Uuid,
+ ) -> Result<Option<i64>> {
+ let mut stmt = tx.prepare(
+ "SELECT id
+ FROM persistent.keyentry
+ WHERE key_type = ?
+ AND domain = ?
+ AND namespace = ?
+ AND state = ?
+ AND km_uuid = ?;",
+ )?;
+ let rows = stmt
+ .query_map(
+ params![
+ KeyType::Attestation,
+ domain.0 as u32,
+ namespace,
+ KeyLifeCycle::Live,
+ km_uuid
+ ],
+ |row| row.get(0),
+ )?
+ .collect::<rusqlite::Result<Vec<i64>>>()
+ .context("query failed.")?;
+ if rows.is_empty() {
+ return Ok(None);
+ }
+ Ok(Some(rows[0]))
+ }
+
/// Fetches the private key and corresponding certificate chain assigned to a
/// domain/namespace pair. Will either return nothing if the domain/namespace is
/// not assigned, or one CertificateChain.
@@ -2027,7 +2097,7 @@
domain: Domain,
namespace: i64,
km_uuid: &Uuid,
- ) -> Result<Option<CertificateChain>> {
+ ) -> Result<Option<(KeyIdGuard, CertificateChain)>> {
let _wp = wd::watch_millis("KeystoreDB::retrieve_attestation_key_and_cert_chain", 500);
match domain {
@@ -2037,69 +2107,71 @@
.context(format!("Domain {:?} must be either App or SELinux.", domain));
}
}
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let mut stmt = tx.prepare(
- "SELECT subcomponent_type, blob
- FROM persistent.blobentry
- WHERE keyentryid IN
- (SELECT id
- FROM persistent.keyentry
- WHERE key_type = ?
- AND domain = ?
- AND namespace = ?
- AND state = ?
- AND km_uuid = ?);",
- )?;
- let rows = stmt
- .query_map(
- params![
- KeyType::Attestation,
- domain.0 as u32,
- namespace,
- KeyLifeCycle::Live,
- km_uuid
- ],
- |row| Ok((row.get(0)?, row.get(1)?)),
- )?
- .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
- .context("query failed.")?;
- if rows.is_empty() {
- return Ok(None).no_gc();
- } else if rows.len() != 3 {
- return Err(KsError::sys()).context(format!(
- concat!(
- "Expected to get a single attestation",
- "key, cert, and cert chain for a total of 3 entries, but instead got {}."
- ),
- rows.len()
- ));
- }
- let mut km_blob: Vec<u8> = Vec::new();
- let mut cert_chain_blob: Vec<u8> = Vec::new();
- let mut batch_cert_blob: Vec<u8> = Vec::new();
- for row in rows {
- let sub_type: SubComponentType = row.0;
- match sub_type {
- SubComponentType::KEY_BLOB => {
- km_blob = row.1;
- }
- SubComponentType::CERT_CHAIN => {
- cert_chain_blob = row.1;
- }
- SubComponentType::CERT => {
- batch_cert_blob = row.1;
- }
- _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
+
+ self.delete_expired_attestation_keys().context(
+ "In retrieve_attestation_key_and_cert_chain: failed to prune expired attestation keys",
+ )?;
+ let tx = self.conn.unchecked_transaction().context(
+ "In retrieve_attestation_key_and_cert_chain: Failed to initialize transaction.",
+ )?;
+ let key_id: i64 = match self
+ .query_kid_for_attestation_key_and_cert_chain(&tx, domain, namespace, km_uuid)?
+ {
+ None => return Ok(None),
+ Some(kid) => kid,
+ };
+ tx.commit()
+ .context("In retrieve_attestation_key_and_cert_chain: Failed to commit keyid query")?;
+ let key_id_guard = KEY_ID_LOCK.get(key_id);
+ let tx = self.conn.unchecked_transaction().context(
+ "In retrieve_attestation_key_and_cert_chain: Failed to initialize transaction.",
+ )?;
+ let mut stmt = tx.prepare(
+ "SELECT subcomponent_type, blob
+ FROM persistent.blobentry
+ WHERE keyentryid = ?;",
+ )?;
+ let rows = stmt
+ .query_map(params![key_id_guard.id()], |row| Ok((row.get(0)?, row.get(1)?)))?
+ .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
+ .context("query failed.")?;
+ if rows.is_empty() {
+ return Ok(None);
+ } else if rows.len() != 3 {
+ return Err(KsError::sys()).context(format!(
+ concat!(
+ "Expected to get a single attestation",
+ "key, cert, and cert chain for a total of 3 entries, but instead got {}."
+ ),
+ rows.len()
+ ));
+ }
+ let mut km_blob: Vec<u8> = Vec::new();
+ let mut cert_chain_blob: Vec<u8> = Vec::new();
+ let mut batch_cert_blob: Vec<u8> = Vec::new();
+ for row in rows {
+ let sub_type: SubComponentType = row.0;
+ match sub_type {
+ SubComponentType::KEY_BLOB => {
+ km_blob = row.1;
}
+ SubComponentType::CERT_CHAIN => {
+ cert_chain_blob = row.1;
+ }
+ SubComponentType::CERT => {
+ batch_cert_blob = row.1;
+ }
+ _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
}
- Ok(Some(CertificateChain {
+ }
+ Ok(Some((
+ key_id_guard,
+ CertificateChain {
private_key: ZVec::try_from(km_blob)?,
batch_cert: batch_cert_blob,
cert_chain: cert_chain_blob,
- }))
- .no_gc()
- })
- .context("In retrieve_attestation_key_and_cert_chain:")
+ },
+ )))
}
/// Updates the alias column of the given key id `newid` with the given alias,
@@ -2233,7 +2305,7 @@
key: &KeyDescriptor,
key_type: KeyType,
params: &[KeyParameter],
- blob_info: &(&[u8], &BlobMetaData),
+ blob_info: &BlobInfo,
cert_info: &CertificateInfo,
metadata: &KeyMetaData,
km_uuid: &Uuid,
@@ -2253,7 +2325,27 @@
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_id = Self::create_key_entry_internal(tx, &domain, namespace, key_type, km_uuid)
.context("Trying to create new key entry.")?;
- let (blob, blob_metadata) = *blob_info;
+ let BlobInfo { blob, metadata: blob_metadata, superseded_blob } = *blob_info;
+
+ // On some occasions the key blob is already upgraded during the import.
+ // In order to make sure it gets properly deleted it is inserted into the
+ // database here and then immediately replaced by the superseding blob.
+ // The garbage collector will then subject the blob to deleteKey of the
+ // KM back end to permanently invalidate the key.
+ let need_gc = if let Some((blob, blob_metadata)) = superseded_blob {
+ Self::set_blob_internal(
+ tx,
+ key_id.id(),
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(blob_metadata),
+ )
+ .context("Trying to insert superseded key blob.")?;
+ true
+ } else {
+ false
+ };
+
Self::set_blob_internal(
tx,
key_id.id(),
@@ -2280,7 +2372,8 @@
.context("Trying to insert key parameters.")?;
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
let need_gc = Self::rebind_alias(tx, &key_id, alias, &domain, namespace, key_type)
- .context("Trying to rebind alias.")?;
+ .context("Trying to rebind alias.")?
+ || need_gc;
Ok(key_id).do_gc(need_gc)
})
.context("In store_new_key.")
@@ -3207,7 +3300,7 @@
}
#[cfg(test)]
-mod tests {
+pub mod tests {
use super::*;
use crate::key_parameter::{
@@ -3231,13 +3324,14 @@
use std::collections::BTreeMap;
use std::fmt::Write;
use std::sync::atomic::{AtomicU8, Ordering};
- use std::sync::Arc;
+ use std::sync::{Arc, RwLock};
use std::thread;
use std::time::{Duration, SystemTime};
+ use crate::utils::AesGcm;
#[cfg(disabled)]
use std::time::Instant;
- fn new_test_db() -> Result<KeystoreDB> {
+ pub fn new_test_db() -> Result<KeystoreDB> {
let conn = KeystoreDB::make_connection("file::memory:")?;
let mut db = KeystoreDB { conn, gc: None, perboot: Arc::new(perboot::PerbootDB::new()) };
@@ -3251,7 +3345,7 @@
where
F: Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static,
{
- let super_key: Arc<SuperKeyManager> = Default::default();
+ let super_key: Arc<RwLock<SuperKeyManager>> = Default::default();
let gc_db = KeystoreDB::new(path, None).expect("Failed to open test gc db_connection.");
let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db, super_key));
@@ -3456,7 +3550,10 @@
#[test]
fn test_store_signed_attestation_certificate_chain() -> Result<()> {
let mut db = new_test_db()?;
- let expiration_date: i64 = 20;
+ let expiration_date: i64 =
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS
+ + 10000;
let namespace: i64 = 30;
let base_byte: u8 = 1;
let loaded_values =
@@ -3464,7 +3561,7 @@
let chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
assert!(chain.is_some());
- let cert_chain = chain.unwrap();
+ let (_, cert_chain) = chain.unwrap();
assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
assert_eq!(cert_chain.batch_cert, loaded_values.batch_cert);
assert_eq!(cert_chain.cert_chain, loaded_values.cert_chain);
@@ -3533,7 +3630,9 @@
TempDir::new("test_remove_expired_certs_").expect("Failed to create temp dir.");
let mut db = new_test_db_with_gc(temp_dir.path(), |_, _| Ok(()))?;
let expiration_date: i64 =
- SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64 + 10000;
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS
+ + 10000;
let namespace: i64 = 30;
let namespace_del1: i64 = 45;
let namespace_del2: i64 = 60;
@@ -3544,7 +3643,7 @@
0x01, /* base_byte */
)?;
load_attestation_key_pool(&mut db, 45, namespace_del1, 0x02)?;
- load_attestation_key_pool(&mut db, 60, namespace_del2, 0x03)?;
+ load_attestation_key_pool(&mut db, expiration_date - 10001, namespace_del2, 0x03)?;
let blob_entry_row_count: u32 = db
.conn
@@ -3559,7 +3658,7 @@
let mut cert_chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
assert!(cert_chain.is_some());
- let value = cert_chain.unwrap();
+ let (_, value) = cert_chain.unwrap();
assert_eq!(entry_values.batch_cert, value.batch_cert);
assert_eq!(entry_values.cert_chain, value.cert_chain);
assert_eq!(entry_values.priv_key, value.private_key.to_vec());
@@ -3569,13 +3668,13 @@
namespace_del1,
&KEYSTORE_UUID,
)?;
- assert!(!cert_chain.is_some());
+ assert!(cert_chain.is_none());
cert_chain = db.retrieve_attestation_key_and_cert_chain(
Domain::APP,
namespace_del2,
&KEYSTORE_UUID,
)?;
- assert!(!cert_chain.is_some());
+ assert!(cert_chain.is_none());
// Give the garbage collector half a second to catch up.
std::thread::sleep(Duration::from_millis(500));
@@ -3591,6 +3690,73 @@
Ok(())
}
+ fn compare_rem_prov_values(
+ expected: &RemoteProvValues,
+ actual: Option<(KeyIdGuard, CertificateChain)>,
+ ) {
+ assert!(actual.is_some());
+ let (_, value) = actual.unwrap();
+ assert_eq!(expected.batch_cert, value.batch_cert);
+ assert_eq!(expected.cert_chain, value.cert_chain);
+ assert_eq!(expected.priv_key, value.private_key.to_vec());
+ }
+
+ #[test]
+ fn test_dont_remove_valid_certs() -> Result<()> {
+ let temp_dir =
+ TempDir::new("test_remove_expired_certs_").expect("Failed to create temp dir.");
+ let mut db = new_test_db_with_gc(temp_dir.path(), |_, _| Ok(()))?;
+ let expiration_date: i64 =
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS
+ + 10000;
+ let namespace1: i64 = 30;
+ let namespace2: i64 = 45;
+ let namespace3: i64 = 60;
+ let entry_values1 = load_attestation_key_pool(
+ &mut db,
+ expiration_date,
+ namespace1,
+ 0x01, /* base_byte */
+ )?;
+ let entry_values2 =
+ load_attestation_key_pool(&mut db, expiration_date + 40000, namespace2, 0x02)?;
+ let entry_values3 =
+ load_attestation_key_pool(&mut db, expiration_date - 9000, namespace3, 0x03)?;
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // We expect 9 rows here because there are three blobs per attestation key, i.e.,
+ // one key, one certificate chain, and one certificate.
+ assert_eq!(blob_entry_row_count, 9);
+
+ let mut cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace1, &KEYSTORE_UUID)?;
+ compare_rem_prov_values(&entry_values1, cert_chain);
+
+ cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace2, &KEYSTORE_UUID)?;
+ compare_rem_prov_values(&entry_values2, cert_chain);
+
+ cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace3, &KEYSTORE_UUID)?;
+ compare_rem_prov_values(&entry_values3, cert_chain);
+
+ // Give the garbage collector half a second to catch up.
+ std::thread::sleep(Duration::from_millis(500));
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // There should be 9 blob entries left, because all three keys are valid with
+ // three blobs each.
+ assert_eq!(blob_entry_row_count, 9);
+
+ Ok(())
+ }
#[test]
fn test_delete_all_attestation_keys() -> Result<()> {
let mut db = new_test_db()?;
@@ -5557,8 +5723,7 @@
None,
)?;
- let decrypted_secret_bytes =
- loaded_super_key.aes_gcm_decrypt(&encrypted_secret, &iv, &tag)?;
+ let decrypted_secret_bytes = loaded_super_key.decrypt(&encrypted_secret, &iv, &tag)?;
assert_eq!(secret_bytes, &*decrypted_secret_bytes);
Ok(())
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 2407525..cb6a266 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -450,7 +450,7 @@
KeyParameterValue::Algorithm(Algorithm::RSA)
| KeyParameterValue::Algorithm(Algorithm::EC) => {
return Err(Error::Km(Ec::UNSUPPORTED_PURPOSE)).context(
- "In authorize_create: public operations on asymmetric keys are not
+ "In authorize_create: public operations on asymmetric keys are not \
supported.",
);
}
@@ -566,8 +566,7 @@
// if both NO_AUTH_REQUIRED and USER_SECURE_ID tags are present, return error
if !user_secure_ids.is_empty() && no_auth_required {
return Err(Error::Km(Ec::INVALID_KEY_BLOB)).context(
- "In authorize_create: key has both NO_AUTH_REQUIRED
- and USER_SECURE_ID tags.",
+ "In authorize_create: key has both NO_AUTH_REQUIRED and USER_SECURE_ID tags.",
);
}
@@ -576,8 +575,8 @@
|| (user_auth_type.is_none() && !user_secure_ids.is_empty())
{
return Err(Error::Km(Ec::KEY_USER_NOT_AUTHENTICATED)).context(
- "In authorize_create: Auth required, but either auth type or secure ids
- are not present.",
+ "In authorize_create: Auth required, but either auth type or secure ids \
+ are not present.",
);
}
@@ -587,8 +586,7 @@
&& op_params.iter().any(|kp| kp.tag == Tag::NONCE)
{
return Err(Error::Km(Ec::CALLER_NONCE_PROHIBITED)).context(
- "In authorize_create, NONCE is present,
- although CALLER_NONCE is not present",
+ "In authorize_create, NONCE is present, although CALLER_NONCE is not present",
);
}
@@ -602,7 +600,7 @@
}
if let Some(level) = max_boot_level {
- if !SUPER_KEY.level_accessible(level) {
+ if !SUPER_KEY.read().unwrap().level_accessible(level) {
return Err(Error::Km(Ec::BOOT_LEVEL_EXCEEDED))
.context("In authorize_create: boot level is too late.");
}
diff --git a/keystore2/src/error.rs b/keystore2/src/error.rs
index f969cb6..f34c5da 100644
--- a/keystore2/src/error.rs
+++ b/keystore2/src/error.rs
@@ -37,6 +37,7 @@
};
use keystore2_selinux as selinux;
use std::cmp::PartialEq;
+use std::ffi::CString;
/// This is the main Keystore error type. It wraps the Keystore `ResponseCode` generated
/// from AIDL in the `Rc` variant and Keymint `ErrorCode` in the Km variant.
@@ -66,10 +67,15 @@
Error::Rc(ResponseCode::SYSTEM_ERROR)
}
- /// Short hand for `Error::Rc(ResponseCode::PERMISSION_DENIED`
+ /// Short hand for `Error::Rc(ResponseCode::PERMISSION_DENIED)`
pub fn perm() -> Self {
Error::Rc(ResponseCode::PERMISSION_DENIED)
}
+
+ /// Short hand for `Error::Rc(ResponseCode::OUT_OF_KEYS)`
+ pub fn out_of_keys() -> Self {
+ Error::Rc(ResponseCode::OUT_OF_KEYS)
+ }
}
/// Helper function to map the binder status we get from calls into KeyMint
@@ -184,6 +190,20 @@
)
}
+/// This function turns an anyhow error into an optional CString.
+/// This is especially useful to add a message string to a service specific error.
+/// If the formatted string was not convertible because it contained a nul byte,
+/// None is returned and a warning is logged.
+pub fn anyhow_error_to_cstring(e: &anyhow::Error) -> Option<CString> {
+ match CString::new(format!("{:?}", e)) {
+ Ok(msg) => Some(msg),
+ Err(_) => {
+ log::warn!("Cannot convert error message to CStr. It contained a nul byte.");
+ None
+ }
+ }
+}
+
/// This function behaves similar to map_or_log_error, but it does not log the errors, instead
/// it calls map_err on the error before mapping it to a binder result allowing callers to
/// log or transform the error before mapping it.
@@ -200,7 +220,10 @@
|e| {
let e = map_err(e);
let rc = get_error_code(&e);
- Err(BinderStatus::new_service_specific_error(rc, None))
+ Err(BinderStatus::new_service_specific_error(
+ rc,
+ anyhow_error_to_cstring(&e).as_deref(),
+ ))
},
handle_ok,
)
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index 25f08c8..341aa0a 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -27,7 +27,7 @@
use async_task::AsyncTask;
use std::sync::{
atomic::{AtomicU8, Ordering},
- Arc,
+ Arc, RwLock,
};
pub struct Gc {
@@ -47,7 +47,7 @@
F: FnOnce() -> (
Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
KeystoreDB,
- Arc<SuperKeyManager>,
+ Arc<RwLock<SuperKeyManager>>,
) + Send
+ 'static,
{
@@ -87,7 +87,7 @@
invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
db: KeystoreDB,
async_task: std::sync::Weak<AsyncTask>,
- super_key: Arc<SuperKeyManager>,
+ super_key: Arc<RwLock<SuperKeyManager>>,
notified: Arc<AtomicU8>,
}
@@ -121,6 +121,8 @@
if let Some(uuid) = blob_metadata.km_uuid() {
let blob = self
.super_key
+ .read()
+ .unwrap()
.unwrap_key_if_required(&blob_metadata, &blob)
.context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
(self.invalidate_key)(uuid, &*blob)
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 7028aae..14b3601 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -18,7 +18,7 @@
use crate::gc::Gc;
use crate::legacy_blob::LegacyBlobLoader;
-use crate::legacy_migrator::LegacyMigrator;
+use crate::legacy_importer::LegacyImporter;
use crate::super_key::SuperKeyManager;
use crate::utils::watchdog as wd;
use crate::{async_task::AsyncTask, database::MonotonicRawTime};
@@ -27,6 +27,7 @@
database::Uuid,
error::{map_binder_status, map_binder_status_code, Error, ErrorCode},
};
+use crate::km_compat::{KeyMintV1, BacklevelKeyMintWrapper};
use crate::{enforcements::Enforcements, error::map_km_error};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
IKeyMintDevice::IKeyMintDevice, IRemotelyProvisionedComponent::IRemotelyProvisionedComponent,
@@ -156,7 +157,7 @@
pub static ref DB_PATH: RwLock<PathBuf> = RwLock::new(
Path::new("/data/misc/keystore").to_path_buf());
/// Runtime database of unwrapped super keys.
- pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
+ pub static ref SUPER_KEY: Arc<RwLock<SuperKeyManager>> = Default::default();
/// Map of KeyMint devices.
static ref KEY_MINT_DEVICES: Mutex<DevicesMap<dyn IKeyMintDevice>> = Default::default();
/// Timestamp service.
@@ -175,8 +176,8 @@
pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
&DB_PATH.read().expect("Could not get the database path for legacy blob loader.")));
/// Legacy migrator. Atomically migrates legacy blobs to the database.
- pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
- Arc::new(LegacyMigrator::new(Arc::new(Default::default())));
+ pub static ref LEGACY_IMPORTER: Arc<LegacyImporter> =
+ Arc::new(LegacyImporter::new(Arc::new(Default::default())));
/// Background thread which handles logging via statsd and logd
pub static ref LOGS_HANDLER: Arc<AsyncTask> = Default::default();
@@ -197,14 +198,15 @@
static KEYMINT_SERVICE_NAME: &str = "android.hardware.security.keymint.IKeyMintDevice";
-/// Make a new connection to a KeyMint device of the given security level.
-/// If no native KeyMint device can be found this function also brings
-/// up the compatibility service and attempts to connect to the legacy wrapper.
-fn connect_keymint(
+/// Determine the service name for a KeyMint device of the given security level
+/// which implements at least the specified version of the `IKeyMintDevice`
+/// interface.
+fn keymint_service_name_by_version(
security_level: &SecurityLevel,
-) -> Result<(Strong<dyn IKeyMintDevice>, KeyMintHardwareInfo)> {
+ version: i32,
+) -> Result<Option<(i32, String)>> {
let keymint_instances =
- get_aidl_instances("android.hardware.security.keymint", 1, "IKeyMintDevice");
+ get_aidl_instances("android.hardware.security.keymint", version as usize, "IKeyMintDevice");
let service_name = match *security_level {
SecurityLevel::TRUSTED_ENVIRONMENT => {
@@ -222,12 +224,36 @@
}
}
_ => {
- return Err(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
- .context("In connect_keymint.")
+ return Err(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)).context(format!(
+ "In keymint_service_name_by_version: Trying to find keymint V{} for security level: {:?}",
+ version, security_level
+ ));
}
};
- let (keymint, hal_version) = if let Some(service_name) = service_name {
+ Ok(service_name.map(|service_name| (version, service_name)))
+}
+
+/// Make a new connection to a KeyMint device of the given security level.
+/// If no native KeyMint device can be found this function also brings
+/// up the compatibility service and attempts to connect to the legacy wrapper.
+fn connect_keymint(
+ security_level: &SecurityLevel,
+) -> Result<(Strong<dyn IKeyMintDevice>, KeyMintHardwareInfo)> {
+ // Count down from the current interface version back to one in order to
+ // also find out the interface version -- an implementation of V2 will show
+ // up in the list of V1-capable devices, but not vice-versa.
+ let service_name = keymint_service_name_by_version(security_level, 2)
+ .and_then(|sl| {
+ if sl.is_none() {
+ keymint_service_name_by_version(security_level, 1)
+ } else {
+ Ok(sl)
+ }
+ })
+ .context("In connect_keymint.")?;
+
+ let (keymint, hal_version) = if let Some((version, service_name)) = service_name {
let km: Strong<dyn IKeyMintDevice> =
map_binder_status_code(binder::get_interface(&service_name))
.context("In connect_keymint: Trying to connect to genuine KeyMint service.")?;
@@ -235,11 +261,7 @@
// - V1 is 100
// - V2 is 200
// etc.
- let hal_version = km
- .getInterfaceVersion()
- .map(|v| v * 100i32)
- .context("In connect_keymint: Trying to determine KeyMint AIDL version")?;
- (km, Some(hal_version))
+ (km, Some(version * 100))
} else {
// This is a no-op if it was called before.
keystore2_km_compat::add_keymint_device_service();
@@ -260,6 +282,48 @@
)
};
+ // If the KeyMint device is back-level, use a wrapper that intercepts and
+ // emulates things that are not supported by the hardware.
+ let keymint = match hal_version {
+ Some(200) => {
+ // Current KeyMint version: use as-is.
+ log::info!(
+ "KeyMint device is current version ({:?}) for security level: {:?}",
+ hal_version,
+ security_level
+ );
+ keymint
+ }
+ Some(100) => {
+ // KeyMint v1: perform software emulation.
+ log::info!(
+ "Add emulation wrapper around {:?} device for security level: {:?}",
+ hal_version,
+ security_level
+ );
+ BacklevelKeyMintWrapper::wrap(KeyMintV1::new(*security_level), keymint)
+ .context("In connect_keymint: Trying to create V1 compatibility wrapper.")?
+ }
+ None => {
+ // Compatibility wrapper around a KeyMaster device: this roughly
+ // behaves like KeyMint V1 (e.g. it includes AGREE_KEY support,
+ // albeit in software.)
+ log::info!(
+ "Add emulation wrapper around Keymaster device for security level: {:?}",
+ security_level
+ );
+ BacklevelKeyMintWrapper::wrap(KeyMintV1::new(*security_level), keymint).context(
+ "In connect_keymint: Trying to create km_compat V1 compatibility wrapper .",
+ )?
+ }
+ _ => {
+ return Err(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)).context(format!(
+ "In connect_keymint: unexpected hal_version {:?} for security level: {:?}",
+ hal_version, security_level
+ ))
+ }
+ };
+
let wp = wd::watch_millis("In connect_keymint: calling getHardwareInfo()", 500);
let mut hw_info = map_km_error(keymint.getHardwareInfo())
.context("In connect_keymint: Failed to get hardware info.")?;
diff --git a/keystore2/src/key_parameter.rs b/keystore2/src/key_parameter.rs
index 771d609..9854974 100644
--- a/keystore2/src/key_parameter.rs
+++ b/keystore2/src/key_parameter.rs
@@ -107,6 +107,9 @@
use anyhow::{Context, Result};
use rusqlite::types::{Null, ToSql, ToSqlOutput};
use rusqlite::Result as SqlResult;
+use serde::de::Deserializer;
+use serde::ser::Serializer;
+use serde::{Deserialize, Serialize};
/// This trait is used to associate a primitive to any type that can be stored inside a
/// KeyParameterValue, especially the AIDL enum types, e.g., keymint::{Algorithm, Digest, ...}.
@@ -121,7 +124,7 @@
/// there is no wrapped type):
/// `KeyParameterValue::$vname(<$vtype>::from_primitive(row.get(0)))`
trait AssociatePrimitive {
- type Primitive;
+ type Primitive: Into<Primitive> + TryFrom<Primitive>;
fn from_primitive(v: Self::Primitive) -> Self;
fn to_primitive(&self) -> Self::Primitive;
@@ -177,6 +180,7 @@
/// This enum allows passing a primitive value to `KeyParameterValue::new_from_tag_primitive_pair`
/// Usually, it is not necessary to use this type directly because the function uses
/// `Into<Primitive>` as a trait bound.
+#[derive(Deserialize, Serialize)]
pub enum Primitive {
/// Wraps an i64.
I64(i64),
@@ -213,37 +217,57 @@
UnknownTag,
}
-impl TryInto<i64> for Primitive {
+impl TryFrom<Primitive> for i64 {
type Error = PrimitiveError;
- fn try_into(self) -> Result<i64, Self::Error> {
- match self {
- Self::I64(v) => Ok(v),
+ fn try_from(p: Primitive) -> Result<i64, Self::Error> {
+ match p {
+ Primitive::I64(v) => Ok(v),
_ => Err(Self::Error::TypeMismatch),
}
}
}
-impl TryInto<i32> for Primitive {
+impl TryFrom<Primitive> for i32 {
type Error = PrimitiveError;
- fn try_into(self) -> Result<i32, Self::Error> {
- match self {
- Self::I32(v) => Ok(v),
+ fn try_from(p: Primitive) -> Result<i32, Self::Error> {
+ match p {
+ Primitive::I32(v) => Ok(v),
_ => Err(Self::Error::TypeMismatch),
}
}
}
-impl TryInto<Vec<u8>> for Primitive {
+impl TryFrom<Primitive> for Vec<u8> {
type Error = PrimitiveError;
- fn try_into(self) -> Result<Vec<u8>, Self::Error> {
- match self {
- Self::Vec(v) => Ok(v),
+ fn try_from(p: Primitive) -> Result<Vec<u8>, Self::Error> {
+ match p {
+ Primitive::Vec(v) => Ok(v),
_ => Err(Self::Error::TypeMismatch),
}
}
}
+fn serialize_primitive<S, P>(v: &P, serializer: S) -> Result<S::Ok, S::Error>
+where
+ S: Serializer,
+ P: AssociatePrimitive,
+{
+ let primitive: Primitive = v.to_primitive().into();
+ primitive.serialize(serializer)
+}
+
+fn deserialize_primitive<'de, D, T>(deserializer: D) -> Result<T, D::Error>
+where
+ D: Deserializer<'de>,
+ T: AssociatePrimitive,
+{
+ let primitive: Primitive = serde::de::Deserialize::deserialize(deserializer)?;
+ Ok(T::from_primitive(
+ primitive.try_into().map_err(|_| serde::de::Error::custom("Type Mismatch"))?,
+ ))
+}
+
/// Expands the list of KeyParameterValue variants as follows:
///
/// Input:
@@ -763,6 +787,14 @@
value: KmKeyParameterValue::$field_name(Default::default())}
),*]
}
+
+ #[cfg(test)]
+ fn make_key_parameter_defaults_vector() -> Vec<KeyParameter> {
+ vec![$(KeyParameter{
+ value: KeyParameterValue::$vname$((<$vtype as Default>::default()))?,
+ security_level: SecurityLevel(100),
+ }),*]
+ }
}
implement_try_from_to_km_parameter!(
@@ -777,27 +809,37 @@
implement_key_parameter_value! {
/// KeyParameterValue holds a value corresponding to one of the Tags defined in
/// the AIDL spec at hardware/interfaces/security/keymint
-#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
pub enum KeyParameterValue {
/// Associated with Tag:INVALID
#[key_param(tag = INVALID, field = Invalid)]
Invalid,
/// Set of purposes for which the key may be used
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = PURPOSE, field = KeyPurpose)]
KeyPurpose(KeyPurpose),
/// Cryptographic algorithm with which the key is used
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = ALGORITHM, field = Algorithm)]
Algorithm(Algorithm),
/// Size of the key , in bits
#[key_param(tag = KEY_SIZE, field = Integer)]
KeySize(i32),
/// Block cipher mode(s) with which the key may be used
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = BLOCK_MODE, field = BlockMode)]
BlockMode(BlockMode),
/// Digest algorithms that may be used with the key to perform signing and verification
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = DIGEST, field = Digest)]
Digest(Digest),
/// Padding modes that may be used with the key. Relevant to RSA, AES and 3DES keys.
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = PADDING, field = PaddingMode)]
PaddingMode(PaddingMode),
/// Can the caller provide a nonce for nonce-requiring operations
@@ -807,6 +849,8 @@
#[key_param(tag = MIN_MAC_LENGTH, field = Integer)]
MinMacLength(i32),
/// The elliptic curve
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = EC_CURVE, field = EcCurve)]
EcCurve(EcCurve),
/// Value of the public exponent for an RSA key pair
@@ -856,6 +900,8 @@
#[key_param(tag = NO_AUTH_REQUIRED, field = BoolValue)]
NoAuthRequired,
/// The types of user authenticators that may be used to authorize this key
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = USER_AUTH_TYPE, field = HardwareAuthenticatorType)]
HardwareAuthenticatorType(HardwareAuthenticatorType),
/// The time in seconds for which the key is authorized for use, after user authentication
@@ -886,6 +932,8 @@
#[key_param(tag = CREATION_DATETIME, field = DateTime)]
CreationDateTime(i64),
/// Specifies where the key was created, if known
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
#[key_param(tag = ORIGIN, field = Origin)]
KeyOrigin(KeyOrigin),
/// The key used by verified boot to validate the operating system booted
@@ -981,9 +1029,11 @@
}
/// KeyParameter wraps the KeyParameterValue and the security level at which it is enforced.
-#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]
pub struct KeyParameter {
value: KeyParameterValue,
+ #[serde(deserialize_with = "deserialize_primitive")]
+ #[serde(serialize_with = "serialize_primitive")]
security_level: SecurityLevel,
}
@@ -1106,6 +1156,18 @@
fn key_parameter_value_field_matches_tag_type() {
check_field_matches_tag_type(&KeyParameterValue::make_field_matches_tag_type_test_vector());
}
+
+ #[test]
+ fn key_parameter_serialization_test() {
+ let params = KeyParameterValue::make_key_parameter_defaults_vector();
+ let mut out_buffer: Vec<u8> = Default::default();
+ serde_cbor::to_writer(&mut out_buffer, &params)
+ .expect("Failed to serialize key parameters.");
+ let deserialized_params: Vec<KeyParameter> =
+ serde_cbor::from_reader(&mut out_buffer.as_slice())
+ .expect("Failed to deserialize key parameters.");
+ assert_eq!(params, deserialized_params);
+ }
}
#[cfg(test)]
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index abab4b6..55f5d15 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -19,7 +19,9 @@
use keystore2::maintenance::Maintenance;
use keystore2::metrics::Metrics;
use keystore2::metrics_store;
-use keystore2::remote_provisioning::RemoteProvisioningService;
+use keystore2::remote_provisioning::{
+ RemoteProvisioningService, RemotelyProvisionedKeyPoolService,
+};
use keystore2::service::KeystoreService;
use keystore2::{apc::ApcManager, shared_secret_negotiation};
use keystore2::{authorization::AuthorizationManager, id_rotation::IdRotationState};
@@ -33,6 +35,8 @@
static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
static METRICS_SERVICE_NAME: &str = "android.security.metrics";
static REMOTE_PROVISIONING_SERVICE_NAME: &str = "android.security.remoteprovisioning";
+static REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME: &str =
+ "android.security.remoteprovisioning.IRemotelyProvisionedKeyPool";
static USER_MANAGER_SERVICE_NAME: &str = "android.security.maintenance";
static LEGACY_KEYSTORE_SERVICE_NAME: &str = "android.security.legacykeystore";
@@ -40,7 +44,10 @@
fn main() {
// Initialize android logging.
android_logger::init_once(
- android_logger::Config::default().with_tag("keystore2").with_min_level(log::Level::Debug),
+ android_logger::Config::default()
+ .with_tag("keystore2")
+ .with_min_level(log::Level::Debug)
+ .with_log_id(android_logger::LogId::System),
);
// Redirect panic messages to logcat.
panic::set_hook(Box::new(|panic_info| {
@@ -145,6 +152,25 @@
});
}
+ // Even if the IRemotelyProvisionedComponent HAL is implemented, it doesn't mean that the keys
+ // may be fetched via the key pool. The HAL must be a new version that exports a unique id. If
+ // none of the HALs support this, then the key pool service is not published.
+ match RemotelyProvisionedKeyPoolService::new_native_binder() {
+ Ok(key_pool_service) => {
+ binder::add_service(
+ REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME,
+ key_pool_service.as_binder(),
+ )
+ .unwrap_or_else(|e| {
+ panic!(
+ "Failed to register service {} because of {:?}.",
+ REMOTELY_PROVISIONED_KEY_POOL_SERVICE_NAME, e
+ );
+ });
+ }
+ Err(e) => log::info!("Not publishing IRemotelyProvisionedKeyPool service: {:?}", e),
+ }
+
binder::add_service(LEGACY_KEYSTORE_SERVICE_NAME, legacykeystore.as_binder()).unwrap_or_else(
|e| {
panic!(
diff --git a/keystore2/src/km_compat.rs b/keystore2/src/km_compat.rs
new file mode 100644
index 0000000..788beef
--- /dev/null
+++ b/keystore2/src/km_compat.rs
@@ -0,0 +1,588 @@
+// Copyright 2020, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provide a wrapper around a KeyMint device that allows up-level features to
+//! be emulated on back-level devices.
+
+use crate::error::{map_binder_status, map_binder_status_code, map_or_log_err, Error, ErrorCode};
+use android_hardware_security_keymint::binder::{BinderFeatures, StatusCode, Strong};
+use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::TimeStampToken::TimeStampToken;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ AttestationKey::AttestationKey, BeginResult::BeginResult, EcCurve::EcCurve,
+ HardwareAuthToken::HardwareAuthToken, IKeyMintDevice::BnKeyMintDevice,
+ IKeyMintDevice::IKeyMintDevice, KeyCharacteristics::KeyCharacteristics,
+ KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
+ KeyMintHardwareInfo::KeyMintHardwareInfo, KeyParameter::KeyParameter,
+ KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, SecurityLevel::SecurityLevel,
+ Tag::Tag,
+};
+use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
+use anyhow::Context;
+use keystore2_crypto::{hmac_sha256, HMAC_SHA256_LEN};
+
+/// Key data associated with key generation/import.
+#[derive(Debug, PartialEq, Eq)]
+pub enum KeyImportData<'a> {
+ None,
+ Pkcs8(&'a [u8]),
+ Raw(&'a [u8]),
+}
+
+impl<'a> KeyImportData<'a> {
+ /// Translate import parameters into a `KeyImportData` instance.
+ fn new(key_format: KeyFormat, key_data: &'a [u8]) -> binder::Result<Self> {
+ match key_format {
+ KeyFormat::PKCS8 => Ok(KeyImportData::Pkcs8(key_data)),
+ KeyFormat::RAW => Ok(KeyImportData::Raw(key_data)),
+ _ => Err(binder::Status::new_service_specific_error(
+ ErrorCode::UNSUPPORTED_KEY_FORMAT.0,
+ None,
+ )),
+ }
+ }
+}
+
+/// A key blob that may be software-emulated or may be directly produced by an
+/// underlying device. In either variant the inner data is the keyblob itself,
+/// as seen by the relevant device.
+#[derive(Debug, PartialEq, Eq)]
+pub enum KeyBlob<'a> {
+ Raw(&'a [u8]),
+ Wrapped(&'a [u8]),
+}
+
+/// Trait for detecting that software emulation of a current-version KeyMint
+/// feature is required for a back-level KeyMint implementation.
+pub trait EmulationDetector: Send + Sync {
+ /// Indicate whether software emulation is required for key
+ /// generation/import using the provided parameters.
+ fn emulation_required(&self, params: &[KeyParameter], import_data: &KeyImportData) -> bool;
+}
+
+const KEYBLOB_PREFIX: &[u8] = b"SoftKeyMintForV1Blob";
+const KEYBLOB_HMAC_KEY: &[u8] = b"SoftKeyMintForV1HMACKey";
+
+/// Wrap the provided keyblob:
+/// - prefix it with an identifier specific to this wrapper
+/// - suffix it with an HMAC tag, using the [`KEYBLOB_HMAC_KEY`] and `keyblob`.
+fn wrap_keyblob(keyblob: &[u8]) -> anyhow::Result<Vec<u8>> {
+ let mut result = Vec::with_capacity(KEYBLOB_PREFIX.len() + keyblob.len() + HMAC_SHA256_LEN);
+ result.extend_from_slice(KEYBLOB_PREFIX);
+ result.extend_from_slice(keyblob);
+ let tag = hmac_sha256(KEYBLOB_HMAC_KEY, keyblob)
+ .context("In wrap_keyblob, failed to calculate HMAC-SHA256")?;
+ result.extend_from_slice(&tag);
+ Ok(result)
+}
+
+/// Return an unwrapped version of the provided `keyblob`, which may or may
+/// not be associated with the software emulation.
+fn unwrap_keyblob(keyblob: &[u8]) -> KeyBlob {
+ if !keyblob.starts_with(KEYBLOB_PREFIX) {
+ return KeyBlob::Raw(keyblob);
+ }
+ let without_prefix = &keyblob[KEYBLOB_PREFIX.len()..];
+ if without_prefix.len() < HMAC_SHA256_LEN {
+ return KeyBlob::Raw(keyblob);
+ }
+ let (inner_keyblob, want_tag) = without_prefix.split_at(without_prefix.len() - HMAC_SHA256_LEN);
+ let got_tag = match hmac_sha256(KEYBLOB_HMAC_KEY, inner_keyblob) {
+ Ok(tag) => tag,
+ Err(e) => {
+ log::error!("Error calculating HMAC-SHA256 for keyblob unwrap: {:?}", e);
+ return KeyBlob::Raw(keyblob);
+ }
+ };
+ // Comparison does not need to be constant-time here.
+ if want_tag == got_tag {
+ KeyBlob::Wrapped(inner_keyblob)
+ } else {
+ KeyBlob::Raw(keyblob)
+ }
+}
+
+/// Wrapper around a real device that implements a back-level version of
+/// `IKeyMintDevice`
+pub struct BacklevelKeyMintWrapper<T: EmulationDetector> {
+ /// The `real` device implements some earlier version of `IKeyMintDevice`
+ real: Strong<dyn IKeyMintDevice>,
+ /// The `soft`ware device implements the current version of `IKeyMintDevice`
+ soft: Strong<dyn IKeyMintDevice>,
+ /// Detector for operations that are not supported by the earlier version of
+ /// `IKeyMintDevice`. Or possibly a large flightless bird, who can tell.
+ emu: T,
+}
+
+impl<T> BacklevelKeyMintWrapper<T>
+where
+ T: EmulationDetector + 'static,
+{
+ /// Create a wrapper around the provided back-level KeyMint device, so that
+ /// software emulation can be performed for any current-version features not
+ /// provided by the real device.
+ pub fn wrap(
+ emu: T,
+ real: Strong<dyn IKeyMintDevice>,
+ ) -> anyhow::Result<Strong<dyn IKeyMintDevice>> {
+ // This is a no-op if it was called before.
+ keystore2_km_compat::add_keymint_device_service();
+
+ let keystore_compat_service: Strong<dyn IKeystoreCompatService> = map_binder_status_code(
+ binder::get_interface("android.security.compat"),
+ )
+ .context("In BacklevelKeyMintWrapper::wrap: Trying to connect to compat service.")?;
+ let soft =
+ map_binder_status(keystore_compat_service.getKeyMintDevice(SecurityLevel::SOFTWARE))
+ .map_err(|e| match e {
+ Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
+ Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
+ }
+ e => e,
+ })
+ .context("In BacklevelKeyMintWrapper::wrap: Trying to get software device.")?;
+
+ Ok(BnKeyMintDevice::new_binder(
+ Self { real, soft, emu },
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
+ }
+}
+
+impl<T> binder::Interface for BacklevelKeyMintWrapper<T> where T: EmulationDetector {}
+
+impl<T> IKeyMintDevice for BacklevelKeyMintWrapper<T>
+where
+ T: EmulationDetector + 'static,
+{
+ // For methods that don't involve keyblobs, forward to either the real
+ // device, or to both real & emulated devices.
+ fn getHardwareInfo(&self) -> binder::Result<KeyMintHardwareInfo> {
+ self.real.getHardwareInfo()
+ }
+ fn addRngEntropy(&self, data: &[u8]) -> binder::Result<()> {
+ self.real.addRngEntropy(data)
+ }
+ fn deleteAllKeys(&self) -> binder::Result<()> {
+ self.real.deleteAllKeys()
+ }
+ fn destroyAttestationIds(&self) -> binder::Result<()> {
+ self.real.destroyAttestationIds()
+ }
+ fn deviceLocked(
+ &self,
+ password_only: bool,
+ timestamp_token: Option<&TimeStampToken>,
+ ) -> binder::Result<()> {
+ // Propagate to both real and software devices, but only pay attention
+ // to the result from the real device.
+ let _ = self.soft.deviceLocked(password_only, timestamp_token);
+ self.real.deviceLocked(password_only, timestamp_token)
+ }
+ fn earlyBootEnded(&self) -> binder::Result<()> {
+ // Propagate to both real and software devices, but only pay attention
+ // to the result from the real device.
+ let _ = self.soft.earlyBootEnded();
+ self.real.earlyBootEnded()
+ }
+
+ // For methods that emit keyblobs, check whether the underlying real device
+ // supports the relevant parameters, and forward to the appropriate device.
+ // If the emulated device is used, ensure that the created keyblob gets
+ // prefixed so we can recognize it in future.
+ fn generateKey(
+ &self,
+ key_params: &[KeyParameter],
+ attestation_key: Option<&AttestationKey>,
+ ) -> binder::Result<KeyCreationResult> {
+ if self.emu.emulation_required(key_params, &KeyImportData::None) {
+ let mut result = self.soft.generateKey(key_params, attestation_key)?;
+ result.keyBlob = map_or_log_err(wrap_keyblob(&result.keyBlob), Ok)?;
+ Ok(result)
+ } else {
+ self.real.generateKey(key_params, attestation_key)
+ }
+ }
+ fn importKey(
+ &self,
+ key_params: &[KeyParameter],
+ key_format: KeyFormat,
+ key_data: &[u8],
+ attestation_key: Option<&AttestationKey>,
+ ) -> binder::Result<KeyCreationResult> {
+ if self.emu.emulation_required(key_params, &KeyImportData::new(key_format, key_data)?) {
+ let mut result =
+ self.soft.importKey(key_params, key_format, key_data, attestation_key)?;
+ result.keyBlob = map_or_log_err(wrap_keyblob(&result.keyBlob), Ok)?;
+ Ok(result)
+ } else {
+ self.real.importKey(key_params, key_format, key_data, attestation_key)
+ }
+ }
+ fn importWrappedKey(
+ &self,
+ wrapped_key_data: &[u8],
+ wrapping_key_blob: &[u8],
+ masking_key: &[u8],
+ unwrapping_params: &[KeyParameter],
+ password_sid: i64,
+ biometric_sid: i64,
+ ) -> binder::Result<KeyCreationResult> {
+ // A wrapped key cannot be software-emulated, as the wrapping key is
+ // likely hardware-bound.
+ self.real.importWrappedKey(
+ wrapped_key_data,
+ wrapping_key_blob,
+ masking_key,
+ unwrapping_params,
+ password_sid,
+ biometric_sid,
+ )
+ }
+
+ // For methods that use keyblobs, determine which device to forward the
+ // operation to based on whether the keyblob is appropriately prefixed.
+ fn upgradeKey(
+ &self,
+ keyblob_to_upgrade: &[u8],
+ upgrade_params: &[KeyParameter],
+ ) -> binder::Result<Vec<u8>> {
+ match unwrap_keyblob(keyblob_to_upgrade) {
+ KeyBlob::Raw(keyblob) => self.real.upgradeKey(keyblob, upgrade_params),
+ KeyBlob::Wrapped(keyblob) => {
+ // Re-wrap the upgraded keyblob.
+ let upgraded_keyblob = self.soft.upgradeKey(keyblob, upgrade_params)?;
+ map_or_log_err(wrap_keyblob(&upgraded_keyblob), Ok)
+ }
+ }
+ }
+ fn deleteKey(&self, keyblob: &[u8]) -> binder::Result<()> {
+ match unwrap_keyblob(keyblob) {
+ KeyBlob::Raw(keyblob) => self.real.deleteKey(keyblob),
+ KeyBlob::Wrapped(keyblob) => {
+ // Forward to the software implementation for completeness, but
+ // this should always be a no-op.
+ self.soft.deleteKey(keyblob)
+ }
+ }
+ }
+ fn begin(
+ &self,
+ purpose: KeyPurpose,
+ keyblob: &[u8],
+ params: &[KeyParameter],
+ auth_token: Option<&HardwareAuthToken>,
+ ) -> binder::Result<BeginResult> {
+ match unwrap_keyblob(keyblob) {
+ KeyBlob::Raw(keyblob) => self.real.begin(purpose, keyblob, params, auth_token),
+ KeyBlob::Wrapped(keyblob) => self.soft.begin(purpose, keyblob, params, auth_token),
+ }
+ }
+ fn getKeyCharacteristics(
+ &self,
+ keyblob: &[u8],
+ app_id: &[u8],
+ app_data: &[u8],
+ ) -> binder::Result<Vec<KeyCharacteristics>> {
+ match unwrap_keyblob(keyblob) {
+ KeyBlob::Raw(keyblob) => self.real.getKeyCharacteristics(keyblob, app_id, app_data),
+ KeyBlob::Wrapped(keyblob) => self.soft.getKeyCharacteristics(keyblob, app_id, app_data),
+ }
+ }
+ fn getRootOfTrustChallenge(&self) -> binder::Result<[u8; 16]> {
+ self.real.getRootOfTrustChallenge()
+ }
+ fn getRootOfTrust(&self, challenge: &[u8; 16]) -> binder::Result<Vec<u8>> {
+ self.real.getRootOfTrust(challenge)
+ }
+ fn sendRootOfTrust(&self, root_of_trust: &[u8]) -> binder::Result<()> {
+ self.real.sendRootOfTrust(root_of_trust)
+ }
+ fn convertStorageKeyToEphemeral(&self, storage_keyblob: &[u8]) -> binder::Result<Vec<u8>> {
+ // Storage keys should never be associated with a software emulated device.
+ self.real.convertStorageKeyToEphemeral(storage_keyblob)
+ }
+}
+
+/// Detector for current features that are not implemented by KeyMint V1.
+#[derive(Debug)]
+pub struct KeyMintV1 {
+ sec_level: SecurityLevel,
+}
+
+impl KeyMintV1 {
+ pub fn new(sec_level: SecurityLevel) -> Self {
+ Self { sec_level }
+ }
+}
+
+impl EmulationDetector for KeyMintV1 {
+ fn emulation_required(&self, params: &[KeyParameter], _import_data: &KeyImportData) -> bool {
+ // No current difference from KeyMint v1 for STRONGBOX (it doesn't
+ // support curve 25519).
+ if self.sec_level == SecurityLevel::STRONGBOX {
+ return false;
+ }
+
+ // KeyMint V1 does not support the use of curve 25519, so hunt for that
+ // in the parameters.
+ if params.iter().any(|p| {
+ p.tag == Tag::EC_CURVE && p.value == KeyParameterValue::EcCurve(EcCurve::CURVE_25519)
+ }) {
+ return true;
+ }
+ // In theory, if the `import_data` is `KeyImportData::Pkcs8` we could
+ // check the imported keymaterial for the Ed25519 / X25519 OIDs in the
+ // PKCS8 keydata, and use that to decide to route to software. However,
+ // the KeyMint spec doesn't require that so don't attempt to parse the
+ // key material here.
+ false
+ }
+}
+
+/// Detector for current features that are not implemented by KeyMaster, via the
+/// km_compat wrapper.
+#[derive(Debug)]
+pub struct Keymaster {
+ v1: KeyMintV1,
+}
+
+/// TODO(b/216434270): This could be used this to replace the emulation routing
+/// in the km_compat C++ code, and allow support for imported ECDH keys along
+/// the way. Would need to figure out what would happen to existing emulated
+/// keys though.
+#[allow(dead_code)]
+impl Keymaster {
+ pub fn new(sec_level: SecurityLevel) -> Self {
+ Self { v1: KeyMintV1::new(sec_level) }
+ }
+}
+
+impl EmulationDetector for Keymaster {
+ fn emulation_required(&self, params: &[KeyParameter], import_data: &KeyImportData) -> bool {
+ // The km_compat wrapper on top of Keymaster emulates the KeyMint V1
+ // interface, so any feature from > v1 needs to be emulated.
+ if self.v1.emulation_required(params, import_data) {
+ return true;
+ }
+
+ // Keymaster does not support ECDH (KeyPurpose::AGREE_KEY), so hunt for
+ // that in the parameters.
+ if params.iter().any(|p| {
+ p.tag == Tag::PURPOSE && p.value == KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY)
+ }) {
+ return true;
+ }
+ false
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_key_import_data() {
+ let data = vec![1, 2, 3];
+ assert_eq!(KeyImportData::new(KeyFormat::PKCS8, &data), Ok(KeyImportData::Pkcs8(&data)));
+ assert_eq!(KeyImportData::new(KeyFormat::RAW, &data), Ok(KeyImportData::Raw(&data)));
+ assert!(KeyImportData::new(KeyFormat::X509, &data).is_err());
+ }
+
+ #[test]
+ fn test_wrap_keyblob() {
+ let keyblob = vec![1, 2, 3];
+ let wrapped = wrap_keyblob(&keyblob).unwrap();
+ assert_eq!(&wrapped[..KEYBLOB_PREFIX.len()], KEYBLOB_PREFIX);
+ assert_eq!(&wrapped[KEYBLOB_PREFIX.len()..KEYBLOB_PREFIX.len() + keyblob.len()], &keyblob);
+ assert_eq!(unwrap_keyblob(&keyblob), KeyBlob::Raw(&keyblob));
+ assert_eq!(unwrap_keyblob(&wrapped), KeyBlob::Wrapped(&keyblob));
+
+ let mut corrupt_prefix = wrapped.clone();
+ corrupt_prefix[0] ^= 0x01;
+ assert_eq!(unwrap_keyblob(&corrupt_prefix), KeyBlob::Raw(&corrupt_prefix));
+
+ let mut corrupt_suffix = wrapped.clone();
+ corrupt_suffix[wrapped.len() - 1] ^= 0x01;
+ assert_eq!(unwrap_keyblob(&corrupt_suffix), KeyBlob::Raw(&corrupt_suffix));
+
+ let too_short = &wrapped[..wrapped.len() - 4];
+ assert_eq!(unwrap_keyblob(too_short), KeyBlob::Raw(too_short));
+ }
+
+ #[test]
+ fn test_keymintv1_emulation_required() {
+ let tests = vec![
+ (SecurityLevel::TRUSTED_ENVIRONMENT, vec![], false),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN),
+ },
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::VERIFY),
+ },
+ ],
+ false,
+ ),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ }],
+ false,
+ ),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::P_256),
+ },
+ ],
+ false,
+ ),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::CURVE_25519),
+ },
+ ],
+ true,
+ ),
+ (
+ SecurityLevel::STRONGBOX,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::CURVE_25519),
+ },
+ ],
+ false,
+ ),
+ ];
+ for (sec_level, params, want) in tests {
+ let v1 = KeyMintV1::new(sec_level);
+ let got = v1.emulation_required(&params, &KeyImportData::None);
+ assert_eq!(got, want, "emulation_required({:?})={}, want {}", params, got, want);
+ }
+ }
+
+ #[test]
+ fn test_keymaster_emulation_required() {
+ let tests = vec![
+ (SecurityLevel::TRUSTED_ENVIRONMENT, vec![], false),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN),
+ },
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::VERIFY),
+ },
+ ],
+ false,
+ ),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ }],
+ true,
+ ),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::P_256),
+ },
+ ],
+ true,
+ ),
+ (
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::CURVE_25519),
+ },
+ ],
+ true,
+ ),
+ (
+ SecurityLevel::STRONGBOX,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::AGREE_KEY),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::CURVE_25519),
+ },
+ ],
+ true,
+ ),
+ (
+ SecurityLevel::STRONGBOX,
+ vec![
+ KeyParameter {
+ tag: Tag::PURPOSE,
+ value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN),
+ },
+ KeyParameter {
+ tag: Tag::EC_CURVE,
+ value: KeyParameterValue::EcCurve(EcCurve::CURVE_25519),
+ },
+ ],
+ false,
+ ),
+ ];
+ for (sec_level, params, want) in tests {
+ let v0 = Keymaster::new(sec_level);
+ let got = v0.emulation_required(&params, &KeyImportData::None);
+ assert_eq!(got, want, "emulation_required({:?})={}, want {}", params, got, want);
+ }
+ }
+}
diff --git a/keystore2/src/km_compat/km_compat.cpp b/keystore2/src/km_compat/km_compat.cpp
index bb60047..6d0630b 100644
--- a/keystore2/src/km_compat/km_compat.cpp
+++ b/keystore2/src/km_compat/km_compat.cpp
@@ -80,7 +80,6 @@
case Tag::CERTIFICATE_SUBJECT:
case Tag::CERTIFICATE_NOT_BEFORE:
case Tag::CERTIFICATE_NOT_AFTER:
- case Tag::INCLUDE_UNIQUE_ID:
case Tag::DEVICE_UNIQUE_ATTESTATION:
return true;
default:
@@ -127,7 +126,7 @@
case Tag::TRUSTED_CONFIRMATION_REQUIRED:
case Tag::UNLOCKED_DEVICE_REQUIRED:
case Tag::CREATION_DATETIME:
- case Tag::UNIQUE_ID:
+ case Tag::INCLUDE_UNIQUE_ID:
case Tag::IDENTITY_CREDENTIAL_KEY:
case Tag::STORAGE_KEY:
case Tag::MAC_LENGTH:
@@ -384,29 +383,39 @@
return ssps;
}
-void OperationSlots::setNumFreeSlots(uint8_t numFreeSlots) {
+void OperationSlotManager::setNumFreeSlots(uint8_t numFreeSlots) {
std::lock_guard<std::mutex> lock(mNumFreeSlotsMutex);
mNumFreeSlots = numFreeSlots;
}
-bool OperationSlots::claimSlot() {
- std::lock_guard<std::mutex> lock(mNumFreeSlotsMutex);
- if (mNumFreeSlots > 0) {
- mNumFreeSlots--;
- return true;
+std::optional<OperationSlot>
+OperationSlotManager::claimSlot(std::shared_ptr<OperationSlotManager> operationSlots) {
+ std::lock_guard<std::mutex> lock(operationSlots->mNumFreeSlotsMutex);
+ if (operationSlots->mNumFreeSlots > 0) {
+ operationSlots->mNumFreeSlots--;
+ return OperationSlot(std::move(operationSlots), std::nullopt);
}
- return false;
+ return std::nullopt;
}
-void OperationSlots::freeSlot() {
+OperationSlot
+OperationSlotManager::claimReservedSlot(std::shared_ptr<OperationSlotManager> operationSlots) {
+ std::unique_lock<std::mutex> reservedGuard(operationSlots->mReservedSlotMutex);
+ return OperationSlot(std::move(operationSlots), std::move(reservedGuard));
+}
+
+OperationSlot::OperationSlot(std::shared_ptr<OperationSlotManager> slots,
+ std::optional<std::unique_lock<std::mutex>> reservedGuard)
+ : mOperationSlots(std::move(slots)), mReservedGuard(std::move(reservedGuard)) {}
+
+void OperationSlotManager::freeSlot() {
std::lock_guard<std::mutex> lock(mNumFreeSlotsMutex);
mNumFreeSlots++;
}
-void OperationSlot::freeSlot() {
- if (mIsActive) {
+OperationSlot::~OperationSlot() {
+ if (!mReservedGuard && mOperationSlots) {
mOperationSlots->freeSlot();
- mIsActive = false;
}
}
@@ -496,16 +505,15 @@
auto legacyKeyGENParams = convertKeyParametersToLegacy(extractGenerationParams(inKeyParams));
auto legacyKeyFormat = convertKeyFormatToLegacy(in_inKeyFormat);
KMV1::ErrorCode errorCode;
- auto result = mDevice->importKey(legacyKeyGENParams, legacyKeyFormat, in_inKeyData,
- [&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyBlob,
- const V4_0_KeyCharacteristics& keyCharacteristics) {
- errorCode = convert(error);
- out_creationResult->keyBlob =
- keyBlobPrefix(keyBlob, false);
- out_creationResult->keyCharacteristics =
- processLegacyCharacteristics(
- securityLevel_, inKeyParams, keyCharacteristics);
- });
+ auto result = mDevice->importKey(
+ legacyKeyGENParams, legacyKeyFormat, in_inKeyData,
+ [&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyBlob,
+ const V4_0_KeyCharacteristics& keyCharacteristics) {
+ errorCode = convert(error);
+ out_creationResult->keyBlob = keyBlobPrefix(keyBlob, false);
+ out_creationResult->keyCharacteristics =
+ processLegacyCharacteristics(securityLevel_, inKeyParams, keyCharacteristics);
+ });
if (!result.isOk()) {
LOG(ERROR) << __func__ << " transaction failed. " << result.description();
return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
@@ -613,9 +621,15 @@
const std::vector<KeyParameter>& in_inParams,
const std::optional<HardwareAuthToken>& in_inAuthToken,
BeginResult* _aidl_return) {
- if (!mOperationSlots.claimSlot()) {
- return convertErrorCode(V4_0_ErrorCode::TOO_MANY_OPERATIONS);
- }
+ return beginInternal(in_inPurpose, prefixedKeyBlob, in_inParams, in_inAuthToken,
+ false /* useReservedSlot */, _aidl_return);
+}
+
+ScopedAStatus KeyMintDevice::beginInternal(KeyPurpose in_inPurpose,
+ const std::vector<uint8_t>& prefixedKeyBlob,
+ const std::vector<KeyParameter>& in_inParams,
+ const std::optional<HardwareAuthToken>& in_inAuthToken,
+ bool useReservedSlot, BeginResult* _aidl_return) {
const std::vector<uint8_t>& in_inKeyBlob = prefixedKeyBlobRemovePrefix(prefixedKeyBlob);
if (prefixedKeyBlobIsSoftKeyMint(prefixedKeyBlob)) {
@@ -623,28 +637,41 @@
_aidl_return);
}
+ OperationSlot slot;
+    // The software device path returned above, so only hardware-backed
+    // operations reach this point and need to claim a slot.
+ if (useReservedSlot) {
+ // There is only one reserved slot. This function blocks until
+ // the reserved slot becomes available.
+ slot = OperationSlotManager::claimReservedSlot(mOperationSlots);
+ } else {
+ if (auto opt_slot = OperationSlotManager::claimSlot(mOperationSlots)) {
+ slot = std::move(*opt_slot);
+ } else {
+ return convertErrorCode(V4_0_ErrorCode::TOO_MANY_OPERATIONS);
+ }
+ }
+
auto legacyPurpose =
static_cast<::android::hardware::keymaster::V4_0::KeyPurpose>(in_inPurpose);
auto legacyParams = convertKeyParametersToLegacy(in_inParams);
auto legacyAuthToken = convertAuthTokenToLegacy(in_inAuthToken);
KMV1::ErrorCode errorCode;
- auto result = mDevice->begin(
- legacyPurpose, in_inKeyBlob, legacyParams, legacyAuthToken,
- [&](V4_0_ErrorCode error, const hidl_vec<V4_0_KeyParameter>& outParams,
- uint64_t operationHandle) {
- errorCode = convert(error);
- _aidl_return->challenge = operationHandle;
- _aidl_return->params = convertKeyParametersFromLegacy(outParams);
- _aidl_return->operation = ndk::SharedRefBase::make<KeyMintOperation>(
- mDevice, operationHandle, &mOperationSlots, error == V4_0_ErrorCode::OK);
- });
+ auto result =
+ mDevice->begin(legacyPurpose, in_inKeyBlob, legacyParams, legacyAuthToken,
+ [&](V4_0_ErrorCode error, const hidl_vec<V4_0_KeyParameter>& outParams,
+ uint64_t operationHandle) {
+ errorCode = convert(error);
+ if (error == V4_0_ErrorCode::OK) {
+ _aidl_return->challenge = operationHandle;
+ _aidl_return->params = convertKeyParametersFromLegacy(outParams);
+ _aidl_return->operation = ndk::SharedRefBase::make<KeyMintOperation>(
+ mDevice, operationHandle, std::move(slot));
+ }
+ });
if (!result.isOk()) {
LOG(ERROR) << __func__ << " transaction failed. " << result.description();
errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != KMV1::ErrorCode::OK) {
- mOperationSlots.freeSlot();
- }
return convertErrorCode(errorCode);
}
@@ -704,8 +731,9 @@
LOG(ERROR) << __func__ << " export_key failed: " << ret.description();
return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
}
- if (km_error != KMV1::ErrorCode::OK)
+ if (km_error != KMV1::ErrorCode::OK) {
LOG(ERROR) << __func__ << " export_key failed, code " << int32_t(km_error);
+ }
return convertErrorCode(km_error);
}
@@ -741,6 +769,19 @@
}
}
+ScopedAStatus KeyMintDevice::getRootOfTrustChallenge(std::array<uint8_t, 16>* /* challenge */) {
+ return convertErrorCode(KMV1::ErrorCode::UNIMPLEMENTED);
+}
+
+ScopedAStatus KeyMintDevice::getRootOfTrust(const std::array<uint8_t, 16>& /* challenge */,
+ std::vector<uint8_t>* /* rootOfTrust */) {
+ return convertErrorCode(KMV1::ErrorCode::UNIMPLEMENTED);
+}
+
+ScopedAStatus KeyMintDevice::sendRootOfTrust(const std::vector<uint8_t>& /* rootOfTrust */) {
+ return convertErrorCode(KMV1::ErrorCode::UNIMPLEMENTED);
+}
+
ScopedAStatus KeyMintOperation::updateAad(const std::vector<uint8_t>& input,
const std::optional<HardwareAuthToken>& optAuthToken,
const std::optional<TimeStampToken>& optTimeStampToken) {
@@ -757,7 +798,11 @@
LOG(ERROR) << __func__ << " transaction failed. " << result.description();
errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != KMV1::ErrorCode::OK) mOperationSlot.freeSlot();
+
+ // Operation slot is no longer occupied.
+ if (errorCode != KMV1::ErrorCode::OK) {
+ mOperationSlot = std::nullopt;
+ }
return convertErrorCode(errorCode);
}
@@ -815,7 +860,10 @@
inputPos += consumed;
}
- if (errorCode != KMV1::ErrorCode::OK) mOperationSlot.freeSlot();
+ // Operation slot is no longer occupied.
+ if (errorCode != KMV1::ErrorCode::OK) {
+ mOperationSlot = std::nullopt;
+ }
return convertErrorCode(errorCode);
}
@@ -846,17 +894,19 @@
*out_output = output;
});
- mOperationSlot.freeSlot();
if (!result.isOk()) {
LOG(ERROR) << __func__ << " transaction failed. " << result.description();
errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
+
+ mOperationSlot = std::nullopt;
+
return convertErrorCode(errorCode);
}
ScopedAStatus KeyMintOperation::abort() {
auto result = mDevice->abort(mOperationHandle);
- mOperationSlot.freeSlot();
+ mOperationSlot = std::nullopt;
if (!result.isOk()) {
LOG(ERROR) << __func__ << " transaction failed. " << result.description();
return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
@@ -865,7 +915,7 @@
}
KeyMintOperation::~KeyMintOperation() {
- if (mOperationSlot.hasSlot()) {
+ if (mOperationSlot) {
auto error = abort();
if (!error.isOk()) {
LOG(WARNING) << "Error calling abort in ~KeyMintOperation: " << error.getMessage();
@@ -1118,8 +1168,8 @@
kps.push_back(KMV1::makeKeyParameter(KMV1::TAG_PADDING, origPadding));
}
BeginResult beginResult;
- auto error =
- begin(KeyPurpose::SIGN, prefixedKeyBlob, kps, HardwareAuthToken(), &beginResult);
+ auto error = beginInternal(KeyPurpose::SIGN, prefixedKeyBlob, kps, HardwareAuthToken(),
+ true /* useReservedSlot */, &beginResult);
if (!error.isOk()) {
errorCode = toErrorCode(error);
return std::vector<uint8_t>();
@@ -1355,20 +1405,21 @@
}
void KeyMintDevice::setNumFreeSlots(uint8_t numFreeSlots) {
- mOperationSlots.setNumFreeSlots(numFreeSlots);
+ mOperationSlots->setNumFreeSlots(numFreeSlots);
}
// Constructors and helpers.
KeyMintDevice::KeyMintDevice(sp<Keymaster> device, KeyMintSecurityLevel securityLevel)
- : mDevice(device), securityLevel_(securityLevel) {
+ : mDevice(device), mOperationSlots(std::make_shared<OperationSlotManager>()),
+ securityLevel_(securityLevel) {
if (securityLevel == KeyMintSecurityLevel::STRONGBOX) {
setNumFreeSlots(3);
} else {
setNumFreeSlots(15);
}
- softKeyMintDevice_.reset(CreateKeyMintDevice(KeyMintSecurityLevel::SOFTWARE));
+ softKeyMintDevice_ = CreateKeyMintDevice(KeyMintSecurityLevel::SOFTWARE);
}
sp<Keymaster> getDevice(KeyMintSecurityLevel securityLevel) {
@@ -1391,14 +1442,33 @@
}
}
+std::shared_ptr<IKeyMintDevice> getSoftwareKeymintDevice() {
+ static std::mutex mutex;
+ static std::shared_ptr<IKeyMintDevice> swDevice;
+ std::lock_guard<std::mutex> lock(mutex);
+ if (!swDevice) {
+ swDevice = CreateKeyMintDevice(KeyMintSecurityLevel::SOFTWARE);
+ }
+ return swDevice;
+}
+
std::shared_ptr<KeyMintDevice>
-KeyMintDevice::createKeyMintDevice(KeyMintSecurityLevel securityLevel) {
+KeyMintDevice::getWrappedKeymasterDevice(KeyMintSecurityLevel securityLevel) {
if (auto dev = getDevice(securityLevel)) {
return ndk::SharedRefBase::make<KeyMintDevice>(std::move(dev), securityLevel);
}
return {};
}
+std::shared_ptr<IKeyMintDevice>
+KeyMintDevice::createKeyMintDevice(KeyMintSecurityLevel securityLevel) {
+ if (securityLevel == KeyMintSecurityLevel::SOFTWARE) {
+ return getSoftwareKeymintDevice();
+ } else {
+ return getWrappedKeymasterDevice(securityLevel);
+ }
+}
+
std::shared_ptr<SharedSecret> SharedSecret::createSharedSecret(KeyMintSecurityLevel securityLevel) {
auto device = getDevice(securityLevel);
if (!device) {
diff --git a/keystore2/src/km_compat/km_compat.h b/keystore2/src/km_compat/km_compat.h
index 70c7b86..6654c4a 100644
--- a/keystore2/src/km_compat/km_compat.h
+++ b/keystore2/src/km_compat/km_compat.h
@@ -50,41 +50,55 @@
using ::android::hardware::keymaster::V4_1::support::Keymaster;
using ::ndk::ScopedAStatus;
-class OperationSlots {
- private:
- uint8_t mNumFreeSlots;
- std::mutex mNumFreeSlotsMutex;
-
- public:
- void setNumFreeSlots(uint8_t numFreeSlots);
- bool claimSlot();
- void freeSlot();
-};
-
+class OperationSlot;
+class OperationSlotManager;
// An abstraction for a single operation slot.
// This contains logic to ensure that we do not free the slot multiple times,
// e.g., if we call abort twice on the same operation.
class OperationSlot {
+ friend OperationSlotManager;
+
private:
- OperationSlots* mOperationSlots;
- bool mIsActive;
+ std::shared_ptr<OperationSlotManager> mOperationSlots;
+ std::optional<std::unique_lock<std::mutex>> mReservedGuard;
+
+ protected:
+ OperationSlot(std::shared_ptr<OperationSlotManager>,
+ std::optional<std::unique_lock<std::mutex>> reservedGuard);
+ OperationSlot(const OperationSlot&) = delete;
+ OperationSlot& operator=(const OperationSlot&) = delete;
public:
- OperationSlot(OperationSlots* slots, bool isActive)
- : mOperationSlots(slots), mIsActive(isActive) {}
+ OperationSlot() : mOperationSlots(nullptr), mReservedGuard(std::nullopt) {}
+ OperationSlot(OperationSlot&&) = default;
+ OperationSlot& operator=(OperationSlot&&) = default;
+ ~OperationSlot();
+};
+class OperationSlotManager {
+ private:
+ uint8_t mNumFreeSlots;
+ std::mutex mNumFreeSlotsMutex;
+ std::mutex mReservedSlotMutex;
+
+ public:
+ void setNumFreeSlots(uint8_t numFreeSlots);
+ static std::optional<OperationSlot>
+ claimSlot(std::shared_ptr<OperationSlotManager> operationSlots);
+ static OperationSlot claimReservedSlot(std::shared_ptr<OperationSlotManager> operationSlots);
void freeSlot();
- bool hasSlot() { return mIsActive; }
};
class KeyMintDevice : public aidl::android::hardware::security::keymint::BnKeyMintDevice {
private:
::android::sp<Keymaster> mDevice;
- OperationSlots mOperationSlots;
+ std::shared_ptr<OperationSlotManager> mOperationSlots;
public:
explicit KeyMintDevice(::android::sp<Keymaster>, KeyMintSecurityLevel);
- static std::shared_ptr<KeyMintDevice> createKeyMintDevice(KeyMintSecurityLevel securityLevel);
+ static std::shared_ptr<IKeyMintDevice> createKeyMintDevice(KeyMintSecurityLevel securityLevel);
+ static std::shared_ptr<KeyMintDevice>
+ getWrappedKeymasterDevice(KeyMintSecurityLevel securityLevel);
ScopedAStatus getHardwareInfo(KeyMintHardwareInfo* _aidl_return) override;
ScopedAStatus addRngEntropy(const std::vector<uint8_t>& in_data) override;
@@ -107,10 +121,15 @@
ScopedAStatus deleteKey(const std::vector<uint8_t>& in_inKeyBlob) override;
ScopedAStatus deleteAllKeys() override;
ScopedAStatus destroyAttestationIds() override;
+
ScopedAStatus begin(KeyPurpose in_inPurpose, const std::vector<uint8_t>& in_inKeyBlob,
const std::vector<KeyParameter>& in_inParams,
const std::optional<HardwareAuthToken>& in_inAuthToken,
BeginResult* _aidl_return) override;
+ ScopedAStatus beginInternal(KeyPurpose in_inPurpose, const std::vector<uint8_t>& in_inKeyBlob,
+ const std::vector<KeyParameter>& in_inParams,
+ const std::optional<HardwareAuthToken>& in_inAuthToken,
+ bool useReservedSlot, BeginResult* _aidl_return);
ScopedAStatus deviceLocked(bool passwordOnly,
const std::optional<TimeStampToken>& timestampToken) override;
ScopedAStatus earlyBootEnded() override;
@@ -123,6 +142,11 @@
const std::vector<uint8_t>& appId, const std::vector<uint8_t>& appData,
std::vector<KeyCharacteristics>* keyCharacteristics) override;
+ ScopedAStatus getRootOfTrustChallenge(std::array<uint8_t, 16>* challenge);
+ ScopedAStatus getRootOfTrust(const std::array<uint8_t, 16>& challenge,
+ std::vector<uint8_t>* rootOfTrust);
+ ScopedAStatus sendRootOfTrust(const std::vector<uint8_t>& rootOfTrust);
+
// These are public to allow testing code to use them directly.
// This class should not be used publicly anyway.
std::variant<std::vector<Certificate>, KMV1_ErrorCode>
@@ -141,9 +165,8 @@
class KeyMintOperation : public aidl::android::hardware::security::keymint::BnKeyMintOperation {
public:
- KeyMintOperation(::android::sp<Keymaster> device, uint64_t operationHandle,
- OperationSlots* slots, bool isActive)
- : mDevice(device), mOperationHandle(operationHandle), mOperationSlot(slots, isActive) {}
+ KeyMintOperation(::android::sp<Keymaster> device, uint64_t operationHandle, OperationSlot slot)
+ : mDevice(device), mOperationHandle(operationHandle), mOperationSlot(std::move(slot)) {}
~KeyMintOperation();
ScopedAStatus updateAad(const std::vector<uint8_t>& input,
@@ -181,7 +204,7 @@
std::vector<uint8_t> mUpdateBuffer;
::android::sp<Keymaster> mDevice;
uint64_t mOperationHandle;
- OperationSlot mOperationSlot;
+ std::optional<OperationSlot> mOperationSlot;
};
class SharedSecret : public aidl::android::hardware::security::sharedsecret::BnSharedSecret {
diff --git a/keystore2/src/km_compat/lib.rs b/keystore2/src/km_compat/lib.rs
index 8abf2ba..13f7760 100644
--- a/keystore2/src/km_compat/lib.rs
+++ b/keystore2/src/km_compat/lib.rs
@@ -286,7 +286,7 @@
let operation = begin_result.operation.unwrap();
let update_aad_result = operation.updateAad(
- &b"foobar".to_vec(),
+ b"foobar".as_ref(),
None, /* authToken */
None, /* timestampToken */
);
@@ -310,7 +310,7 @@
let operation = begin_result.operation.unwrap();
let update_aad_result = operation.updateAad(
- &b"foobar".to_vec(),
+ b"foobar".as_ref(),
None, /* authToken */
None, /* timestampToken */
);
diff --git a/keystore2/src/km_compat/slot_test.cpp b/keystore2/src/km_compat/slot_test.cpp
index 43f3bc6..d734970 100644
--- a/keystore2/src/km_compat/slot_test.cpp
+++ b/keystore2/src/km_compat/slot_test.cpp
@@ -26,6 +26,7 @@
using ::aidl::android::hardware::security::keymint::BlockMode;
using ::aidl::android::hardware::security::keymint::Certificate;
using ::aidl::android::hardware::security::keymint::Digest;
+using ::aidl::android::hardware::security::keymint::EcCurve;
using ::aidl::android::hardware::security::keymint::ErrorCode;
using ::aidl::android::hardware::security::keymint::IKeyMintOperation;
using ::aidl::android::hardware::security::keymint::KeyCharacteristics;
@@ -53,6 +54,25 @@
return creationResult.keyBlob;
}
+static bool generateECSingingKey(std::shared_ptr<KeyMintDevice> device) {
+ uint64_t now_ms = (uint64_t)time(nullptr) * 1000;
+
+ auto keyParams = std::vector<KeyParameter>({
+ KMV1::makeKeyParameter(KMV1::TAG_ALGORITHM, Algorithm::EC),
+ KMV1::makeKeyParameter(KMV1::TAG_EC_CURVE, EcCurve::P_256),
+ KMV1::makeKeyParameter(KMV1::TAG_NO_AUTH_REQUIRED, true),
+ KMV1::makeKeyParameter(KMV1::TAG_DIGEST, Digest::SHA_2_256),
+ KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::SIGN),
+ KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::VERIFY),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_BEFORE, now_ms - 60 * 60 * 1000),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_AFTER, now_ms + 60 * 60 * 1000),
+ });
+ KeyCreationResult creationResult;
+ auto status = device->generateKey(keyParams, std::nullopt /* attest_key */, &creationResult);
+ EXPECT_TRUE(status.isOk()) << status.getDescription();
+ return status.isOk();
+}
+
static std::variant<BeginResult, ScopedAStatus> begin(std::shared_ptr<KeyMintDevice> device,
bool valid) {
auto blob = generateAESKey(device);
@@ -69,17 +89,57 @@
return beginResult;
}
+static std::shared_ptr<KMV1::IKeyMintOperation>
+generateAndBeginECDHKeyOperation(std::shared_ptr<KeyMintDevice> device) {
+ uint64_t now_ms = (uint64_t)time(nullptr) * 1000;
+
+ auto keyParams = std::vector<KeyParameter>({
+ KMV1::makeKeyParameter(KMV1::TAG_ALGORITHM, Algorithm::EC),
+ KMV1::makeKeyParameter(KMV1::TAG_EC_CURVE, EcCurve::P_256),
+ KMV1::makeKeyParameter(KMV1::TAG_NO_AUTH_REQUIRED, true),
+ KMV1::makeKeyParameter(KMV1::TAG_DIGEST, Digest::NONE),
+ KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::AGREE_KEY),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_BEFORE, now_ms - 60 * 60 * 1000),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_AFTER, now_ms + 60 * 60 * 1000),
+ });
+ KeyCreationResult creationResult;
+ auto status = device->generateKey(keyParams, std::nullopt /* attest_key */, &creationResult);
+ EXPECT_TRUE(status.isOk()) << status.getDescription();
+ if (!status.isOk()) {
+ return {};
+ }
+ std::vector<KeyParameter> kps;
+ BeginResult beginResult;
+ auto bstatus = device->begin(KeyPurpose::AGREE_KEY, creationResult.keyBlob, kps,
+ HardwareAuthToken(), &beginResult);
+    EXPECT_TRUE(bstatus.isOk()) << bstatus.getDescription();
+    if (bstatus.isOk()) {
+ return beginResult.operation;
+ }
+ return {};
+}
+
static const int NUM_SLOTS = 2;
TEST(SlotTest, TestSlots) {
static std::shared_ptr<KeyMintDevice> device =
- KeyMintDevice::createKeyMintDevice(SecurityLevel::TRUSTED_ENVIRONMENT);
+ KeyMintDevice::getWrappedKeymasterDevice(SecurityLevel::TRUSTED_ENVIRONMENT);
+ ASSERT_NE(device.get(), nullptr);
+
device->setNumFreeSlots(NUM_SLOTS);
// A begin() that returns a failure should not use a slot.
auto result = begin(device, false);
ASSERT_TRUE(std::holds_alternative<ScopedAStatus>(result));
+ // Software emulated operations must not leak virtual slots.
+ ASSERT_TRUE(!!generateAndBeginECDHKeyOperation(device));
+
+ // Software emulated operations must not impact virtual slots accounting.
+ // As opposed to the previous call, the software operation is kept alive.
+ auto software_op = generateAndBeginECDHKeyOperation(device);
+ ASSERT_TRUE(!!software_op);
+
// Fill up all the slots.
std::vector<std::shared_ptr<IKeyMintOperation>> operations;
for (int i = 0; i < NUM_SLOTS; i++) {
@@ -94,6 +154,14 @@
ASSERT_EQ(std::get<ScopedAStatus>(result).getServiceSpecificError(),
static_cast<int32_t>(ErrorCode::TOO_MANY_OPERATIONS));
+ // At this point all slots are in use. We should still be able to generate keys which
+ // require an operation slot during generation.
+ ASSERT_TRUE(generateECSingingKey(device));
+
+ // Software emulated operations should work despite having all virtual operation slots
+ // depleted.
+ ASSERT_TRUE(generateAndBeginECDHKeyOperation(device));
+
// TODO: I'm not sure how to generate a failing update call to test that.
// Calling finish should free up a slot.
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 7454cca..d75bfd2 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -17,8 +17,8 @@
use crate::{
error::{Error as KsError, ResponseCode},
key_parameter::{KeyParameter, KeyParameterValue},
- super_key::SuperKeyManager,
utils::uid_to_android_user,
+ utils::AesGcm,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
SecurityLevel::SecurityLevel, Tag::Tag, TagType::TagType,
@@ -26,6 +26,7 @@
use anyhow::{Context, Result};
use keystore2_crypto::{aes_gcm_decrypt, Password, ZVec};
use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
use std::{
fs,
@@ -87,6 +88,14 @@
/// an invalid alias filename encoding.
#[error("Invalid alias filename encoding.")]
BadEncoding,
+ /// A component of the requested entry other than the KM key blob itself
+ /// was encrypted and no super key was provided.
+ #[error("Locked entry component.")]
+ LockedComponent,
+ /// The uids presented to move_keystore_entry belonged to different
+ /// Android users.
+ #[error("Cannot move keys across Android users.")]
+ AndroidUserMismatch,
}
/// The blob payload, optionally with all information required to decrypt it.
@@ -96,6 +105,16 @@
Generic(Vec<u8>),
/// A legacy key characteristics file. This has only a single list of Authorizations.
Characteristics(Vec<u8>),
+ /// A legacy key characteristics file. This has only a single list of Authorizations.
+ /// Additionally, this characteristics file was encrypted with the user's super key.
+ EncryptedCharacteristics {
+ /// Initialization vector.
+ iv: Vec<u8>,
+ /// Aead tag for integrity verification.
+ tag: Vec<u8>,
+ /// Ciphertext.
+ data: Vec<u8>,
+ },
/// A key characteristics cache has both a hardware enforced and a software enforced list
/// of authorizations.
CharacteristicsCache(Vec<u8>),
@@ -124,6 +143,17 @@
/// Ciphertext.
data: Vec<u8>,
},
+ /// An encrypted blob. Includes the initialization vector, the aead tag, and the
+ /// ciphertext data. The key can be selected from context, i.e., the owner of the key
+ /// blob. This is a special case for generic encrypted blobs as opposed to key blobs.
+ EncryptedGeneric {
+ /// Initialization vector.
+ iv: Vec<u8>,
+ /// Aead tag for integrity verification.
+ tag: Vec<u8>,
+ /// Ciphertext.
+ data: Vec<u8>,
+ },
/// Holds the plaintext key blob either after unwrapping an encrypted blob or when the
/// blob was stored in "plaintext" on disk. The "plaintext" of a key blob is not actual
/// plaintext because all KeyMint blobs are encrypted with a device bound key. The key
@@ -132,6 +162,19 @@
Decrypted(ZVec),
}
+/// Keystore used two different key characteristics file formats in the past.
+/// The key characteristics cache which superseded the characteristics file.
+/// The latter stored only one list of key parameters, while the former stored
+/// a hardware enforced and a software enforced list. This Enum indicates which
+/// type was read from the file system.
+#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
+pub enum LegacyKeyCharacteristics {
+ /// A characteristics cache was read.
+ Cache(Vec<KeyParameter>),
+ /// A characteristics file was read.
+ File(Vec<KeyParameter>),
+}
+
/// Represents a loaded legacy key blob file.
#[derive(Debug, Eq, PartialEq)]
pub struct Blob {
@@ -169,6 +212,16 @@
}
impl Blob {
+ /// Creates a new blob from flags and value.
+ pub fn new(flags: u8, value: BlobValue) -> Self {
+ Self { flags, value }
+ }
+
+ /// Return the raw flags of this Blob.
+ pub fn get_flags(&self) -> u8 {
+ self.flags
+ }
+
/// This blob was generated with a fallback software KM device.
pub fn is_fallback(&self) -> bool {
self.flags & flags::FALLBACK != 0
@@ -212,10 +265,14 @@
// version (1 Byte)
// blob_type (1 Byte)
// flags (1 Byte)
- // info (1 Byte)
+ // info (1 Byte) Size of an info field appended to the blob.
// initialization_vector (16 Bytes)
// integrity (MD5 digest or gcm tag) (16 Bytes)
// length (4 Bytes)
+ //
+ // The info field is used to store the salt for password encrypted blobs.
+ // The beginning of the info field can be computed from the file length
+ // and the info byte from the header: <file length> - <info> bytes.
const COMMON_HEADER_SIZE: usize = 4 + Self::IV_SIZE + Self::GCM_TAG_LENGTH + 4;
const VERSION_OFFSET: usize = 0;
@@ -341,12 +398,28 @@
let tag = &buffer[Self::AEAD_TAG_OFFSET..Self::AEAD_TAG_OFFSET + Self::GCM_TAG_LENGTH];
match (blob_type, is_encrypted, salt) {
- (blob_types::GENERIC, _, _) => {
+ (blob_types::GENERIC, false, _) => {
Ok(Blob { flags, value: BlobValue::Generic(value.to_vec()) })
}
- (blob_types::KEY_CHARACTERISTICS, _, _) => {
+ (blob_types::GENERIC, true, _) => Ok(Blob {
+ flags,
+ value: BlobValue::EncryptedGeneric {
+ iv: iv.to_vec(),
+ tag: tag.to_vec(),
+ data: value.to_vec(),
+ },
+ }),
+ (blob_types::KEY_CHARACTERISTICS, false, _) => {
Ok(Blob { flags, value: BlobValue::Characteristics(value.to_vec()) })
}
+ (blob_types::KEY_CHARACTERISTICS, true, _) => Ok(Blob {
+ flags,
+ value: BlobValue::EncryptedCharacteristics {
+ iv: iv.to_vec(),
+ tag: tag.to_vec(),
+ data: value.to_vec(),
+ },
+ }),
(blob_types::KEY_CHARACTERISTICS_CACHE, _, _) => {
Ok(Blob { flags, value: BlobValue::CharacteristicsCache(value.to_vec()) })
}
@@ -427,6 +500,15 @@
.context("In new_from_stream_decrypt_with.")?,
),
}),
+ BlobValue::EncryptedGeneric { iv, tag, data } => Ok(Blob {
+ flags: blob.flags,
+ value: BlobValue::Generic(
+ decrypt(data, iv, tag, None, None)
+ .context("In new_from_stream_decrypt_with.")?[..]
+ .to_vec(),
+ ),
+ }),
+
_ => Ok(blob),
}
}
@@ -546,24 +628,91 @@
Ok(params)
}
+ /// This function takes a Blob and an optional AesGcm. Plain text blob variants are
+ /// passed through as is. If a super key is given an attempt is made to decrypt the
+ /// blob thereby mapping BlobValue variants as follows:
+ /// BlobValue::Encrypted => BlobValue::Decrypted
+ /// BlobValue::EncryptedGeneric => BlobValue::Generic
+ /// BlobValue::EncryptedCharacteristics => BlobValue::Characteristics
+    /// If no super key is given or BlobValue::PwEncrypted is encountered,
+    /// Err(Error::LockedComponent) is returned.
+ fn decrypt_if_required(super_key: &Option<Arc<dyn AesGcm>>, blob: Blob) -> Result<Blob> {
+ match blob {
+ Blob { value: BlobValue::Generic(_), .. }
+ | Blob { value: BlobValue::Characteristics(_), .. }
+ | Blob { value: BlobValue::CharacteristicsCache(_), .. }
+ | Blob { value: BlobValue::Decrypted(_), .. } => Ok(blob),
+ Blob { value: BlobValue::EncryptedCharacteristics { iv, tag, data }, flags }
+ if super_key.is_some() =>
+ {
+ Ok(Blob {
+ value: BlobValue::Characteristics(
+ super_key.as_ref().unwrap().decrypt(&data, &iv, &tag).context(
+ "In decrypt_if_required: Failed to decrypt EncryptedCharacteristics",
+ )?[..]
+ .to_vec(),
+ ),
+ flags,
+ })
+ }
+ Blob { value: BlobValue::Encrypted { iv, tag, data }, flags }
+ if super_key.is_some() =>
+ {
+ Ok(Blob {
+ value: BlobValue::Decrypted(
+ super_key
+ .as_ref()
+ .unwrap()
+ .decrypt(&data, &iv, &tag)
+ .context("In decrypt_if_required: Failed to decrypt Encrypted")?,
+ ),
+ flags,
+ })
+ }
+ Blob { value: BlobValue::EncryptedGeneric { iv, tag, data }, flags }
+ if super_key.is_some() =>
+ {
+ Ok(Blob {
+ value: BlobValue::Generic(
+ super_key
+ .as_ref()
+ .unwrap()
+ .decrypt(&data, &iv, &tag)
+                        .context("In decrypt_if_required: Failed to decrypt EncryptedGeneric")?[..]
+ .to_vec(),
+ ),
+ flags,
+ })
+ }
+ // This arm catches all encrypted cases where super key is not present or cannot
+ // decrypt the blob, the latter being BlobValue::PwEncrypted.
+ _ => Err(Error::LockedComponent)
+ .context("In decrypt_if_required: Encountered encrypted blob without super key."),
+ }
+ }
+
fn read_characteristics_file(
&self,
uid: u32,
prefix: &str,
alias: &str,
hw_sec_level: SecurityLevel,
- ) -> Result<Vec<KeyParameter>> {
+ super_key: &Option<Arc<dyn AesGcm>>,
+ ) -> Result<LegacyKeyCharacteristics> {
let blob = Self::read_generic_blob(&self.make_chr_filename(uid, alias, prefix))
.context("In read_characteristics_file")?;
let blob = match blob {
- None => return Ok(Vec::new()),
+ None => return Ok(LegacyKeyCharacteristics::Cache(Vec::new())),
Some(blob) => blob,
};
- let mut stream = match blob.value() {
- BlobValue::Characteristics(data) => &data[..],
- BlobValue::CharacteristicsCache(data) => &data[..],
+ let blob = Self::decrypt_if_required(super_key, blob)
+ .context("In read_characteristics_file: Trying to decrypt blob.")?;
+
+ let (mut stream, is_cache) = match blob.value() {
+ BlobValue::Characteristics(data) => (&data[..], false),
+ BlobValue::CharacteristicsCache(data) => (&data[..], true),
_ => {
return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(concat!(
"In read_characteristics_file: ",
@@ -589,7 +738,12 @@
.into_iter()
.map(|value| KeyParameter::new(value, SecurityLevel::KEYSTORE));
- Ok(hw_list.into_iter().flatten().chain(sw_list).collect())
+ let params: Vec<KeyParameter> = hw_list.into_iter().flatten().chain(sw_list).collect();
+ if is_cache {
+ Ok(LegacyKeyCharacteristics::Cache(params))
+ } else {
+ Ok(LegacyKeyCharacteristics::File(params))
+ }
}
// This is a list of known prefixes that the Keystore 1.0 SPI used to use.
@@ -639,14 +793,40 @@
Ok(Some(Self::new_from_stream(&mut file).context("In read_generic_blob.")?))
}
+ fn read_generic_blob_decrypt_with<F>(path: &Path, decrypt: F) -> Result<Option<Blob>>
+ where
+ F: FnOnce(&[u8], &[u8], &[u8], Option<&[u8]>, Option<usize>) -> Result<ZVec>,
+ {
+ let mut file = match Self::with_retry_interrupted(|| File::open(path)) {
+ Ok(file) => file,
+ Err(e) => match e.kind() {
+ ErrorKind::NotFound => return Ok(None),
+ _ => return Err(e).context("In read_generic_blob_decrypt_with."),
+ },
+ };
+
+ Ok(Some(
+ Self::new_from_stream_decrypt_with(&mut file, decrypt)
+ .context("In read_generic_blob_decrypt_with.")?,
+ ))
+ }
+
/// Read a legacy keystore entry blob.
- pub fn read_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
+ pub fn read_legacy_keystore_entry<F>(
+ &self,
+ uid: u32,
+ alias: &str,
+ decrypt: F,
+ ) -> Result<Option<Vec<u8>>>
+ where
+ F: FnOnce(&[u8], &[u8], &[u8], Option<&[u8]>, Option<usize>) -> Result<ZVec>,
+ {
let path = match self.make_legacy_keystore_entry_filename(uid, alias) {
Some(path) => path,
None => return Ok(None),
};
- let blob = Self::read_generic_blob(&path)
+ let blob = Self::read_generic_blob_decrypt_with(&path, decrypt)
.context("In read_legacy_keystore_entry: Failed to read blob.")?;
Ok(blob.and_then(|blob| match blob.value {
@@ -659,22 +839,23 @@
}
/// Remove a legacy keystore entry by the name alias with owner uid.
- pub fn remove_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<()> {
+ pub fn remove_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<bool> {
let path = match self.make_legacy_keystore_entry_filename(uid, alias) {
Some(path) => path,
- None => return Ok(()),
+ None => return Ok(false),
};
if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
match e.kind() {
- ErrorKind::NotFound => return Ok(()),
+ ErrorKind::NotFound => return Ok(false),
_ => return Err(e).context("In remove_legacy_keystore_entry."),
}
}
let user_id = uid_to_android_user(uid);
self.remove_user_dir_if_empty(user_id)
- .context("In remove_legacy_keystore_entry: Trying to remove empty user dir.")
+ .context("In remove_legacy_keystore_entry: Trying to remove empty user dir.")?;
+ Ok(true)
}
/// List all entries belonging to the given uid.
@@ -988,6 +1169,88 @@
Ok(something_was_deleted)
}
+ /// This function moves a keystore file if it exists. It constructs the source and destination
+ /// file name using the make_filename function with the arguments uid, alias, and prefix.
+ /// The function overwrites existing destination files silently. If the source does not exist,
+ /// this function has no side effect and returns successfully.
+ fn move_keystore_file_if_exists<F>(
+ src_uid: u32,
+ dest_uid: u32,
+ src_alias: &str,
+ dest_alias: &str,
+ prefix: &str,
+ make_filename: F,
+ ) -> Result<()>
+ where
+ F: Fn(u32, &str, &str) -> PathBuf,
+ {
+ let src_path = make_filename(src_uid, src_alias, prefix);
+ let dest_path = make_filename(dest_uid, dest_alias, prefix);
+ match Self::with_retry_interrupted(|| fs::rename(&src_path, &dest_path)) {
+ Err(e) if e.kind() == ErrorKind::NotFound => Ok(()),
+ r => r.context("In move_keystore_file_if_exists: Trying to rename."),
+ }
+ }
+
+ /// Moves a keystore entry from one uid to another. The uids must have the same android user
+ /// component. Moves across android users are not permitted.
+ pub fn move_keystore_entry(
+ &self,
+ src_uid: u32,
+ dest_uid: u32,
+ src_alias: &str,
+ dest_alias: &str,
+ ) -> Result<()> {
+ if src_uid == dest_uid {
+ // Nothing to do in the trivial case.
+ return Ok(());
+ }
+
+ if uid_to_android_user(src_uid) != uid_to_android_user(dest_uid) {
+ return Err(Error::AndroidUserMismatch).context("In move_keystore_entry.");
+ }
+
+ let prefixes = ["USRPKEY", "USRSKEY", "USRCERT", "CACERT"];
+ for prefix in prefixes {
+ Self::move_keystore_file_if_exists(
+ src_uid,
+ dest_uid,
+ src_alias,
+ dest_alias,
+ prefix,
+ |uid, alias, prefix| self.make_blob_filename(uid, alias, prefix),
+ )
+ .with_context(|| {
+ format!(
+ "In move_keystore_entry: Trying to move blob file with prefix: \"{}\"",
+ prefix
+ )
+ })?;
+ }
+
+ let prefixes = ["USRPKEY", "USRSKEY"];
+
+ for prefix in prefixes {
+ Self::move_keystore_file_if_exists(
+ src_uid,
+ dest_uid,
+ src_alias,
+ dest_alias,
+ prefix,
+ |uid, alias, prefix| self.make_chr_filename(uid, alias, prefix),
+ )
+ .with_context(|| {
+ format!(
+ "In move_keystore_entry: Trying to move characteristics file with \
+ prefix: \"{}\"",
+ prefix
+ )
+ })?;
+ }
+
+ Ok(())
+ }
+
fn remove_user_dir_if_empty(&self, user_id: u32) -> Result<()> {
if self
.is_empty_user(user_id)
@@ -1004,79 +1267,66 @@
&self,
uid: u32,
alias: &str,
- key_manager: Option<&SuperKeyManager>,
- ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
+ super_key: &Option<Arc<dyn AesGcm>>,
+ ) -> Result<(Option<(Blob, LegacyKeyCharacteristics)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
let km_blob = self.read_km_blob_file(uid, alias).context("In load_by_uid_alias.")?;
let km_blob = match km_blob {
Some((km_blob, prefix)) => {
- let km_blob = match km_blob {
- Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
- // Unwrap the key blob if required and if we have key_manager.
- Blob { flags, value: BlobValue::Encrypted { ref iv, ref tag, ref data } } => {
- if let Some(key_manager) = key_manager {
- let decrypted = match key_manager
- .get_per_boot_key_by_user_id(uid_to_android_user(uid))
- {
- Some(key) => key.aes_gcm_decrypt(data, iv, tag).context(
- "In load_by_uid_alias: while trying to decrypt legacy blob.",
- )?,
- None => {
- return Err(KsError::Rc(ResponseCode::LOCKED)).context(format!(
- concat!(
- "In load_by_uid_alias: ",
- "User {} has not unlocked the keystore yet.",
- ),
- uid_to_android_user(uid)
- ))
- }
- };
- Blob { flags, value: BlobValue::Decrypted(decrypted) }
- } else {
- km_blob
- }
- }
- _ => {
- return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
+ let km_blob =
+ match km_blob {
+ Blob { flags: _, value: BlobValue::Decrypted(_) }
+ | Blob { flags: _, value: BlobValue::Encrypted { .. } } => km_blob,
+ _ => return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
"In load_by_uid_alias: Found wrong blob type in legacy key blob file.",
- )
- }
- };
+ ),
+ };
let hw_sec_level = match km_blob.is_strongbox() {
true => SecurityLevel::STRONGBOX,
false => SecurityLevel::TRUSTED_ENVIRONMENT,
};
let key_parameters = self
- .read_characteristics_file(uid, &prefix, alias, hw_sec_level)
+ .read_characteristics_file(uid, &prefix, alias, hw_sec_level, super_key)
.context("In load_by_uid_alias.")?;
Some((km_blob, key_parameters))
}
None => None,
};
- let user_cert =
- match Self::read_generic_blob(&self.make_blob_filename(uid, alias, "USRCERT"))
- .context("In load_by_uid_alias: While loading user cert.")?
- {
- Some(Blob { value: BlobValue::Generic(data), .. }) => Some(data),
- None => None,
- _ => {
- return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
- "In load_by_uid_alias: Found unexpected blob type in USRCERT file",
- )
- }
- };
+ let user_cert_blob =
+ Self::read_generic_blob(&self.make_blob_filename(uid, alias, "USRCERT"))
+ .context("In load_by_uid_alias: While loading user cert.")?;
- let ca_cert = match Self::read_generic_blob(&self.make_blob_filename(uid, alias, "CACERT"))
- .context("In load_by_uid_alias: While loading ca cert.")?
- {
- Some(Blob { value: BlobValue::Generic(data), .. }) => Some(data),
- None => None,
- _ => {
+ let user_cert = if let Some(blob) = user_cert_blob {
+ let blob = Self::decrypt_if_required(super_key, blob)
+ .context("In load_by_uid_alias: While decrypting user cert.")?;
+
+ if let Blob { value: BlobValue::Generic(data), .. } = blob {
+ Some(data)
+ } else {
return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED))
- .context("In load_by_uid_alias: Found unexpected blob type in CACERT file")
+ .context("In load_by_uid_alias: Found unexpected blob type in USRCERT file");
}
+ } else {
+ None
+ };
+
+ let ca_cert_blob = Self::read_generic_blob(&self.make_blob_filename(uid, alias, "CACERT"))
+ .context("In load_by_uid_alias: While loading ca cert.")?;
+
+ let ca_cert = if let Some(blob) = ca_cert_blob {
+ let blob = Self::decrypt_if_required(super_key, blob)
+ .context("In load_by_uid_alias: While decrypting ca cert.")?;
+
+ if let Blob { value: BlobValue::Generic(data), .. } = blob {
+ Some(data)
+ } else {
+ return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED))
+ .context("In load_by_uid_alias: Found unexpected blob type in CACERT file");
+ }
+ } else {
+ None
};
Ok((km_blob, user_cert, ca_cert))
@@ -1137,17 +1387,318 @@
}
}
+/// This module implements utility apis for creating legacy blob files.
+#[cfg(feature = "keystore2_blob_test_utils")]
+pub mod test_utils {
+ #![allow(dead_code)]
+
+ /// test vectors for legacy key blobs
+ pub mod legacy_blob_test_vectors;
+
+ use crate::legacy_blob::blob_types::{
+ GENERIC, KEY_CHARACTERISTICS, KEY_CHARACTERISTICS_CACHE, KM_BLOB, SUPER_KEY,
+ SUPER_KEY_AES256,
+ };
+ use crate::legacy_blob::*;
+ use anyhow::{anyhow, Result};
+ use keystore2_crypto::{aes_gcm_decrypt, aes_gcm_encrypt};
+ use std::convert::TryInto;
+ use std::fs::OpenOptions;
+ use std::io::Write;
+
+ /// This function takes a blob and synchronizes the encrypted/super encrypted flags
+ /// with the blob type for the pairs Generic/EncryptedGeneric,
+ /// Characteristics/EncryptedCharacteristics and Encrypted/Decrypted.
+ /// E.g. if a non encrypted enum variant is encountered with flags::SUPER_ENCRYPTED
+ /// or flags::ENCRYPTED is set, the payload is encrypted and the corresponding
+ /// encrypted variant is returned, and vice versa. All other variants remain untouched
+ /// even if flags and BlobValue variant are inconsistent.
+ pub fn prepare_blob(blob: Blob, key: &[u8]) -> Result<Blob> {
+ match blob {
+ Blob { value: BlobValue::Generic(data), flags } if blob.is_encrypted() => {
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(&data, key).unwrap();
+ Ok(Blob { value: BlobValue::EncryptedGeneric { data: ciphertext, iv, tag }, flags })
+ }
+ Blob { value: BlobValue::Characteristics(data), flags } if blob.is_encrypted() => {
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(&data, key).unwrap();
+ Ok(Blob {
+ value: BlobValue::EncryptedCharacteristics { data: ciphertext, iv, tag },
+ flags,
+ })
+ }
+ Blob { value: BlobValue::Decrypted(data), flags } if blob.is_encrypted() => {
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(&data, key).unwrap();
+ Ok(Blob { value: BlobValue::Encrypted { data: ciphertext, iv, tag }, flags })
+ }
+ Blob { value: BlobValue::EncryptedGeneric { data, iv, tag }, flags }
+ if !blob.is_encrypted() =>
+ {
+ let plaintext = aes_gcm_decrypt(&data, &iv, &tag, key).unwrap();
+ Ok(Blob { value: BlobValue::Generic(plaintext[..].to_vec()), flags })
+ }
+ Blob { value: BlobValue::EncryptedCharacteristics { data, iv, tag }, flags }
+ if !blob.is_encrypted() =>
+ {
+ let plaintext = aes_gcm_decrypt(&data, &iv, &tag, key).unwrap();
+ Ok(Blob { value: BlobValue::Characteristics(plaintext[..].to_vec()), flags })
+ }
+ Blob { value: BlobValue::Encrypted { data, iv, tag }, flags }
+ if !blob.is_encrypted() =>
+ {
+ let plaintext = aes_gcm_decrypt(&data, &iv, &tag, key).unwrap();
+ Ok(Blob { value: BlobValue::Decrypted(plaintext), flags })
+ }
+ _ => Ok(blob),
+ }
+ }
+
+ /// Legacy blob header structure.
+ pub struct LegacyBlobHeader {
+ version: u8,
+ blob_type: u8,
+ flags: u8,
+ info: u8,
+ iv: [u8; 12],
+ tag: [u8; 16],
+ blob_size: u32,
+ }
+
+ /// This function takes a Blob and writes it to out as a legacy blob file
+ /// version 3. Note that the flags field and the values field may be
+ /// inconsistent and could be sanitized by this function. It is intentionally
+ /// not done to enable tests to construct malformed blobs.
+ pub fn write_legacy_blob(out: &mut dyn Write, blob: Blob) -> Result<usize> {
+ let (header, data, salt) = match blob {
+ Blob { value: BlobValue::Generic(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: GENERIC,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::Characteristics(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KEY_CHARACTERISTICS,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::CharacteristicsCache(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KEY_CHARACTERISTICS_CACHE,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::PwEncrypted { iv, tag, data, salt, key_size }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: if key_size == keystore2_crypto::AES_128_KEY_LENGTH {
+ SUPER_KEY
+ } else {
+ SUPER_KEY_AES256
+ },
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ Some(salt),
+ ),
+ Blob { value: BlobValue::Encrypted { iv, tag, data }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KM_BLOB,
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::EncryptedGeneric { iv, tag, data }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: GENERIC,
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::EncryptedCharacteristics { iv, tag, data }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KEY_CHARACTERISTICS,
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::Decrypted(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KM_BLOB,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data[..].to_vec(),
+ None,
+ ),
+ };
+ write_legacy_blob_helper(out, &header, &data, salt.as_deref())
+ }
+
+ /// This function takes a LegacyBlobHeader and a blob payload and writes them to out as a legacy blob file
+ /// version 3.
+ pub fn write_legacy_blob_helper(
+ out: &mut dyn Write,
+ header: &LegacyBlobHeader,
+ data: &[u8],
+ info: Option<&[u8]>,
+ ) -> Result<usize> {
+ if 1 != out.write(&[header.version])? {
+ return Err(anyhow!("Unexpected size while writing version."));
+ }
+ if 1 != out.write(&[header.blob_type])? {
+ return Err(anyhow!("Unexpected size while writing blob_type."));
+ }
+ if 1 != out.write(&[header.flags])? {
+ return Err(anyhow!("Unexpected size while writing flags."));
+ }
+ if 1 != out.write(&[header.info])? {
+ return Err(anyhow!("Unexpected size while writing info."));
+ }
+ if 12 != out.write(&header.iv)? {
+ return Err(anyhow!("Unexpected size while writing iv."));
+ }
+ if 4 != out.write(&[0u8; 4])? {
+ return Err(anyhow!("Unexpected size while writing last 4 bytes of iv."));
+ }
+ if 16 != out.write(&header.tag)? {
+ return Err(anyhow!("Unexpected size while writing tag."));
+ }
+ if 4 != out.write(&header.blob_size.to_be_bytes())? {
+ return Err(anyhow!("Unexpected size while writing blob size."));
+ }
+ if data.len() != out.write(data)? {
+ return Err(anyhow!("Unexpected size while writing blob."));
+ }
+ if let Some(info) = info {
+ if info.len() != out.write(info)? {
+ return Err(anyhow!("Unexpected size while writing info."));
+ }
+ }
+ Ok(40 + data.len() + info.map(|v| v.len()).unwrap_or(0))
+ }
+
+ /// Create encrypted characteristics file using given key.
+ pub fn make_encrypted_characteristics_file<P: AsRef<Path>>(
+ path: P,
+ key: &[u8],
+ data: &[u8],
+ ) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob =
+ Blob { value: BlobValue::Characteristics(data.to_vec()), flags: flags::ENCRYPTED };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+
+ /// Create encrypted user certificate file using given key.
+ pub fn make_encrypted_usr_cert_file<P: AsRef<Path>>(
+ path: P,
+ key: &[u8],
+ data: &[u8],
+ ) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob { value: BlobValue::Generic(data.to_vec()), flags: flags::ENCRYPTED };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+
+ /// Create encrypted CA certificate file using given key.
+ pub fn make_encrypted_ca_cert_file<P: AsRef<Path>>(
+ path: P,
+ key: &[u8],
+ data: &[u8],
+ ) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob { value: BlobValue::Generic(data.to_vec()), flags: flags::ENCRYPTED };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+
+ /// Create encrypted user key file using given key.
+ pub fn make_encrypted_key_file<P: AsRef<Path>>(path: P, key: &[u8], data: &[u8]) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob {
+ value: BlobValue::Decrypted(ZVec::try_from(data).unwrap()),
+ flags: flags::ENCRYPTED,
+ };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+
+ /// Create user or ca cert blob file.
+ pub fn make_cert_blob_file<P: AsRef<Path>>(path: P, data: &[u8]) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob { value: BlobValue::Generic(data.to_vec()), flags: 0 };
+ let blob = prepare_blob(blob, &[]).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+}
+
#[cfg(test)]
mod test {
+ #![allow(dead_code)]
use super::*;
- use anyhow::anyhow;
+ use crate::legacy_blob::test_utils::legacy_blob_test_vectors::*;
+ use crate::legacy_blob::test_utils::*;
+ use anyhow::{anyhow, Result};
use keystore2_crypto::aes_gcm_decrypt;
- use rand::Rng;
- use std::string::FromUtf8Error;
- mod legacy_blob_test_vectors;
- use crate::error;
- use crate::legacy_blob::test::legacy_blob_test_vectors::*;
use keystore2_test_utils::TempDir;
+ use rand::Rng;
+ use std::convert::TryInto;
+ use std::ops::Deref;
+ use std::string::FromUtf8Error;
#[test]
fn decode_encode_alias_test() {
@@ -1203,7 +1754,8 @@
fn read_golden_key_blob_test() -> anyhow::Result<()> {
let blob = LegacyBlobLoader::new_from_stream_decrypt_with(&mut &*BLOB, |_, _, _, _, _| {
Err(anyhow!("should not be called"))
- })?;
+ })
+ .unwrap();
assert!(!blob.is_encrypted());
assert!(!blob.is_fallback());
assert!(!blob.is_strongbox());
@@ -1213,7 +1765,8 @@
let blob = LegacyBlobLoader::new_from_stream_decrypt_with(
&mut &*REAL_LEGACY_BLOB,
|_, _, _, _, _| Err(anyhow!("should not be called")),
- )?;
+ )
+ .unwrap();
assert!(!blob.is_encrypted());
assert!(!blob.is_fallback());
assert!(!blob.is_strongbox());
@@ -1301,62 +1854,75 @@
#[test]
fn test_legacy_blobs() -> anyhow::Result<()> {
- let temp_dir = TempDir::new("legacy_blob_test")?;
- std::fs::create_dir(&*temp_dir.build().push("user_0"))?;
+ let temp_dir = TempDir::new("legacy_blob_test").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
- std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY)?;
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
USRPKEY_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
USRPKEY_AUTHBOUND_CHR,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
USRCERT_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
CACERT_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRPKEY_non_authbound"),
USRPKEY_NON_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_non_authbound"),
USRPKEY_NON_AUTHBOUND_CHR,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRCERT_non_authbound"),
USRCERT_NON_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_CACERT_non_authbound"),
CACERT_NON_AUTHBOUND,
- )?;
+ )
+ .unwrap();
- let key_manager: SuperKeyManager = Default::default();
- let mut db = crate::database::KeystoreDB::new(temp_dir.path(), None)?;
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
- assert_eq!(
- legacy_blob_loader
- .load_by_uid_alias(10223, "authbound", Some(&key_manager))
- .unwrap_err()
- .root_cause()
- .downcast_ref::<error::Error>(),
- Some(&error::Error::Rc(ResponseCode::LOCKED))
- );
-
- key_manager.unlock_user_key(&mut db, 0, &(PASSWORD.into()), &legacy_blob_loader)?;
+ if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None)?
+ {
+ assert_eq!(flags, 4);
+ assert_eq!(
+ value,
+ BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ );
+ assert_eq!(&cert[..], LOADED_CERT_AUTHBOUND);
+ assert_eq!(&chain[..], LOADED_CACERT_AUTHBOUND);
+ } else {
+ panic!("");
+ }
if let (Some((Blob { flags, value: _ }, _params)), Some(cert), Some(chain)) =
- legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None)?
{
assert_eq!(flags, 4);
//assert_eq!(value, BlobValue::Encrypted(..));
@@ -1366,7 +1932,7 @@
panic!("");
}
if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
- legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &None)?
{
assert_eq!(flags, 0);
assert_eq!(value, BlobValue::Decrypted(LOADED_USRPKEY_NON_AUTHBOUND.try_into()?));
@@ -1383,11 +1949,11 @@
assert_eq!(
(None, None, None),
- legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None)?
);
assert_eq!(
(None, None, None),
- legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &None)?
);
// The database should not be empty due to the super key.
@@ -1406,9 +1972,319 @@
Ok(())
}
+ struct TestKey(ZVec);
+
+ impl crate::utils::AesGcmKey for TestKey {
+ fn key(&self) -> &[u8] {
+ &self.0
+ }
+ }
+
+ impl Deref for TestKey {
+ type Target = [u8];
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+ }
+
+ #[test]
+ fn test_with_encrypted_characteristics() -> anyhow::Result<()> {
+ let temp_dir = TempDir::new("test_with_encrypted_characteristics").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
+
+ let pw: Password = PASSWORD.into();
+ let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+ let super_key =
+ Arc::new(TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap()));
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
+
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND,
+ )
+ .unwrap();
+ make_encrypted_characteristics_file(
+ &*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
+ &super_key,
+ KEY_PARAMETERS,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
+ USRCERT_AUTHBOUND,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
+ CACERT_AUTHBOUND,
+ )
+ .unwrap();
+
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert_eq!(
+ legacy_blob_loader
+ .load_by_uid_alias(10223, "authbound", &None)
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<Error>(),
+ Some(&Error::LockedComponent)
+ );
+
+ assert_eq!(
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &Some(super_key)).unwrap(),
+ (
+ Some((
+ Blob {
+ flags: 4,
+ value: BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ },
+ structured_test_params()
+ )),
+ Some(LOADED_CERT_AUTHBOUND.to_vec()),
+ Some(LOADED_CACERT_AUTHBOUND.to_vec())
+ )
+ );
+
+ legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None).unwrap()
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty().unwrap());
+ assert!(!legacy_blob_loader.is_empty_user(0).unwrap());
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1).unwrap());
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0).unwrap());
+ assert!(legacy_blob_loader.is_empty().unwrap());
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_with_encrypted_certificates() -> anyhow::Result<()> {
+ let temp_dir = TempDir::new("test_with_encrypted_certificates").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
+
+ let pw: Password = PASSWORD.into();
+ let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+ let super_key =
+ Arc::new(TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap()));
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
+
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND_CHR,
+ )
+ .unwrap();
+ make_encrypted_usr_cert_file(
+ &*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
+ &super_key,
+ LOADED_CERT_AUTHBOUND,
+ )
+ .unwrap();
+ make_encrypted_ca_cert_file(
+ &*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
+ &super_key,
+ LOADED_CACERT_AUTHBOUND,
+ )
+ .unwrap();
+
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert_eq!(
+ legacy_blob_loader
+ .load_by_uid_alias(10223, "authbound", &None)
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<Error>(),
+ Some(&Error::LockedComponent)
+ );
+
+ assert_eq!(
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &Some(super_key)).unwrap(),
+ (
+ Some((
+ Blob {
+ flags: 4,
+ value: BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ },
+ structured_test_params_cache()
+ )),
+ Some(LOADED_CERT_AUTHBOUND.to_vec()),
+ Some(LOADED_CACERT_AUTHBOUND.to_vec())
+ )
+ );
+
+ legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None).unwrap()
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty().unwrap());
+ assert!(!legacy_blob_loader.is_empty_user(0).unwrap());
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1).unwrap());
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0).unwrap());
+ assert!(legacy_blob_loader.is_empty().unwrap());
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_in_place_key_migration() -> anyhow::Result<()> {
+ let temp_dir = TempDir::new("test_in_place_key_migration").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
+
+ let pw: Password = PASSWORD.into();
+ let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+ let super_key =
+ Arc::new(TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap()));
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
+
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND_CHR,
+ )
+ .unwrap();
+ make_encrypted_usr_cert_file(
+ &*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
+ &super_key,
+ LOADED_CERT_AUTHBOUND,
+ )
+ .unwrap();
+ make_encrypted_ca_cert_file(
+ &*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
+ &super_key,
+ LOADED_CACERT_AUTHBOUND,
+ )
+ .unwrap();
+
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert_eq!(
+ legacy_blob_loader
+ .load_by_uid_alias(10223, "authbound", &None)
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<Error>(),
+ Some(&Error::LockedComponent)
+ );
+
+ let super_key: Option<Arc<dyn AesGcm>> = Some(super_key);
+
+ assert_eq!(
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &super_key).unwrap(),
+ (
+ Some((
+ Blob {
+ flags: 4,
+ value: BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ },
+ structured_test_params_cache()
+ )),
+ Some(LOADED_CERT_AUTHBOUND.to_vec()),
+ Some(LOADED_CACERT_AUTHBOUND.to_vec())
+ )
+ );
+
+ legacy_blob_loader.move_keystore_entry(10223, 10224, "authbound", "boundauth").unwrap();
+
+ assert_eq!(
+ legacy_blob_loader
+ .load_by_uid_alias(10224, "boundauth", &None)
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<Error>(),
+ Some(&Error::LockedComponent)
+ );
+
+ assert_eq!(
+ legacy_blob_loader.load_by_uid_alias(10224, "boundauth", &super_key).unwrap(),
+ (
+ Some((
+ Blob {
+ flags: 4,
+ value: BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ },
+ structured_test_params_cache()
+ )),
+ Some(LOADED_CERT_AUTHBOUND.to_vec()),
+ Some(LOADED_CACERT_AUTHBOUND.to_vec())
+ )
+ );
+
+ legacy_blob_loader.remove_keystore_entry(10224, "boundauth").expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10224, "boundauth", &None).unwrap()
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty().unwrap());
+ assert!(!legacy_blob_loader.is_empty_user(0).unwrap());
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1).unwrap());
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0).unwrap());
+ assert!(legacy_blob_loader.is_empty().unwrap());
+
+ Ok(())
+ }
+
#[test]
fn list_non_existing_user() -> Result<()> {
- let temp_dir = TempDir::new("list_non_existing_user")?;
+ let temp_dir = TempDir::new("list_non_existing_user").unwrap();
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
assert!(legacy_blob_loader.list_user(20)?.is_empty());
@@ -1418,11 +2294,66 @@
#[test]
fn list_legacy_keystore_entries_on_non_existing_user() -> Result<()> {
- let temp_dir = TempDir::new("list_legacy_keystore_entries_on_non_existing_user")?;
+ let temp_dir = TempDir::new("list_legacy_keystore_entries_on_non_existing_user").unwrap();
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
assert!(legacy_blob_loader.list_legacy_keystore_entries_for_user(20)?.is_empty());
Ok(())
}
+
+ #[test]
+ fn test_move_keystore_entry() {
+ let temp_dir = TempDir::new("test_move_keystore_entry").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
+
+ const SOME_CONTENT: &[u8] = b"some content";
+ const ANOTHER_CONTENT: &[u8] = b"another content";
+ const SOME_FILENAME: &str = "some_file";
+ const ANOTHER_FILENAME: &str = "another_file";
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(SOME_FILENAME), SOME_CONTENT)
+ .unwrap();
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(ANOTHER_FILENAME), ANOTHER_CONTENT)
+ .unwrap();
+
+ // A non-existent source alias is silently ignored.
+ assert!(LegacyBlobLoader::move_keystore_file_if_exists(
+ 1,
+ 2,
+ "non_existent",
+ ANOTHER_FILENAME,
+ "ignored",
+ |_, alias, _| temp_dir.build().push("user_0").push(alias).to_path_buf()
+ )
+ .is_ok());
+
+ // Content of another_file has not changed.
+ let another_content =
+ std::fs::read(&*temp_dir.build().push("user_0").push(ANOTHER_FILENAME)).unwrap();
+ assert_eq!(&another_content, ANOTHER_CONTENT);
+
+ // Check that some_file still exists.
+ assert!(temp_dir.build().push("user_0").push(SOME_FILENAME).exists());
+ // Existing target files are silently overwritten.
+
+ assert!(LegacyBlobLoader::move_keystore_file_if_exists(
+ 1,
+ 2,
+ SOME_FILENAME,
+ ANOTHER_FILENAME,
+ "ignored",
+ |_, alias, _| temp_dir.build().push("user_0").push(alias).to_path_buf()
+ )
+ .is_ok());
+
+ // Content of another_file is now "some content".
+ let another_content =
+ std::fs::read(&*temp_dir.build().push("user_0").push(ANOTHER_FILENAME)).unwrap();
+ assert_eq!(&another_content, SOME_CONTENT);
+
+ // Check that some_file no longer exists.
+ assert!(!temp_dir.build().push("user_0").push(SOME_FILENAME).exists());
+ }
}
diff --git a/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs b/keystore2/src/legacy_blob/test_utils/legacy_blob_test_vectors.rs
similarity index 91%
rename from keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
rename to keystore2/src/legacy_blob/test_utils/legacy_blob_test_vectors.rs
index 14bd40c..3eecee0 100644
--- a/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
+++ b/keystore2/src/legacy_blob/test_utils/legacy_blob_test_vectors.rs
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use crate::key_parameter::{KeyParameter, KeyParameterValue};
+use crate::legacy_blob::LegacyKeyCharacteristics;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, Digest::Digest, EcCurve::EcCurve,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, KeyOrigin::KeyOrigin,
+ KeyPurpose::KeyPurpose, SecurityLevel::SecurityLevel,
+};
+
+/// Holds Blob structure.
pub static BLOB: &[u8] = &[
3, // version
1, // type
@@ -22,6 +31,109 @@
0, 0, 0, 4, // length in big endian
0xde, 0xed, 0xbe, 0xef, // payload
];
+
+/// Creates LegacyKeyCharacteristics with security level KEYSTORE.
+pub fn structured_test_params() -> LegacyKeyCharacteristics {
+ LegacyKeyCharacteristics::File(vec![
+ KeyParameter::new(KeyParameterValue::KeyPurpose(KeyPurpose::SIGN), SecurityLevel::KEYSTORE),
+ KeyParameter::new(
+ KeyParameterValue::KeyPurpose(KeyPurpose::VERIFY),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::Digest(Digest::SHA_2_256), SecurityLevel::KEYSTORE),
+ KeyParameter::new(
+ KeyParameterValue::UserSecureID(2100322049669824240),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::Algorithm(Algorithm::EC), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::KeySize(256), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::EcCurve(EcCurve::P_256), SecurityLevel::KEYSTORE),
+ KeyParameter::new(
+ KeyParameterValue::HardwareAuthenticatorType(HardwareAuthenticatorType::FINGERPRINT),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::KeyOrigin(KeyOrigin::GENERATED),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::OSVersion(110000), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::OSPatchLevel(202101), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::BootPatchLevel(20210105), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::VendorPatchLevel(20210105), SecurityLevel::KEYSTORE),
+ ])
+}
+
+/// Creates LegacyKeyCharacteristics with security level TRUSTED_ENVIRONMENT.
+pub fn structured_test_params_cache() -> LegacyKeyCharacteristics {
+ LegacyKeyCharacteristics::Cache(vec![
+ KeyParameter::new(
+ KeyParameterValue::KeyPurpose(KeyPurpose::SIGN),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::KeyPurpose(KeyPurpose::VERIFY),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::Digest(Digest::SHA_2_256),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::UserSecureID(2100322049669824240),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::Algorithm(Algorithm::EC),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(KeyParameterValue::KeySize(256), SecurityLevel::TRUSTED_ENVIRONMENT),
+ KeyParameter::new(
+ KeyParameterValue::EcCurve(EcCurve::P_256),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::HardwareAuthenticatorType(HardwareAuthenticatorType::FINGERPRINT),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::KeyOrigin(KeyOrigin::GENERATED),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(KeyParameterValue::OSVersion(110000), SecurityLevel::TRUSTED_ENVIRONMENT),
+ KeyParameter::new(
+ KeyParameterValue::OSPatchLevel(202101),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::BootPatchLevel(20210105),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::VendorPatchLevel(20210105),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::CreationDateTime(1607149002000),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::UserID(0), SecurityLevel::KEYSTORE),
+ ])
+}
+
+/// One encoded list of key parameters.
+pub static KEY_PARAMETERS: &[u8] = &[
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x20,
+ 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x20, 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x20,
+ 0x04, 0x00, 0x00, 0x00, 0xf6, 0x01, 0x00, 0xa0, 0xf0, 0x7e, 0x7d, 0xb4, 0xc6, 0xd7, 0x25, 0x1d,
+ 0x02, 0x00, 0x00, 0x10, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x30, 0x00, 0x01, 0x00, 0x00,
+ 0x0a, 0x00, 0x00, 0x10, 0x01, 0x00, 0x00, 0x00, 0x2d, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0xf8, 0x01, 0x00, 0x10, 0x02, 0x00, 0x00, 0x00, 0xbe, 0x02, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0xc1, 0x02, 0x00, 0x30, 0xb0, 0xad, 0x01, 0x00, 0xc2, 0x02, 0x00, 0x30, 0x75, 0x15, 0x03, 0x00,
+ 0xcf, 0x02, 0x00, 0x30, 0xb9, 0x61, 0x34, 0x01, 0xce, 0x02, 0x00, 0x30, 0xb9, 0x61, 0x34, 0x01,
+ 0x30, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+];
+
+/// Real legacy blob.
pub static REAL_LEGACY_BLOB: &[u8] = &[
0x03, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -53,6 +165,7 @@
0xda, 0x40, 0x2b, 0x75, 0xd0, 0xd2, 0x81, 0x7f, 0xe2, 0x2b, 0xef, 0x64,
];
+/// Real legacy blob payload.
pub static REAL_LEGACY_BLOB_PAYLOAD: &[u8] = &[
0x6c, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x25, 0x00, 0x0b, 0x00, 0x06, 0x00, 0x72, 0x00, 0x00,
0x00, 0x06, 0x00, 0x80, 0x00, 0x43, 0x00, 0x20, 0x85, 0x42, 0x9e, 0xe9, 0x34, 0x85, 0x2a, 0x00,
@@ -82,11 +195,13 @@
0xe2, 0x2b, 0xef, 0x64,
];
+/// AES key blob.
pub static AES_KEY: &[u8] = &[
0x48, 0xe4, 0xb5, 0xff, 0xcd, 0x9c, 0x41, 0x1e, 0x20, 0x41, 0xf2, 0x65, 0xa0, 0x4f, 0xf6, 0x57,
0xc6, 0x58, 0xca, 0xbf, 0x28, 0xa3, 0x01, 0x98, 0x01, 0x76, 0x10, 0xc0, 0x30, 0x4e, 0x35, 0x6e,
];
+/// AES-GCM encrypted blob.
pub static AES_GCM_ENCRYPTED_BLOB: &[u8] = &[
0x03, 0x04, 0x04, 0x00, 0xbd, 0xdb, 0x8d, 0x69, 0x72, 0x56, 0xf0, 0xf5, 0xa4, 0x02, 0x88, 0x7f,
0x00, 0x00, 0x00, 0x00, 0x50, 0xd9, 0x97, 0x95, 0x37, 0x6e, 0x28, 0x6a, 0x28, 0x9d, 0x51, 0xb9,
@@ -119,6 +234,7 @@
0x2e, 0x0c, 0xc7, 0xbf, 0x29, 0x1e, 0x31, 0xdc, 0x0e, 0x85, 0x96, 0x7b,
];
+/// Decrypted payload.
pub static DECRYPTED_PAYLOAD: &[u8] = &[
0x7c, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x25, 0x00, 0x0b, 0x00, 0x06, 0x00, 0x72, 0x00, 0x00,
0x00, 0x06, 0x00, 0x80, 0x00, 0x43, 0x00, 0x20, 0xa4, 0xee, 0xdc, 0x1f, 0x9e, 0xba, 0x42, 0xd6,
@@ -149,6 +265,7 @@
0xf6, 0x0b, 0x81, 0x07,
];
+/// Password blob.
pub static PASSWORD: &[u8] = &[
0x42, 0x39, 0x30, 0x37, 0x44, 0x37, 0x32, 0x37, 0x39, 0x39, 0x43, 0x42, 0x39, 0x41, 0x42, 0x30,
0x34, 0x31, 0x30, 0x38, 0x46, 0x44, 0x33, 0x45, 0x39, 0x42, 0x32, 0x38, 0x36, 0x35, 0x41, 0x36,
@@ -156,6 +273,7 @@
0x32, 0x45, 0x31, 0x35, 0x43, 0x43, 0x46, 0x32, 0x39, 0x36, 0x33, 0x34, 0x31, 0x32, 0x41, 0x39,
];
+/// Super key blob.
pub static SUPERKEY: &[u8] = &[
0x03, 0x07, 0x01, 0x10, 0x9a, 0x81, 0x56, 0x7d, 0xf5, 0x86, 0x7c, 0x62, 0xd7, 0xf9, 0x26, 0x06,
0x00, 0x00, 0x00, 0x00, 0xde, 0x2a, 0xcb, 0xac, 0x98, 0x57, 0x2b, 0xe5, 0x57, 0x18, 0x78, 0x57,
@@ -164,6 +282,29 @@
0x76, 0x04, 0x2a, 0x48, 0xd1, 0xa7, 0x59, 0xd1, 0x04, 0x5b, 0xb4, 0x8a, 0x09, 0x22, 0x13, 0x0c,
0x94, 0xb6, 0x67, 0x7b, 0x39, 0x85, 0x28, 0x11,
];
+
+/// Super key IV.
+pub static SUPERKEY_IV: &[u8] = &[
+ 0x9a, 0x81, 0x56, 0x7d, 0xf5, 0x86, 0x7c, 0x62, 0xd7, 0xf9, 0x26, 0x06, 0x00, 0x00, 0x00, 0x00,
+];
+
+/// Super key tag.
+pub static SUPERKEY_TAG: &[u8] = &[
+ 0xde, 0x2a, 0xcb, 0xac, 0x98, 0x57, 0x2b, 0xe5, 0x57, 0x18, 0x78, 0x57, 0x6e, 0x10, 0x09, 0x84,
+];
+
+/// Super key salt.
+pub static SUPERKEY_SALT: &[u8] = &[
+ 0x04, 0x5b, 0xb4, 0x8a, 0x09, 0x22, 0x13, 0x0c, 0x94, 0xb6, 0x67, 0x7b, 0x39, 0x85, 0x28, 0x11,
+];
+
+/// Super key payload.
+pub static SUPERKEY_PAYLOAD: &[u8] = &[
+ 0xac, 0x6d, 0x13, 0xe6, 0xad, 0x2c, 0x89, 0x53, 0x1a, 0x99, 0xa5, 0x6c, 0x88, 0xe9, 0xeb, 0x5c,
+ 0xef, 0x68, 0x5e, 0x5b, 0x53, 0xa8, 0xe7, 0xa2, 0x76, 0x04, 0x2a, 0x48, 0xd1, 0xa7, 0x59, 0xd1,
+];
+
+/// User authbound key blob.
pub static USRPKEY_AUTHBOUND: &[u8] = &[
0x03, 0x04, 0x04, 0x00, 0x1c, 0x34, 0x87, 0x6f, 0xc8, 0x35, 0x0d, 0x34, 0x88, 0x59, 0xbc, 0xf5,
0x00, 0x00, 0x00, 0x00, 0x62, 0xe3, 0x38, 0x2d, 0xd0, 0x58, 0x40, 0xc1, 0xb0, 0xf2, 0x4a, 0xdd,
@@ -203,6 +344,56 @@
0xaf, 0x17, 0x2f, 0x21, 0x07, 0xea, 0x61, 0xff, 0x73, 0x08, 0x50, 0xb2, 0x19, 0xe8, 0x23, 0x1b,
0x83, 0x42, 0xdd, 0x4e, 0x6d,
];
+
+/// Authbound IV.
+pub static USRPKEY_AUTHBOUND_IV: &[u8] = &[
+ 0x1c, 0x34, 0x87, 0x6f, 0xc8, 0x35, 0x0d, 0x34, 0x88, 0x59, 0xbc, 0xf5, 0x00, 0x00, 0x00, 0x00,
+];
+
+/// Authbound tag.
+pub static USRPKEY_AUTHBOUND_TAG: &[u8] = &[
+ 0x62, 0xe3, 0x38, 0x2d, 0xd0, 0x58, 0x40, 0xc1, 0xb0, 0xf2, 0x4a, 0xdd, 0xf7, 0x81, 0x67, 0x0b,
+];
+
+/// Encrypted user key payload.
+pub static USRPKEY_AUTHBOUND_ENC_PAYLOAD: &[u8] = &[
+ 0x05, 0xb2, 0x5a, 0x1d, 0x1b, 0x25, 0x19, 0x48, 0xbf, 0x76, 0x0b, 0x37, 0x8c, 0x60, 0x52, 0xea,
+ 0x30, 0x2a, 0x2c, 0x89, 0x99, 0x95, 0x57, 0x5c, 0xec, 0x62, 0x3c, 0x08, 0x1a, 0xc6, 0x65, 0xf9,
+ 0xad, 0x24, 0x99, 0xf0, 0x5c, 0x44, 0xa0, 0xea, 0x9a, 0x60, 0xa2, 0xef, 0xf5, 0x27, 0x50, 0xba,
+ 0x9c, 0xef, 0xa6, 0x08, 0x88, 0x4b, 0x0f, 0xfe, 0x5d, 0x41, 0xac, 0xba, 0xef, 0x9d, 0xa4, 0xb7,
+ 0x72, 0xd3, 0xc8, 0x11, 0x92, 0x06, 0xf6, 0x26, 0xdf, 0x90, 0xe2, 0x66, 0x89, 0xf3, 0x85, 0x16,
+ 0x4a, 0xdf, 0x7f, 0xac, 0x94, 0x4a, 0x1c, 0xce, 0x18, 0xee, 0xf4, 0x1f, 0x8e, 0xd6, 0xaf, 0xfd,
+ 0x1d, 0xe5, 0x80, 0x4a, 0x6b, 0xbf, 0x91, 0xe2, 0x36, 0x1d, 0xb3, 0x53, 0x12, 0xfd, 0xc9, 0x0b,
+ 0xa6, 0x69, 0x00, 0x45, 0xcb, 0x4c, 0x40, 0x6b, 0x70, 0xcb, 0xd2, 0xa0, 0x44, 0x0b, 0x4b, 0xec,
+ 0xd6, 0x4f, 0x6f, 0x64, 0x37, 0xa7, 0xc7, 0x25, 0x54, 0xf4, 0xac, 0x6b, 0x34, 0x53, 0xea, 0x4e,
+ 0x56, 0x49, 0xba, 0xf4, 0x1e, 0xc6, 0x52, 0x8f, 0xf4, 0x85, 0xe7, 0xb5, 0xaf, 0x49, 0x68, 0xb3,
+ 0xb8, 0x7d, 0x63, 0xfc, 0x6e, 0x83, 0xa0, 0xf3, 0x91, 0x04, 0x80, 0xfd, 0xc5, 0x54, 0x7e, 0x92,
+ 0x1a, 0x87, 0x2c, 0x6e, 0xa6, 0x29, 0xb9, 0x1e, 0x3f, 0xef, 0x30, 0x12, 0x7b, 0x2f, 0xa2, 0x16,
+ 0x61, 0x8a, 0xcf, 0x14, 0x2d, 0x62, 0x98, 0x15, 0xae, 0x3b, 0xe6, 0x08, 0x1e, 0xb1, 0xf1, 0x21,
+ 0xb0, 0x50, 0xc0, 0x4b, 0x81, 0x71, 0x29, 0xe7, 0x86, 0xbf, 0x29, 0xe1, 0xeb, 0xfe, 0xbc, 0x11,
+ 0x3c, 0xc6, 0x15, 0x47, 0x9b, 0x41, 0x84, 0x61, 0x33, 0xbf, 0xca, 0xfe, 0x24, 0x92, 0x9e, 0x70,
+ 0x26, 0x36, 0x46, 0xca, 0xfe, 0xd3, 0x5a, 0x1d, 0x9e, 0x30, 0x19, 0xbd, 0x26, 0x49, 0xb4, 0x90,
+ 0x0c, 0x8d, 0xa2, 0x28, 0xa6, 0x24, 0x62, 0x6b, 0xe2, 0xfa, 0xe0, 0x53, 0xaa, 0x01, 0xeb, 0xaa,
+ 0x41, 0x2b, 0xcb, 0xb1, 0x08, 0x66, 0x9d, 0x21, 0x2d, 0x2a, 0x47, 0x44, 0xee, 0xd5, 0x06, 0xe3,
+ 0x4a, 0xb9, 0x3f, 0xcd, 0x78, 0x67, 0x89, 0x5b, 0xf7, 0x51, 0xc0, 0xc4, 0xa9, 0x68, 0xee, 0x44,
+ 0x9c, 0x47, 0xa4, 0xbd, 0x6f, 0x7b, 0xdd, 0x64, 0xa8, 0xc7, 0x1e, 0x77, 0x1d, 0x68, 0x87, 0xaa,
+ 0xae, 0x3c, 0xfc, 0x58, 0xb6, 0x3c, 0xcf, 0x58, 0xd0, 0x10, 0xaa, 0xef, 0xf0, 0x98, 0x67, 0x14,
+ 0x29, 0x4d, 0x40, 0x8b, 0xe5, 0xb1, 0xdf, 0x7f, 0x40, 0xb1, 0xd8, 0xea, 0x6c, 0xa8, 0xf7, 0x64,
+ 0xed, 0x02, 0x8d, 0xe7, 0x93, 0xfe, 0x79, 0x9a, 0x88, 0x62, 0x4f, 0xd0, 0x8a, 0x80, 0x36, 0x42,
+ 0x0a, 0xf1, 0xa2, 0x0e, 0x30, 0x39, 0xbd, 0x26, 0x1d, 0xd4, 0xf1, 0xc8, 0x6e, 0xdd, 0xc5, 0x41,
+ 0x29, 0xd8, 0xc1, 0x9e, 0x24, 0xf0, 0x25, 0x07, 0x05, 0x06, 0xc5, 0x08, 0xe3, 0x02, 0x2b, 0xe1,
+ 0x40, 0xc5, 0x67, 0xd2, 0x82, 0x96, 0x20, 0x80, 0xcf, 0x87, 0x3a, 0xc6, 0xb0, 0xbe, 0xcc, 0xbb,
+ 0x5a, 0x01, 0xab, 0xdd, 0x00, 0xc7, 0x0e, 0x7b, 0x02, 0x35, 0x27, 0xf4, 0x70, 0xfe, 0xd1, 0x19,
+ 0x6a, 0x64, 0x23, 0x9d, 0xba, 0xe9, 0x1d, 0x76, 0x90, 0xfe, 0x7f, 0xd6, 0xb5, 0xa0, 0xe7, 0xb9,
+ 0xf3, 0x56, 0x82, 0x8e, 0x57, 0x35, 0xf2, 0x69, 0xce, 0x52, 0xac, 0xc2, 0xf6, 0x5e, 0xb6, 0x54,
+ 0x95, 0x83, 0x3b, 0x9f, 0x48, 0xbb, 0x04, 0x06, 0xac, 0x55, 0xa9, 0xb9, 0xa3, 0xe7, 0x89, 0x6e,
+ 0x5c, 0x3a, 0x08, 0x67, 0x00, 0x8f, 0x1e, 0x26, 0x1b, 0x4d, 0x8a, 0xa6, 0x17, 0xa0, 0xa6, 0x18,
+ 0xe6, 0x31, 0x43, 0x15, 0xb8, 0x7f, 0x9e, 0xf5, 0x78, 0x58, 0x98, 0xb1, 0x8c, 0xf5, 0x22, 0x42,
+ 0x33, 0xc0, 0x42, 0x72, 0x4f, 0xce, 0x9f, 0x31, 0xaf, 0x17, 0x2f, 0x21, 0x07, 0xea, 0x61, 0xff,
+ 0x73, 0x08, 0x50, 0xb2, 0x19, 0xe8, 0x23, 0x1b, 0x83, 0x42, 0xdd, 0x4e, 0x6d,
+];
+
+/// User key characteristics blob.
pub static USRPKEY_AUTHBOUND_CHR: &[u8] = &[
0x03, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -218,6 +409,8 @@
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xbd, 0x02, 0x00, 0x60,
0x10, 0x9d, 0x8b, 0x31, 0x76, 0x01, 0x00, 0x00, 0xf5, 0x01, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00,
];
+
+/// User certificate blob.
pub static USRCERT_AUTHBOUND: &[u8] = &[
0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -264,6 +457,8 @@
0x39, 0x58, 0xe9, 0x89, 0x1a, 0x14, 0x41, 0x8d, 0xe0, 0xdc, 0x3d, 0x88, 0xf4, 0x2c, 0x7c, 0xda,
0xa1, 0x84, 0xfa, 0x7f, 0xf9, 0x07, 0x97, 0xfb, 0xb5, 0xb7, 0x28, 0x28, 0x00, 0x7c, 0xa7,
];
+
+/// CA certificate blob.
pub static CACERT_AUTHBOUND: &[u8] = &[
0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -452,6 +647,7 @@
0xab, 0xae, 0x24, 0xe2, 0x44, 0x35, 0x16, 0x8d, 0x55, 0x3c, 0xe4,
];
+/// User non-authbound-key blob.
pub static USRPKEY_NON_AUTHBOUND: &[u8] = &[
0x03, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -491,6 +687,8 @@
0x46, 0xf0, 0xee, 0x50, 0x73, 0x6a, 0x7b, 0xa3, 0xe9, 0xb1, 0x08, 0x81, 0x00, 0xdf, 0x0e, 0xc9,
0xc3, 0x2c, 0x13, 0x64, 0xa1,
];
+
+/// User non-authbound-key characteristics blob.
pub static USRPKEY_NON_AUTHBOUND_CHR: &[u8] = &[
0x03, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -506,6 +704,7 @@
0x60, 0x60, 0x60, 0x8c, 0x31, 0x76, 0x01, 0x00, 0x00, 0xf5, 0x01, 0x00, 0x30, 0x00, 0x00, 0x00,
0x00,
];
+/// User non-authbound-key certificate blob.
pub static USRCERT_NON_AUTHBOUND: &[u8] = &[
0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -553,6 +752,7 @@
0xd8, 0xd5, 0xd1, 0x64, 0x4c, 0x05, 0xdd, 0x13, 0x0e, 0xa4, 0xf3, 0x38, 0xbf, 0x18, 0xd5,
];
+/// User non-authbound-key ca-certs blob.
pub static CACERT_NON_AUTHBOUND: &[u8] = &[
0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -741,6 +941,7 @@
0xab, 0xae, 0x24, 0xe2, 0x44, 0x35, 0x16, 0x8d, 0x55, 0x3c, 0xe4,
];
+/// User decrypted authbound-key blob.
pub static _DECRYPTED_USRPKEY_AUTHBOUND: &[u8] = &[
0x44, 0x4b, 0x4d, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
0xc6, 0x15, 0x3a, 0x08, 0x1e, 0x43, 0xba, 0x7a, 0x0f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
@@ -778,6 +979,7 @@
0x60, 0x5e, 0xcd, 0xce, 0x3a, 0xd8, 0x09, 0xeb, 0x9d, 0x40, 0xdb, 0x58, 0x53,
];
+/// User loaded authbound certs blob.
pub static LOADED_CERT_AUTHBOUND: &[u8] = &[
0x30, 0x82, 0x02, 0x93, 0x30, 0x82, 0x02, 0x3A, 0xA0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x01, 0x01,
0x30, 0x0A, 0x06, 0x08, 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x02, 0x30, 0x29, 0x31, 0x19,
@@ -822,6 +1024,8 @@
0xE0, 0xDC, 0x3D, 0x88, 0xF4, 0x2C, 0x7C, 0xDA, 0xA1, 0x84, 0xFA, 0x7F, 0xF9, 0x07, 0x97, 0xFB,
0xB5, 0xB7, 0x28, 0x28, 0x00, 0x7C, 0xA7,
];
+
+/// User loaded authbound ca-certs blob.
pub static LOADED_CACERT_AUTHBOUND: &[u8] = &[
0x30, 0x82, 0x02, 0x26, 0x30, 0x82, 0x01, 0xAB, 0xA0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x0A, 0x05,
0x84, 0x20, 0x26, 0x90, 0x76, 0x23, 0x58, 0x71, 0x77, 0x30, 0x0A, 0x06, 0x08, 0x2A, 0x86, 0x48,
@@ -1008,6 +1212,7 @@
0x55, 0x3C, 0xE4,
];
+/// User loaded non-authbound user key blob.
pub static LOADED_USRPKEY_NON_AUTHBOUND: &[u8] = &[
0x44, 0x4b, 0x4d, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
0x8a, 0xc1, 0x08, 0x13, 0x7c, 0x47, 0xba, 0x09, 0x0e, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
@@ -1045,6 +1250,7 @@
0xe9, 0xb1, 0x08, 0x81, 0x00, 0xdf, 0x0e, 0xc9, 0xc3, 0x2c, 0x13, 0x64, 0xa1,
];
+/// User loaded non-authbound certificate blob.
pub static LOADED_CERT_NON_AUTHBOUND: &[u8] = &[
0x30, 0x82, 0x02, 0x93, 0x30, 0x82, 0x02, 0x39, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x01, 0x01,
0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x02, 0x30, 0x29, 0x31, 0x19,
@@ -1090,6 +1296,7 @@
0x0e, 0xa4, 0xf3, 0x38, 0xbf, 0x18, 0xd5,
];
+/// User loaded non-authbound ca-certificates blob.
pub static LOADED_CACERT_NON_AUTHBOUND: &[u8] = &[
0x30, 0x82, 0x02, 0x26, 0x30, 0x82, 0x01, 0xab, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x0a, 0x05,
0x84, 0x20, 0x26, 0x90, 0x76, 0x23, 0x58, 0x71, 0x77, 0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48,
diff --git a/keystore2/src/legacy_importer.rs b/keystore2/src/legacy_importer.rs
new file mode 100644
index 0000000..93e1735
--- /dev/null
+++ b/keystore2/src/legacy_importer.rs
@@ -0,0 +1,943 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module acts as a bridge between the legacy key database and the keystore2 database.
+
+use crate::database::{
+ BlobInfo, BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
+ KeyMetaEntry, KeyType, KeystoreDB, Uuid, KEYSTORE_UUID,
+};
+use crate::error::{map_km_error, Error};
+use crate::key_parameter::{KeyParameter, KeyParameterValue};
+use crate::legacy_blob::{self, Blob, BlobValue, LegacyKeyCharacteristics};
+use crate::super_key::USER_SUPER_KEY;
+use crate::utils::{
+ key_characteristics_to_internal, uid_to_android_user, upgrade_keyblob_if_required_with,
+ watchdog as wd, AesGcm,
+};
+use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
+};
+use anyhow::{Context, Result};
+use core::ops::Deref;
+use keystore2_crypto::{Password, ZVec};
+use std::collections::{HashMap, HashSet};
+use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::{Arc, Mutex};
+
+/// Represents LegacyImporter.
+pub struct LegacyImporter {
+ async_task: Arc<AsyncTask>,
+ initializer: Mutex<
+ Option<
+ Box<
+ dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+ + Send
+ + 'static,
+ >,
+ >,
+ >,
+ /// This atomic is used for cheap interior mutability. It is intended to prevent
+ /// expensive calls into the legacy importer when the legacy database is empty.
+ /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
+ /// of time. This is tolerable in favor of the common case.
+ state: AtomicU8,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct RecentImport {
+ uid: u32,
+ alias: String,
+}
+
+impl RecentImport {
+ fn new(uid: u32, alias: String) -> Self {
+ Self { uid, alias }
+ }
+}
+
+enum BulkDeleteRequest {
+ Uid(u32),
+ User(u32),
+}
+
+struct LegacyImporterState {
+ recently_imported: HashSet<RecentImport>,
+ recently_imported_super_key: HashSet<u32>,
+ legacy_loader: Arc<LegacyBlobLoader>,
+ sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
+ db: KeystoreDB,
+}
+
+impl LegacyImporter {
+ const WIFI_NAMESPACE: i64 = 102;
+ const AID_WIFI: u32 = 1010;
+
+ const STATE_UNINITIALIZED: u8 = 0;
+ const STATE_READY: u8 = 1;
+ const STATE_EMPTY: u8 = 2;
+
+ /// Constructs a new LegacyImporter using the given AsyncTask object as import
+ /// worker.
+ pub fn new(async_task: Arc<AsyncTask>) -> Self {
+ Self {
+ async_task,
+ initializer: Default::default(),
+ state: AtomicU8::new(Self::STATE_UNINITIALIZED),
+ }
+ }
+
+ /// The legacy importer must be initialized deferred, because keystore starts very early.
+ /// At this time the data partition may not be mounted. So we cannot open database connections
+ /// until we get actual key load requests. This sets the function that the legacy loader
+ /// uses to connect to the database.
+ pub fn set_init<F>(&self, f_init: F) -> Result<()>
+ where
+ F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+ + Send
+ + 'static,
+ {
+ let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");
+
+ // If we are not uninitialized we have no business setting the initializer.
+ if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
+ return Ok(());
+ }
+
+ // Only set the initializer if it hasn't been set before.
+ if initializer.is_none() {
+ *initializer = Some(Box::new(f_init))
+ }
+
+ Ok(())
+ }
+
+ /// This function is called by the import requestor to check if it is worth
+ /// making an import request. It also transitions the state from UNINITIALIZED
+ /// to READY or EMPTY on first use. The deferred initialization is necessary, because
+ /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
+ /// Returns Ok(STATE_READY) if an import request is worth undertaking and
+ /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
+ /// was not initialized and cannot be initialized.
+ fn check_state(&self) -> Result<u8> {
+ let mut first_try = true;
+ loop {
+ match (self.state.load(Ordering::Relaxed), first_try) {
+ (Self::STATE_EMPTY, _) => {
+ return Ok(Self::STATE_EMPTY);
+ }
+ (Self::STATE_UNINITIALIZED, true) => {
+ // If we find the legacy loader uninitialized, we grab the initializer lock,
+ // check if the legacy database is empty, and if not, schedule an initialization
+ // request. Coming out of the initializer lock, the state is either EMPTY or
+ // READY.
+ let mut initializer = self.initializer.lock().unwrap();
+
+ if let Some(initializer) = initializer.take() {
+ let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();
+
+ if legacy_loader.is_empty().context(
+ "In check_state: Trying to check if the legacy database is empty.",
+ )? {
+ self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
+ return Ok(Self::STATE_EMPTY);
+ }
+
+ self.async_task.queue_hi(move |shelf| {
+ shelf.get_or_put_with(|| LegacyImporterState {
+ recently_imported: Default::default(),
+ recently_imported_super_key: Default::default(),
+ legacy_loader,
+ sec_level_to_km_uuid,
+ db,
+ });
+ });
+
+ // It is safe to set this here even though the async task may not yet have
+ // run because any thread observing this will not be able to schedule a
+ // task that can run before the initialization.
+ // Also we can only transition out of this state while having the
+ // initializer lock and having found an initializer.
+ self.state.store(Self::STATE_READY, Ordering::Relaxed);
+ return Ok(Self::STATE_READY);
+ } else {
+ // There is a chance that we just lost the race from state.load() to
+ // grabbing the initializer mutex. If that is the case the state must
+ // be EMPTY or READY after coming out of the lock. So we can give it
+ // one more try.
+ first_try = false;
+ continue;
+ }
+ }
+ (Self::STATE_UNINITIALIZED, false) => {
+ // Okay, tough luck. The legacy loader was really completely uninitialized.
+ return Err(Error::sys()).context(
+ "In check_state: Legacy loader should not be called uninitialized.",
+ );
+ }
+ (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
+ (s, _) => panic!("Unknown legacy importer state. {} ", s),
+ }
+ }
+ }
+
+ /// List all aliases for uid in the legacy database.
+ pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+ let _wp = wd::watch_millis("LegacyImporter::list_uid", 500);
+
+ let uid = match (domain, namespace) {
+ (Domain::APP, namespace) => namespace as u32,
+ (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
+ _ => return Ok(Vec::new()),
+ };
+ self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
+ |v| {
+ v.into_iter()
+ .map(|alias| KeyDescriptor {
+ domain,
+ nspace: namespace,
+ alias: Some(alias),
+ blob: None,
+ })
+ .collect()
+ },
+ )
+ }
+
+ /// Sends the given closure to the importer thread for execution after calling check_state.
+ /// Returns None if the database was empty and the request was not executed.
+ /// Otherwise returns Some with the result produced by the import request.
+ /// The loader state may transition to STATE_EMPTY during the execution of this function.
+ fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
+ where
+ F: FnOnce(&mut LegacyImporterState) -> Result<T> + Send + 'static,
+ {
+ // Short circuit if the database is empty or not initialized (error case).
+ match self.check_state().context("In do_serialized: Checking state.") {
+ Ok(LegacyImporter::STATE_EMPTY) => return None,
+ Ok(LegacyImporter::STATE_READY) => {}
+ Err(e) => return Some(Err(e)),
+ Ok(s) => panic!("Unknown legacy importer state. {} ", s),
+ }
+
+ // We have established that there may be a key in the legacy database.
+ // Now we schedule an import request.
+ let (sender, receiver) = channel();
+ self.async_task.queue_hi(move |shelf| {
+ // Get the importer state from the shelf.
+ // There may not be a state. This can happen if this import request was scheduled
+ // before a previous request established that the legacy database was empty
+ // and removed the state from the shelf. Since we know now that the database
+ // is empty, we can return None here.
+ let (new_state, result) = if let Some(legacy_importer_state) =
+ shelf.get_downcast_mut::<LegacyImporterState>()
+ {
+ let result = f(legacy_importer_state);
+ (legacy_importer_state.check_empty(), Some(result))
+ } else {
+ (Self::STATE_EMPTY, None)
+ };
+
+ // If the import request determined that the database is now empty, we discard
+ // the state from the shelf to free up the resources we won't need any longer.
+ if result.is_some() && new_state == Self::STATE_EMPTY {
+ shelf.remove_downcast_ref::<LegacyImporterState>();
+ }
+
+ // Send the result to the requester.
+ if let Err(e) = sender.send((new_state, result)) {
+ log::error!("In do_serialized. Error in sending the result. {:?}", e);
+ }
+ });
+
+ let (new_state, result) = match receiver.recv() {
+ Err(e) => {
+ return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
+ }
+ Ok(r) => r,
+ };
+
+ // We can only transition to EMPTY but never back.
+ // The importer never creates any legacy blobs.
+ if new_state == Self::STATE_EMPTY {
+ self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
+ }
+
+ result
+ }
+
+ /// Runs the key_accessor function and returns its result. If it returns an error and the
+ /// root cause was KEY_NOT_FOUND, tries to import a key with the given parameters from
+ /// the legacy database to the new database and runs the key_accessor function again if
+ /// the import request was successful.
+ pub fn with_try_import<F, T>(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ super_key: Option<Arc<dyn AesGcm + Send + Sync>>,
+ key_accessor: F,
+ ) -> Result<T>
+ where
+ F: Fn() -> Result<T>,
+ {
+ let _wp = wd::watch_millis("LegacyImporter::with_try_import", 500);
+
+ // Access the key and return on success.
+ match key_accessor() {
+ Ok(result) => return Ok(result),
+ Err(e) => match e.root_cause().downcast_ref::<Error>() {
+ Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
+ _ => return Err(e),
+ },
+ }
+
+ // Filter inputs. We can only load legacy app domain keys and some special rules due
+ // to which we import keys transparently to an SELINUX domain.
+ let uid = match key {
+ KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
+ KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
+ match *nspace {
+ Self::WIFI_NAMESPACE => Self::AID_WIFI,
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context(format!("No legacy keys for namespace {}", nspace))
+ }
+ }
+ }
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("No legacy keys for key descriptor.")
+ }
+ };
+
+ let key_clone = key.clone();
+ let result = self.do_serialized(move |importer_state| {
+ let super_key = super_key.map(|sk| -> Arc<dyn AesGcm> { sk });
+ importer_state.check_and_import(uid, key_clone, super_key)
+ });
+
+ if let Some(result) = result {
+ result?;
+ // After successful import try again.
+ key_accessor()
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
+ }
+ }
+
+ /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
+ /// this function makes an import request and on success retries the key_accessor.
+ pub fn with_try_import_super_key<F, T>(
+ &self,
+ user_id: u32,
+ pw: &Password,
+ mut key_accessor: F,
+ ) -> Result<Option<T>>
+ where
+ F: FnMut() -> Result<Option<T>>,
+ {
+ let _wp = wd::watch_millis("LegacyImporter::with_try_import_super_key", 500);
+
+ match key_accessor() {
+ Ok(Some(result)) => return Ok(Some(result)),
+ Ok(None) => {}
+ Err(e) => return Err(e),
+ }
+ let pw = pw.try_clone().context("In with_try_import_super_key: Cloning password.")?;
+ let result = self.do_serialized(move |importer_state| {
+ importer_state.check_and_import_super_key(user_id, &pw)
+ });
+
+ if let Some(result) = result {
+ result?;
+ // After successful import try again.
+ key_accessor()
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Deletes all keys belonging to the given namespace, importing them into the database
+ /// for subsequent garbage collection if necessary.
+ pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
+ let _wp = wd::watch_millis("LegacyImporter::bulk_delete_uid", 500);
+
+ let uid = match (domain, nspace) {
+ (Domain::APP, nspace) => nspace as u32,
+ (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
+ // Nothing to do.
+ _ => return Ok(()),
+ };
+
+ let result = self.do_serialized(move |importer_state| {
+ importer_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
+ });
+
+ result.unwrap_or(Ok(()))
+ }
+
+ /// Deletes all keys belonging to the given android user, importing them into the database
+ /// for subsequent garbage collection if necessary.
+ pub fn bulk_delete_user(
+ &self,
+ user_id: u32,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ let _wp = wd::watch_millis("LegacyImporter::bulk_delete_user", 500);
+
+ let result = self.do_serialized(move |importer_state| {
+ importer_state
+ .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
+ });
+
+ result.unwrap_or(Ok(()))
+ }
+
+ /// Queries the legacy database for the presence of a super key for the given user.
+ pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
+ let result =
+ self.do_serialized(move |importer_state| importer_state.has_super_key(user_id));
+ result.unwrap_or(Ok(false))
+ }
+}
+
+impl LegacyImporterState {
+ fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
+ let sec_level = if is_strongbox {
+ SecurityLevel::STRONGBOX
+ } else {
+ SecurityLevel::TRUSTED_ENVIRONMENT
+ };
+
+ self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
+ anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
+ })
+ }
+
+ fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
+ self.legacy_loader
+ .list_keystore_entries_for_uid(uid)
+ .context("In list_uid: Trying to list legacy entries.")
+ }
+
+ /// Checks if the key can potentially be unlocked. And deletes the key entry otherwise.
+ /// If the super_key has already been imported, the super key database id is returned.
+ fn get_super_key_id_check_unlockable_or_delete(
+ &mut self,
+ uid: u32,
+ alias: &str,
+ ) -> Result<i64> {
+ let user_id = uid_to_android_user(uid);
+
+ match self
+ .db
+ .load_super_key(&USER_SUPER_KEY, user_id)
+ .context("In get_super_key_id_check_unlockable_or_delete: Failed to load super key")?
+ {
+ Some((_, entry)) => Ok(entry.id()),
+ None => {
+ // This might be the first time we access the super key,
+ // and it may not have been imported. We cannot import
+ // the legacy super_key key now, because we need to reencrypt
+ // it which we cannot do if we are not unlocked, which we are
+ // not because otherwise the key would have been imported.
+ // We can check though if the key exists. If it does,
+ // we can return Locked. Otherwise, we can delete the
+ // key and return NotFound, because the key will never
+ // be unlocked again.
+ if self.legacy_loader.has_super_key(user_id) {
+ Err(Error::Rc(ResponseCode::LOCKED)).context(
+ "In get_super_key_id_check_unlockable_or_delete: \
+ Cannot import super key of this key while user is locked.",
+ )
+ } else {
+ self.legacy_loader.remove_keystore_entry(uid, alias).context(
+ "In get_super_key_id_check_unlockable_or_delete: \
+ Trying to remove obsolete key.",
+ )?;
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In get_super_key_id_check_unlockable_or_delete: Obsolete key.")
+ }
+ }
+ }
+ }
+
+ fn characteristics_file_to_cache(
+ &mut self,
+ km_blob_params: Option<(Blob, LegacyKeyCharacteristics)>,
+ super_key: &Option<Arc<dyn AesGcm>>,
+ uid: u32,
+ alias: &str,
+ ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<(LegacyBlob<'static>, BlobMetaData)>)>
+ {
+ let (km_blob, params) = match km_blob_params {
+ Some((km_blob, LegacyKeyCharacteristics::File(params))) => (km_blob, params),
+ Some((km_blob, LegacyKeyCharacteristics::Cache(params))) => {
+ return Ok((Some((km_blob, params)), None))
+ }
+ None => return Ok((None, None)),
+ };
+
+ let km_uuid = self
+ .get_km_uuid(km_blob.is_strongbox())
+ .context("In characteristics_file_to_cache: Trying to get KM UUID")?;
+
+ let blob = match (&km_blob.value(), super_key.as_ref()) {
+ (BlobValue::Encrypted { iv, tag, data }, Some(super_key)) => {
+ let blob = super_key
+ .decrypt(data, iv, tag)
+ .context("In characteristics_file_to_cache: Decryption failed.")?;
+ LegacyBlob::ZVec(blob)
+ }
+ (BlobValue::Encrypted { .. }, None) => {
+ return Err(Error::Rc(ResponseCode::LOCKED)).context(
+ "In characteristics_file_to_cache: Oh uh, so close. \
+ This ancient key cannot be imported unless the user is unlocked.",
+ );
+ }
+ (BlobValue::Decrypted(data), _) => LegacyBlob::Ref(data),
+ _ => {
+ return Err(Error::sys())
+ .context("In characteristics_file_to_cache: Unexpected blob type.")
+ }
+ };
+
+ let (km_params, upgraded_blob) = get_key_characteristics_without_app_data(&km_uuid, &*blob)
+ .context(
+ "In characteristics_file_to_cache: Failed to get key characteristics from device.",
+ )?;
+
+ let flags = km_blob.get_flags();
+
+ let (current_blob, superseded_blob) = if let Some(upgraded_blob) = upgraded_blob {
+ match (km_blob.take_value(), super_key.as_ref()) {
+ (BlobValue::Encrypted { iv, tag, data }, Some(super_key)) => {
+ let super_key_id =
+ self.get_super_key_id_check_unlockable_or_delete(uid, alias).context(
+ "In characteristics_file_to_cache: \
+ How is there a super key but no super key id?",
+ )?;
+
+ let mut superseded_metadata = BlobMetaData::new();
+ superseded_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ superseded_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ superseded_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ superseded_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ let superseded_blob = (LegacyBlob::Vec(data), superseded_metadata);
+
+ let (data, iv, tag) = super_key.encrypt(&upgraded_blob).context(
+ "In characteristics_file_to_cache: \
+ Failed to encrypt upgraded key blob.",
+ )?;
+ (
+ Blob::new(flags, BlobValue::Encrypted { data, iv, tag }),
+ Some(superseded_blob),
+ )
+ }
+ (BlobValue::Encrypted { .. }, None) => {
+ return Err(Error::sys()).context(
+ "In characteristics_file_to_cache: This should not be reachable. \
+ The blob could not have been decrypted above.",
+ );
+ }
+ (BlobValue::Decrypted(data), _) => {
+ let mut superseded_metadata = BlobMetaData::new();
+ superseded_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ let superseded_blob = (LegacyBlob::ZVec(data), superseded_metadata);
+ (
+ Blob::new(
+ flags,
+ BlobValue::Decrypted(upgraded_blob.try_into().context(
+ "In characteristics_file_to_cache: \
+ Failed to convert upgraded blob to ZVec.",
+ )?),
+ ),
+ Some(superseded_blob),
+ )
+ }
+ _ => {
+ return Err(Error::sys()).context(
+ "In characteristics_file_to_cache: This should not be reachable. \
+ Any other variant should have resulted in a different error.",
+ )
+ }
+ }
+ } else {
+ (km_blob, None)
+ };
+
+ let params =
+ augment_legacy_characteristics_file_with_key_characteristics(km_params, params);
+ Ok((Some((current_blob, params)), superseded_blob))
+ }
+
+ /// This is a key import request that must run in the importer thread. This must
+ /// be passed to do_serialized.
+ fn check_and_import(
+ &mut self,
+ uid: u32,
+ mut key: KeyDescriptor,
+ super_key: Option<Arc<dyn AesGcm>>,
+ ) -> Result<()> {
+ let alias = key.alias.clone().ok_or_else(|| {
+ anyhow::anyhow!(Error::sys()).context(
+ "In check_and_import: Must be Some because \
+ our caller must not have called us otherwise.",
+ )
+ })?;
+
+ if self.recently_imported.contains(&RecentImport::new(uid, alias.clone())) {
+ return Ok(());
+ }
+
+ if key.domain == Domain::APP {
+ key.nspace = uid as i64;
+ }
+
+ // If the key is not found in the cache, try to load from the legacy database.
+ let (km_blob_params, user_cert, ca_cert) = self
+ .legacy_loader
+ .load_by_uid_alias(uid, &alias, &super_key)
+ .map_err(|e| {
+ if e.root_cause().downcast_ref::<legacy_blob::Error>()
+ == Some(&legacy_blob::Error::LockedComponent)
+ {
+ // There is no chance to succeed at this point. We just check if there is
+ // a super key so that this entry might be unlockable in the future.
+ // If not the entry will be deleted and KEY_NOT_FOUND is returned.
+ // If a super key id was returned we still have to return LOCKED but the key
+ // may be imported when the user unlocks the device.
+ self.get_super_key_id_check_unlockable_or_delete(uid, &alias)
+ .and_then::<i64, _>(|_| {
+ Err(Error::Rc(ResponseCode::LOCKED))
+ .context("Super key present but locked.")
+ })
+ .unwrap_err()
+ } else {
+ e
+ }
+ })
+ .context("In check_and_import: Trying to load legacy blob.")?;
+
+ let (km_blob_params, superseded_blob) = self
+ .characteristics_file_to_cache(km_blob_params, &super_key, uid, &alias)
+ .context("In check_and_import: Trying to update legacy characteristics.")?;
+
+ let result = match km_blob_params {
+ Some((km_blob, params)) => {
+ let is_strongbox = km_blob.is_strongbox();
+
+ let (blob, mut blob_metadata) = match km_blob.take_value() {
+ BlobValue::Encrypted { iv, tag, data } => {
+ // Get super key id for user id.
+ let super_key_id = self
+ .get_super_key_id_check_unlockable_or_delete(uid, &alias)
+ .context("In check_and_import: Failed to get super key id.")?;
+
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ blob_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ (LegacyBlob::Vec(data), blob_metadata)
+ }
+ BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_import: Legacy key has unexpected type.")
+ }
+ };
+
+ let km_uuid = self
+ .get_km_uuid(is_strongbox)
+ .context("In check_and_import: Trying to get KM UUID")?;
+ blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+
+ let mut metadata = KeyMetaData::new();
+ let creation_date = DateTime::now()
+ .context("In check_and_import: Trying to make creation time.")?;
+ metadata.add(KeyMetaEntry::CreationDate(creation_date));
+
+ let blob_info = BlobInfo::new_with_superseded(
+ &blob,
+ &blob_metadata,
+ superseded_blob.as_ref().map(|(b, m)| (&**b, m)),
+ );
+ // Store legacy key in the database.
+ self.db
+ .store_new_key(
+ &key,
+ KeyType::Client,
+ ¶ms,
+ &blob_info,
+ &CertificateInfo::new(user_cert, ca_cert),
+ &metadata,
+ &km_uuid,
+ )
+ .context("In check_and_import.")?;
+ Ok(())
+ }
+ None => {
+ if let Some(ca_cert) = ca_cert {
+ self.db
+ .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
+ .context("In check_and_import: Failed to insert new certificate.")?;
+ Ok(())
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_import: Legacy key not found.")
+ }
+ }
+ };
+
+ match result {
+ Ok(()) => {
+ // Add the key to the imported_keys list.
+ self.recently_imported.insert(RecentImport::new(uid, alias.clone()));
+ // Delete legacy key from the file system
+ self.legacy_loader
+ .remove_keystore_entry(uid, &alias)
+ .context("In check_and_import: Trying to remove imported key.")?;
+ Ok(())
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn check_and_import_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
+ if self.recently_imported_super_key.contains(&user_id) {
+ return Ok(());
+ }
+
+ if let Some(super_key) = self
+ .legacy_loader
+ .load_super_key(user_id, pw)
+ .context("In check_and_import_super_key: Trying to load legacy super key.")?
+ {
+ let (blob, blob_metadata) =
+ crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
+ .context("In check_and_import_super_key: Trying to encrypt super key.")?;
+
+ self.db
+ .store_super_key(
+ user_id,
+ &USER_SUPER_KEY,
+ &blob,
+ &blob_metadata,
+ &KeyMetaData::new(),
+ )
+ .context(concat!(
+ "In check_and_import_super_key: ",
+ "Trying to insert legacy super_key into the database."
+ ))?;
+ self.legacy_loader.remove_super_key(user_id);
+ self.recently_imported_super_key.insert(user_id);
+ Ok(())
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_import_super_key: No key found to import.")
+ }
+ }
+
+ /// Key importer request to be run by do_serialized.
+ /// See LegacyImporter::bulk_delete_uid and LegacyImporter::bulk_delete_user.
+ fn bulk_delete(
+ &mut self,
+ bulk_delete_request: BulkDeleteRequest,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ let (aliases, user_id) = match bulk_delete_request {
+ BulkDeleteRequest::Uid(uid) => (
+ self.legacy_loader
+ .list_keystore_entries_for_uid(uid)
+ .context("In bulk_delete: Trying to get aliases for uid.")
+ .map(|aliases| {
+ let mut h = HashMap::<u32, HashSet<String>>::new();
+ h.insert(uid, aliases.into_iter().collect());
+ h
+ })?,
+ uid_to_android_user(uid),
+ ),
+ BulkDeleteRequest::User(user_id) => (
+ self.legacy_loader
+ .list_keystore_entries_for_user(user_id)
+ .context("In bulk_delete: Trying to get aliases for user_id.")?,
+ user_id,
+ ),
+ };
+
+ let super_key_id = self
+ .db
+ .load_super_key(&USER_SUPER_KEY, user_id)
+ .context("In bulk_delete: Failed to load super key")?
+ .map(|(_, entry)| entry.id());
+
+ for (uid, alias) in aliases
+ .into_iter()
+ .flat_map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
+ {
+ let (km_blob_params, _, _) = self
+ .legacy_loader
+ .load_by_uid_alias(uid, &alias, &None)
+ .context("In bulk_delete: Trying to load legacy blob.")?;
+
+ // Determine if the key needs special handling to be deleted.
+ let (need_gc, is_super_encrypted) = km_blob_params
+ .as_ref()
+ .map(|(blob, params)| {
+ let params = match params {
+ LegacyKeyCharacteristics::Cache(params)
+ | LegacyKeyCharacteristics::File(params) => params,
+ };
+ (
+ params.iter().any(|kp| {
+ KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
+ }),
+ blob.is_encrypted(),
+ )
+ })
+ .unwrap_or((false, false));
+
+ if keep_non_super_encrypted_keys && !is_super_encrypted {
+ continue;
+ }
+
+ if need_gc {
+ let mark_deleted = match km_blob_params
+ .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
+ {
+ Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
+ let mut blob_metadata = BlobMetaData::new();
+ if let (Ok(km_uuid), Some(super_key_id)) =
+ (self.get_km_uuid(is_strongbox), super_key_id)
+ {
+ blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ blob_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ Some((LegacyBlob::Vec(data), blob_metadata))
+ } else {
+ // Oh well - we tried our best, but if we cannot determine which
+ // KeyMint instance we have to send this blob to, we cannot
+ // do more than delete the key from the file system.
+ // And if we don't know which key wraps this key we cannot
+ // unwrap it for KeyMint either.
+ None
+ }
+ }
+ Some((_, BlobValue::Decrypted(data))) => {
+ Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
+ }
+ _ => None,
+ };
+
+ if let Some((blob, blob_metadata)) = mark_deleted {
+ self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
+ "In bulk_delete: Trying to insert deleted ",
+ "blob into the database for garbage collection."
+ ))?;
+ }
+ }
+
+ self.legacy_loader
+ .remove_keystore_entry(uid, &alias)
+ .context("In bulk_delete: Trying to remove imported key.")?;
+ }
+ Ok(())
+ }
+
+ fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
+ Ok(self.recently_imported_super_key.contains(&user_id)
+ || self.legacy_loader.has_super_key(user_id))
+ }
+
+ fn check_empty(&self) -> u8 {
+ if self.legacy_loader.is_empty().unwrap_or(false) {
+ LegacyImporter::STATE_EMPTY
+ } else {
+ LegacyImporter::STATE_READY
+ }
+ }
+}
+
+enum LegacyBlob<'a> {
+ Vec(Vec<u8>),
+ ZVec(ZVec),
+ Ref(&'a [u8]),
+}
+
+impl Deref for LegacyBlob<'_> {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ Self::Vec(v) => v,
+ Self::ZVec(v) => v,
+ Self::Ref(v) => v,
+ }
+ }
+}
+
+/// This function takes two KeyParameter lists. The first is assumed to have been retrieved from the
+/// KM back end using km_dev.getKeyCharacteristics. The second is assumed to have been retrieved
+/// from a legacy key characteristics file (not cache) as used in Android P and older. The function
+/// augments the former with entries from the latter only if no equivalent entry is present,
+/// ignoring the security level of enforcement. All entries in the latter are assumed to have security level
+/// KEYSTORE.
+fn augment_legacy_characteristics_file_with_key_characteristics<T>(
+ mut from_km: Vec<KeyParameter>,
+ legacy: T,
+) -> Vec<KeyParameter>
+where
+ T: IntoIterator<Item = KeyParameter>,
+{
+ for legacy_kp in legacy.into_iter() {
+ if !from_km
+ .iter()
+ .any(|km_kp| km_kp.key_parameter_value() == legacy_kp.key_parameter_value())
+ {
+ from_km.push(legacy_kp);
+ }
+ }
+ from_km
+}
+
+/// Attempts to retrieve the key characteristics for the given blob from the KM back end with the
+/// given UUID. It may upgrade the key blob in the process. In that case the upgraded blob is
+/// returned as the second tuple member.
+fn get_key_characteristics_without_app_data(
+ uuid: &Uuid,
+ blob: &[u8],
+) -> Result<(Vec<KeyParameter>, Option<Vec<u8>>)> {
+ let (km_dev, _) = crate::globals::get_keymint_dev_by_uuid(uuid)
+ .with_context(|| format!("In get_key_characteristics_without_app_data: Trying to get km device for id {:?}", uuid))?;
+
+ let (characteristics, upgraded_blob) = upgrade_keyblob_if_required_with(
+ &*km_dev,
+ blob,
+ &[],
+ |blob| {
+ let _wd = wd::watch_millis("In get_key_characteristics_without_app_data: Calling GetKeyCharacteristics.", 500);
+ map_km_error(km_dev.getKeyCharacteristics(blob, &[], &[]))
+ },
+ |_| Ok(()),
+ )
+ .context("In get_key_characteristics_without_app_data.")?;
+ Ok((key_characteristics_to_internal(characteristics), upgraded_blob))
+}
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
deleted file mode 100644
index 65f4b0b..0000000
--- a/keystore2/src/legacy_migrator.rs
+++ /dev/null
@@ -1,731 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! This module acts as a bridge between the legacy key database and the keystore2 database.
-
-use crate::key_parameter::KeyParameterValue;
-use crate::legacy_blob::BlobValue;
-use crate::utils::{uid_to_android_user, watchdog as wd};
-use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
-use crate::{database::KeyType, error::Error};
-use crate::{
- database::{
- BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
- KeyMetaEntry, KeystoreDB, Uuid, KEYSTORE_UUID,
- },
- super_key::USER_SUPER_KEY,
-};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
-use android_system_keystore2::aidl::android::system::keystore2::{
- Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
-};
-use anyhow::{Context, Result};
-use core::ops::Deref;
-use keystore2_crypto::{Password, ZVec};
-use std::collections::{HashMap, HashSet};
-use std::sync::atomic::{AtomicU8, Ordering};
-use std::sync::mpsc::channel;
-use std::sync::{Arc, Mutex};
-
-/// Represents LegacyMigrator.
-pub struct LegacyMigrator {
- async_task: Arc<AsyncTask>,
- initializer: Mutex<
- Option<
- Box<
- dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
- + Send
- + 'static,
- >,
- >,
- >,
- /// This atomic is used for cheap interior mutability. It is intended to prevent
- /// expensive calls into the legacy migrator when the legacy database is empty.
- /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
- /// of time. This is tolerable in favor of the common case.
- state: AtomicU8,
-}
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct RecentMigration {
- uid: u32,
- alias: String,
-}
-
-impl RecentMigration {
- fn new(uid: u32, alias: String) -> Self {
- Self { uid, alias }
- }
-}
-
-enum BulkDeleteRequest {
- Uid(u32),
- User(u32),
-}
-
-struct LegacyMigratorState {
- recently_migrated: HashSet<RecentMigration>,
- recently_migrated_super_key: HashSet<u32>,
- legacy_loader: Arc<LegacyBlobLoader>,
- sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
- db: KeystoreDB,
-}
-
-impl LegacyMigrator {
- const WIFI_NAMESPACE: i64 = 102;
- const AID_WIFI: u32 = 1010;
-
- const STATE_UNINITIALIZED: u8 = 0;
- const STATE_READY: u8 = 1;
- const STATE_EMPTY: u8 = 2;
-
- /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
- /// worker.
- pub fn new(async_task: Arc<AsyncTask>) -> Self {
- Self {
- async_task,
- initializer: Default::default(),
- state: AtomicU8::new(Self::STATE_UNINITIALIZED),
- }
- }
-
- /// The legacy migrator must be initialized deferred, because keystore starts very early.
- /// At this time the data partition may not be mounted. So we cannot open database connections
- /// until we get actual key load requests. This sets the function that the legacy loader
- /// uses to connect to the database.
- pub fn set_init<F>(&self, f_init: F) -> Result<()>
- where
- F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
- + Send
- + 'static,
- {
- let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");
-
- // If we are not uninitialized we have no business setting the initializer.
- if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
- return Ok(());
- }
-
- // Only set the initializer if it hasn't been set before.
- if initializer.is_none() {
- *initializer = Some(Box::new(f_init))
- }
-
- Ok(())
- }
-
- /// This function is called by the migration requestor to check if it is worth
- /// making a migration request. It also transitions the state from UNINITIALIZED
- /// to READY or EMPTY on first use. The deferred initialization is necessary, because
- /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
- /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
- /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
- /// was not initialized and cannot be initialized.
- fn check_state(&self) -> Result<u8> {
- let mut first_try = true;
- loop {
- match (self.state.load(Ordering::Relaxed), first_try) {
- (Self::STATE_EMPTY, _) => {
- return Ok(Self::STATE_EMPTY);
- }
- (Self::STATE_UNINITIALIZED, true) => {
- // If we find the legacy loader uninitialized, we grab the initializer lock,
- // check if the legacy database is empty, and if not, schedule an initialization
- // request. Coming out of the initializer lock, the state is either EMPTY or
- // READY.
- let mut initializer = self.initializer.lock().unwrap();
-
- if let Some(initializer) = initializer.take() {
- let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();
-
- if legacy_loader.is_empty().context(
- "In check_state: Trying to check if the legacy database is empty.",
- )? {
- self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
- return Ok(Self::STATE_EMPTY);
- }
-
- self.async_task.queue_hi(move |shelf| {
- shelf.get_or_put_with(|| LegacyMigratorState {
- recently_migrated: Default::default(),
- recently_migrated_super_key: Default::default(),
- legacy_loader,
- sec_level_to_km_uuid,
- db,
- });
- });
-
- // It is safe to set this here even though the async task may not yet have
- // run because any thread observing this will not be able to schedule a
- // task that can run before the initialization.
- // Also we can only transition out of this state while having the
- // initializer lock and having found an initializer.
- self.state.store(Self::STATE_READY, Ordering::Relaxed);
- return Ok(Self::STATE_READY);
- } else {
- // There is a chance that we just lost the race from state.load() to
- // grabbing the initializer mutex. If that is the case the state must
- // be EMPTY or READY after coming out of the lock. So we can give it
- // one more try.
- first_try = false;
- continue;
- }
- }
- (Self::STATE_UNINITIALIZED, false) => {
- // Okay, tough luck. The legacy loader was really completely uninitialized.
- return Err(Error::sys()).context(
- "In check_state: Legacy loader should not be called uninitialized.",
- );
- }
- (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
- (s, _) => panic!("Unknown legacy migrator state. {} ", s),
- }
- }
- }
-
- /// List all aliases for uid in the legacy database.
- pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
- let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);
-
- let uid = match (domain, namespace) {
- (Domain::APP, namespace) => namespace as u32,
- (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
- _ => return Ok(Vec::new()),
- };
- self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
- |v| {
- v.into_iter()
- .map(|alias| KeyDescriptor {
- domain,
- nspace: namespace,
- alias: Some(alias),
- blob: None,
- })
- .collect()
- },
- )
- }
-
- /// Sends the given closure to the migrator thread for execution after calling check_state.
- /// Returns None if the database was empty and the request was not executed.
- /// Otherwise returns Some with the result produced by the migration request.
- /// The loader state may transition to STATE_EMPTY during the execution of this function.
- fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
- where
- F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
- {
- // Short circuit if the database is empty or not initialized (error case).
- match self.check_state().context("In do_serialized: Checking state.") {
- Ok(LegacyMigrator::STATE_EMPTY) => return None,
- Ok(LegacyMigrator::STATE_READY) => {}
- Err(e) => return Some(Err(e)),
- Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
- }
-
- // We have established that there may be a key in the legacy database.
- // Now we schedule a migration request.
- let (sender, receiver) = channel();
- self.async_task.queue_hi(move |shelf| {
- // Get the migrator state from the shelf.
- // There may not be a state. This can happen if this migration request was scheduled
- // before a previous request established that the legacy database was empty
- // and removed the state from the shelf. Since we know now that the database
- // is empty, we can return None here.
- let (new_state, result) = if let Some(legacy_migrator_state) =
- shelf.get_downcast_mut::<LegacyMigratorState>()
- {
- let result = f(legacy_migrator_state);
- (legacy_migrator_state.check_empty(), Some(result))
- } else {
- (Self::STATE_EMPTY, None)
- };
-
- // If the migration request determined that the database is now empty, we discard
- // the state from the shelf to free up the resources we won't need any longer.
- if result.is_some() && new_state == Self::STATE_EMPTY {
- shelf.remove_downcast_ref::<LegacyMigratorState>();
- }
-
- // Send the result to the requester.
- if let Err(e) = sender.send((new_state, result)) {
- log::error!("In do_serialized. Error in sending the result. {:?}", e);
- }
- });
-
- let (new_state, result) = match receiver.recv() {
- Err(e) => {
- return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
- }
- Ok(r) => r,
- };
-
- // We can only transition to EMPTY but never back.
- // The migrator never creates any legacy blobs.
- if new_state == Self::STATE_EMPTY {
- self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
- }
-
- result
- }
-
- /// Runs the key_accessor function and returns its result. If it returns an error and the
- /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
- /// the legacy database to the new database and runs the key_accessor function again if
- /// the migration request was successful.
- pub fn with_try_migrate<F, T>(
- &self,
- key: &KeyDescriptor,
- caller_uid: u32,
- key_accessor: F,
- ) -> Result<T>
- where
- F: Fn() -> Result<T>,
- {
- let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);
-
- // Access the key and return on success.
- match key_accessor() {
- Ok(result) => return Ok(result),
- Err(e) => match e.root_cause().downcast_ref::<Error>() {
- Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
- _ => return Err(e),
- },
- }
-
- // Filter inputs. We can only load legacy app domain keys and some special rules due
- // to which we migrate keys transparently to an SELINUX domain.
- let uid = match key {
- KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
- KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
- match *nspace {
- Self::WIFI_NAMESPACE => Self::AID_WIFI,
- _ => {
- return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context(format!("No legacy keys for namespace {}", nspace))
- }
- }
- }
- _ => {
- return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("No legacy keys for key descriptor.")
- }
- };
-
- let key_clone = key.clone();
- let result = self
- .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
-
- if let Some(result) = result {
- result?;
- // After successful migration try again.
- key_accessor()
- } else {
- Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
- }
- }
-
- /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
- /// this function makes a migration request and on success retries the key_accessor.
- pub fn with_try_migrate_super_key<F, T>(
- &self,
- user_id: u32,
- pw: &Password,
- mut key_accessor: F,
- ) -> Result<Option<T>>
- where
- F: FnMut() -> Result<Option<T>>,
- {
- let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);
-
- match key_accessor() {
- Ok(Some(result)) => return Ok(Some(result)),
- Ok(None) => {}
- Err(e) => return Err(e),
- }
- let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
- let result = self.do_serialized(move |migrator_state| {
- migrator_state.check_and_migrate_super_key(user_id, &pw)
- });
-
- if let Some(result) = result {
- result?;
- // After successful migration try again.
- key_accessor()
- } else {
- Ok(None)
- }
- }
-
- /// Deletes all keys belonging to the given namespace, migrating them into the database
- /// for subsequent garbage collection if necessary.
- pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
- let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);
-
- let uid = match (domain, nspace) {
- (Domain::APP, nspace) => nspace as u32,
- (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
- // Nothing to do.
- _ => return Ok(()),
- };
-
- let result = self.do_serialized(move |migrator_state| {
- migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
- });
-
- result.unwrap_or(Ok(()))
- }
-
- /// Deletes all keys belonging to the given android user, migrating them into the database
- /// for subsequent garbage collection if necessary.
- pub fn bulk_delete_user(
- &self,
- user_id: u32,
- keep_non_super_encrypted_keys: bool,
- ) -> Result<()> {
- let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);
-
- let result = self.do_serialized(move |migrator_state| {
- migrator_state
- .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
- });
-
- result.unwrap_or(Ok(()))
- }
-
- /// Queries the legacy database for the presence of a super key for the given user.
- pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
- let result =
- self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
- result.unwrap_or(Ok(false))
- }
-}
-
-impl LegacyMigratorState {
- fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
- let sec_level = if is_strongbox {
- SecurityLevel::STRONGBOX
- } else {
- SecurityLevel::TRUSTED_ENVIRONMENT
- };
-
- self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
- anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
- })
- }
-
- fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
- self.legacy_loader
- .list_keystore_entries_for_uid(uid)
- .context("In list_uid: Trying to list legacy entries.")
- }
-
- /// This is a key migration request that must run in the migrator thread. This must
- /// be passed to do_serialized.
- fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
- let alias = key.alias.clone().ok_or_else(|| {
- anyhow::anyhow!(Error::sys()).context(concat!(
- "In check_and_migrate: Must be Some because ",
- "our caller must not have called us otherwise."
- ))
- })?;
-
- if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
- return Ok(());
- }
-
- if key.domain == Domain::APP {
- key.nspace = uid as i64;
- }
-
- // If the key is not found in the cache, try to load from the legacy database.
- let (km_blob_params, user_cert, ca_cert) = self
- .legacy_loader
- .load_by_uid_alias(uid, &alias, None)
- .context("In check_and_migrate: Trying to load legacy blob.")?;
- let result = match km_blob_params {
- Some((km_blob, params)) => {
- let is_strongbox = km_blob.is_strongbox();
- let (blob, mut blob_metadata) = match km_blob.take_value() {
- BlobValue::Encrypted { iv, tag, data } => {
- // Get super key id for user id.
- let user_id = uid_to_android_user(uid as u32);
-
- let super_key_id = match self
- .db
- .load_super_key(&USER_SUPER_KEY, user_id)
- .context("In check_and_migrate: Failed to load super key")?
- {
- Some((_, entry)) => entry.id(),
- None => {
- // This might be the first time we access the super key,
- // and it may not have been migrated. We cannot import
- // the legacy super_key key now, because we need to reencrypt
- // it which we cannot do if we are not unlocked, which we are
- // not because otherwise the key would have been migrated.
- // We can check though if the key exists. If it does,
- // we can return Locked. Otherwise, we can delete the
- // key and return NotFound, because the key will never
- // be unlocked again.
- if self.legacy_loader.has_super_key(user_id) {
- return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
- "In check_and_migrate: Cannot migrate super key of this ",
- "key while user is locked."
- ));
- } else {
- self.legacy_loader.remove_keystore_entry(uid, &alias).context(
- concat!(
- "In check_and_migrate: ",
- "Trying to remove obsolete key."
- ),
- )?;
- return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate: Obsolete key.");
- }
- }
- };
-
- let mut blob_metadata = BlobMetaData::new();
- blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
- blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
- blob_metadata
- .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
- (LegacyBlob::Vec(data), blob_metadata)
- }
- BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
- _ => {
- return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate: Legacy key has unexpected type.")
- }
- };
-
- let km_uuid = self
- .get_km_uuid(is_strongbox)
- .context("In check_and_migrate: Trying to get KM UUID")?;
- blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
-
- let mut metadata = KeyMetaData::new();
- let creation_date = DateTime::now()
- .context("In check_and_migrate: Trying to make creation time.")?;
- metadata.add(KeyMetaEntry::CreationDate(creation_date));
-
- // Store legacy key in the database.
- self.db
- .store_new_key(
- &key,
- KeyType::Client,
- ¶ms,
- &(&blob, &blob_metadata),
- &CertificateInfo::new(user_cert, ca_cert),
- &metadata,
- &km_uuid,
- )
- .context("In check_and_migrate.")?;
- Ok(())
- }
- None => {
- if let Some(ca_cert) = ca_cert {
- self.db
- .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
- .context("In check_and_migrate: Failed to insert new certificate.")?;
- Ok(())
- } else {
- Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate: Legacy key not found.")
- }
- }
- };
-
- match result {
- Ok(()) => {
- // Add the key to the migrated_keys list.
- self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
- // Delete legacy key from the file system
- self.legacy_loader
- .remove_keystore_entry(uid, &alias)
- .context("In check_and_migrate: Trying to remove migrated key.")?;
- Ok(())
- }
- Err(e) => Err(e),
- }
- }
-
- fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
- if self.recently_migrated_super_key.contains(&user_id) {
- return Ok(());
- }
-
- if let Some(super_key) = self
- .legacy_loader
- .load_super_key(user_id, pw)
- .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
- {
- let (blob, blob_metadata) =
- crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
- .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
-
- self.db
- .store_super_key(
- user_id,
- &USER_SUPER_KEY,
- &blob,
- &blob_metadata,
- &KeyMetaData::new(),
- )
- .context(concat!(
- "In check_and_migrate_super_key: ",
- "Trying to insert legacy super_key into the database."
- ))?;
- self.legacy_loader.remove_super_key(user_id);
- self.recently_migrated_super_key.insert(user_id);
- Ok(())
- } else {
- Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate_super_key: No key found do migrate.")
- }
- }
-
- /// Key migrator request to be run by do_serialized.
- /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
- fn bulk_delete(
- &mut self,
- bulk_delete_request: BulkDeleteRequest,
- keep_non_super_encrypted_keys: bool,
- ) -> Result<()> {
- let (aliases, user_id) = match bulk_delete_request {
- BulkDeleteRequest::Uid(uid) => (
- self.legacy_loader
- .list_keystore_entries_for_uid(uid)
- .context("In bulk_delete: Trying to get aliases for uid.")
- .map(|aliases| {
- let mut h = HashMap::<u32, HashSet<String>>::new();
- h.insert(uid, aliases.into_iter().collect());
- h
- })?,
- uid_to_android_user(uid),
- ),
- BulkDeleteRequest::User(user_id) => (
- self.legacy_loader
- .list_keystore_entries_for_user(user_id)
- .context("In bulk_delete: Trying to get aliases for user_id.")?,
- user_id,
- ),
- };
-
- let super_key_id = self
- .db
- .load_super_key(&USER_SUPER_KEY, user_id)
- .context("In bulk_delete: Failed to load super key")?
- .map(|(_, entry)| entry.id());
-
- for (uid, alias) in aliases
- .into_iter()
- .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
- .flatten()
- {
- let (km_blob_params, _, _) = self
- .legacy_loader
- .load_by_uid_alias(uid, &alias, None)
- .context("In bulk_delete: Trying to load legacy blob.")?;
-
- // Determine if the key needs special handling to be deleted.
- let (need_gc, is_super_encrypted) = km_blob_params
- .as_ref()
- .map(|(blob, params)| {
- (
- params.iter().any(|kp| {
- KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
- }),
- blob.is_encrypted(),
- )
- })
- .unwrap_or((false, false));
-
- if keep_non_super_encrypted_keys && !is_super_encrypted {
- continue;
- }
-
- if need_gc {
- let mark_deleted = match km_blob_params
- .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
- {
- Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
- let mut blob_metadata = BlobMetaData::new();
- if let (Ok(km_uuid), Some(super_key_id)) =
- (self.get_km_uuid(is_strongbox), super_key_id)
- {
- blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
- blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
- blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
- blob_metadata
- .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
- Some((LegacyBlob::Vec(data), blob_metadata))
- } else {
- // Oh well - we tried our best, but if we cannot determine which
- // KeyMint instance we have to send this blob to, we cannot
- // do more than delete the key from the file system.
- // And if we don't know which key wraps this key we cannot
- // unwrap it for KeyMint either.
- None
- }
- }
- Some((_, BlobValue::Decrypted(data))) => {
- Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
- }
- _ => None,
- };
-
- if let Some((blob, blob_metadata)) = mark_deleted {
- self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
- "In bulk_delete: Trying to insert deleted ",
- "blob into the database for garbage collection."
- ))?;
- }
- }
-
- self.legacy_loader
- .remove_keystore_entry(uid, &alias)
- .context("In bulk_delete: Trying to remove migrated key.")?;
- }
- Ok(())
- }
-
- fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
- Ok(self.recently_migrated_super_key.contains(&user_id)
- || self.legacy_loader.has_super_key(user_id))
- }
-
- fn check_empty(&self) -> u8 {
- if self.legacy_loader.is_empty().unwrap_or(false) {
- LegacyMigrator::STATE_EMPTY
- } else {
- LegacyMigrator::STATE_READY
- }
- }
-}
-
-enum LegacyBlob {
- Vec(Vec<u8>),
- ZVec(ZVec),
-}
-
-impl Deref for LegacyBlob {
- type Target = [u8];
-
- fn deref(&self) -> &Self::Target {
- match self {
- Self::Vec(v) => v,
- Self::ZVec(v) => v,
- }
- }
-}
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 8b629b1..4a23843 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -29,7 +29,7 @@
/// Internal Representation of Key Parameter and convenience functions.
pub mod key_parameter;
pub mod legacy_blob;
-pub mod legacy_migrator;
+pub mod legacy_importer;
pub mod maintenance;
pub mod metrics;
pub mod metrics_store;
@@ -40,12 +40,12 @@
pub mod security_level;
pub mod service;
pub mod shared_secret_negotiation;
-pub mod try_insert;
pub mod utils;
mod attestation_key_utils;
mod audit_log;
mod gc;
+mod km_compat;
mod super_key;
#[cfg(feature = "watchdog")]
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index 39958a3..1fca5d9 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -19,17 +19,17 @@
use crate::error::map_or_log_err;
use crate::error::Error;
use crate::globals::get_keymint_device;
-use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, LEGACY_IMPORTER, SUPER_KEY};
use crate::permission::{KeyPerm, KeystorePerm};
-use crate::super_key::UserState;
+use crate::super_key::{SuperKeyManager, UserState};
use crate::utils::{
- check_key_permission, check_keystore_permission, list_key_entries, watchdog as wd,
+ check_key_permission, check_keystore_permission, uid_to_android_user, watchdog as wd,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
IKeyMintDevice::IKeyMintDevice, SecurityLevel::SecurityLevel,
};
use android_security_maintenance::aidl::android::security::maintenance::{
- IKeystoreMaintenance::{BnKeystoreMaintenance, IKeystoreMaintenance, UID_SELF},
+ IKeystoreMaintenance::{BnKeystoreMaintenance, IKeystoreMaintenance},
UserState::UserState as AidlUserState,
};
use android_security_maintenance::binder::{
@@ -39,7 +39,6 @@
use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
use anyhow::{Context, Result};
use keystore2_crypto::Password;
-use keystore2_selinux as selinux;
/// Reexport Domain for the benefit of DeleteListener
pub use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
@@ -70,24 +69,25 @@
}
fn on_user_password_changed(user_id: i32, password: Option<Password>) -> Result<()> {
- //Check permission. Function should return if this failed. Therefore having '?' at the end
- //is very important.
+ // Check permission. Function should return if this failed. Therefore having '?' at the end
+ // is very important.
check_keystore_permission(KeystorePerm::ChangePassword)
.context("In on_user_password_changed.")?;
+ let mut skm = SUPER_KEY.write().unwrap();
+
if let Some(pw) = password.as_ref() {
DB.with(|db| {
- SUPER_KEY.unlock_screen_lock_bound_key(&mut db.borrow_mut(), user_id as u32, pw)
+ skm.unlock_screen_lock_bound_key(&mut db.borrow_mut(), user_id as u32, pw)
})
.context("In on_user_password_changed: unlock_screen_lock_bound_key failed")?;
}
match DB
.with(|db| {
- UserState::get_with_password_changed(
+ skm.reset_or_init_user_and_get_user_state(
&mut db.borrow_mut(),
- &LEGACY_MIGRATOR,
- &SUPER_KEY,
+ &LEGACY_IMPORTER,
user_id as u32,
password.as_ref(),
)
@@ -110,11 +110,11 @@
// Check permission. Function should return if this failed. Therefore having '?' at the end
// is very important.
check_keystore_permission(KeystorePerm::ChangeUser).context("In add_or_remove_user.")?;
+
DB.with(|db| {
- UserState::reset_user(
+ SUPER_KEY.write().unwrap().reset_user(
&mut db.borrow_mut(),
- &SUPER_KEY,
- &LEGACY_MIGRATOR,
+ &LEGACY_IMPORTER,
user_id as u32,
false,
)
@@ -129,7 +129,7 @@
// Permission check. Must return on error. Do not touch the '?'.
check_keystore_permission(KeystorePerm::ClearUID).context("In clear_namespace.")?;
- LEGACY_MIGRATOR
+ LEGACY_IMPORTER
.bulk_delete_uid(domain, nspace)
.context("In clear_namespace: Trying to delete legacy keys.")?;
DB.with(|db| db.borrow_mut().unbind_keys_for_namespace(domain, nspace))
@@ -145,7 +145,11 @@
check_keystore_permission(KeystorePerm::GetState).context("In get_state.")?;
let state = DB
.with(|db| {
- UserState::get(&mut db.borrow_mut(), &LEGACY_MIGRATOR, &SUPER_KEY, user_id as u32)
+ SUPER_KEY.read().unwrap().get_user_state(
+ &mut db.borrow_mut(),
+ &LEGACY_IMPORTER,
+ user_id as u32,
+ )
})
.context("In get_state. Trying to get UserState.")?;
@@ -158,7 +162,7 @@
fn call_with_watchdog<F>(sec_level: SecurityLevel, name: &'static str, op: &F) -> Result<()>
where
- F: Fn(Strong<dyn IKeyMintDevice>) -> binder::public_api::Result<()>,
+ F: Fn(Strong<dyn IKeyMintDevice>) -> binder::Result<()>,
{
let (km_dev, _, _) = get_keymint_device(&sec_level)
.context("In call_with_watchdog: getting keymint device")?;
@@ -172,7 +176,7 @@
fn call_on_all_security_levels<F>(name: &'static str, op: F) -> Result<()>
where
- F: Fn(Strong<dyn IKeyMintDevice>) -> binder::public_api::Result<()>,
+ F: Fn(Strong<dyn IKeyMintDevice>) -> binder::Result<()>,
{
let sec_levels = [
(SecurityLevel::TRUSTED_ENVIRONMENT, "TRUSTED_ENVIRONMENT"),
@@ -202,7 +206,9 @@
.context("In early_boot_ended. Checking permission")?;
log::info!("In early_boot_ended.");
- if let Err(e) = DB.with(|db| SUPER_KEY.set_up_boot_level_cache(&mut db.borrow_mut())) {
+ if let Err(e) =
+ DB.with(|db| SuperKeyManager::set_up_boot_level_cache(&SUPER_KEY, &mut db.borrow_mut()))
+ {
log::error!("SUPER_KEY.set_up_boot_level_cache failed:\n{:?}\n:(", e);
}
Maintenance::call_on_all_security_levels("earlyBootEnded", |dev| dev.earlyBootEnded())
@@ -217,65 +223,53 @@
}
fn migrate_key_namespace(source: &KeyDescriptor, destination: &KeyDescriptor) -> Result<()> {
- let migrate_any_key_permission =
- check_keystore_permission(KeystorePerm::MigrateAnyKey).is_ok();
+ let calling_uid = ThreadState::get_calling_uid();
- let src_uid = match source.domain {
- Domain::SELINUX | Domain::KEY_ID => ThreadState::get_calling_uid(),
- Domain::APP if source.nspace == UID_SELF.into() => ThreadState::get_calling_uid(),
- Domain::APP if source.nspace != UID_SELF.into() && migrate_any_key_permission => {
- source.nspace as u32
- }
+ match source.domain {
+ Domain::SELINUX | Domain::KEY_ID | Domain::APP => (),
_ => {
return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT)).context(
- "In migrate_key_namespace:
+ "In migrate_key_namespace: \
Source domain must be one of APP, SELINUX, or KEY_ID.",
)
}
};
- let dest_uid = match destination.domain {
- Domain::SELINUX => ThreadState::get_calling_uid(),
- Domain::APP if destination.nspace == UID_SELF.into() => ThreadState::get_calling_uid(),
- Domain::APP if destination.nspace != UID_SELF.into() && migrate_any_key_permission => {
- destination.nspace as u32
- }
+ match destination.domain {
+ Domain::SELINUX | Domain::APP => (),
_ => {
return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT)).context(
- "In migrate_key_namespace:
+ "In migrate_key_namespace: \
Destination domain must be one of APP or SELINUX.",
)
}
};
+ let user_id = uid_to_android_user(calling_uid);
+
+ let super_key = SUPER_KEY.read().unwrap().get_per_boot_key_by_user_id(user_id);
+
DB.with(|db| {
- let (key_id_guard, _) = LEGACY_MIGRATOR
- .with_try_migrate(source, src_uid, || {
+ let (key_id_guard, _) = LEGACY_IMPORTER
+ .with_try_import(source, calling_uid, super_key, || {
db.borrow_mut().load_key_entry(
source,
KeyType::Client,
KeyEntryLoadBits::NONE,
- src_uid,
+ calling_uid,
|k, av| {
- if migrate_any_key_permission {
- Ok(())
- } else {
- check_key_permission(KeyPerm::Use, k, &av)?;
- check_key_permission(KeyPerm::Delete, k, &av)?;
- check_key_permission(KeyPerm::Grant, k, &av)
- }
+ check_key_permission(KeyPerm::Use, k, &av)?;
+ check_key_permission(KeyPerm::Delete, k, &av)?;
+ check_key_permission(KeyPerm::Grant, k, &av)
},
)
})
.context("In migrate_key_namespace: Failed to load key blob.")?;
-
- db.borrow_mut().migrate_key_namespace(key_id_guard, destination, dest_uid, |k| {
- if migrate_any_key_permission {
- Ok(())
- } else {
+ {
+ db.borrow_mut().migrate_key_namespace(key_id_guard, destination, calling_uid, |k| {
check_key_permission(KeyPerm::Rebind, k, &None)
- }
- })
+ })
+ }
})
}
@@ -287,30 +281,6 @@
Maintenance::call_on_all_security_levels("deleteAllKeys", |dev| dev.deleteAllKeys())
}
-
- fn list_entries(domain: Domain, nspace: i64) -> Result<Vec<KeyDescriptor>> {
- let k = match domain {
- Domain::APP | Domain::SELINUX => KeyDescriptor{domain, nspace, ..Default::default()},
- _ => return Err(Error::perm()).context(
- "In list_entries: List entries is only supported for Domain::APP and Domain::SELINUX."
- ),
- };
-
- // The caller has to have either GetInfo for the namespace or List permission
- check_key_permission(KeyPerm::GetInfo, &k, &None)
- .or_else(|e| {
- if Some(&selinux::Error::PermissionDenied)
- == e.root_cause().downcast_ref::<selinux::Error>()
- {
- check_keystore_permission(KeystorePerm::List)
- } else {
- Err(e)
- }
- })
- .context("In list_entries: While checking key and keystore permission.")?;
-
- DB.with(|db| list_key_entries(&mut db.borrow_mut(), domain, nspace))
- }
}
impl Interface for Maintenance {}
@@ -360,11 +330,6 @@
map_or_log_err(Self::migrate_key_namespace(source, destination), Ok)
}
- fn listEntries(&self, domain: Domain, namespace: i64) -> BinderResult<Vec<KeyDescriptor>> {
- let _wp = wd::watch_millis("IKeystoreMaintenance::listEntries", 500);
- map_or_log_err(Self::list_entries(domain, namespace), Ok)
- }
-
fn deleteAllKeys(&self) -> BinderResult<()> {
let _wp = wd::watch_millis("IKeystoreMaintenance::deleteAllKeys", 500);
map_or_log_err(Self::delete_all_keys(), Ok)
diff --git a/keystore2/src/metrics_store.rs b/keystore2/src/metrics_store.rs
index b18d84c..b6f1343 100644
--- a/keystore2/src/metrics_store.rs
+++ b/keystore2/src/metrics_store.rs
@@ -649,6 +649,7 @@
pub fn read_keystore_crash_count() -> Result<i32> {
rustutils::system_properties::read("keystore.crash_count")
.context("In read_keystore_crash_count: Failed read property.")?
+ .context("In read_keystore_crash_count: Property not set.")?
.parse::<i32>()
.map_err(std::convert::Into::into)
}
diff --git a/keystore2/src/operation.rs b/keystore2/src/operation.rs
index 7e08f4e..5da3b32 100644
--- a/keystore2/src/operation.rs
+++ b/keystore2/src/operation.rs
@@ -493,7 +493,7 @@
/// owner uid and returns a new Operation wrapped in a `std::sync::Arc`.
pub fn create_operation(
&self,
- km_op: binder::public_api::Strong<dyn IKeyMintOperation>,
+ km_op: binder::Strong<dyn IKeyMintOperation>,
owner: u32,
auth_info: AuthInfo,
forced: bool,
@@ -771,9 +771,7 @@
/// BnKeystoreOperation proxy object. It also enables
/// `BinderFeatures::set_requesting_sid` on the new interface, because
/// we need it for checking Keystore permissions.
- pub fn new_native_binder(
- operation: Arc<Operation>,
- ) -> binder::public_api::Strong<dyn IKeystoreOperation> {
+ pub fn new_native_binder(operation: Arc<Operation>) -> binder::Strong<dyn IKeystoreOperation> {
BnKeystoreOperation::new_binder(
Self { operation: Mutex::new(Some(operation)) },
BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
@@ -821,7 +819,7 @@
impl binder::Interface for KeystoreOperation {}
impl IKeystoreOperation for KeystoreOperation {
- fn updateAad(&self, aad_input: &[u8]) -> binder::public_api::Result<()> {
+ fn updateAad(&self, aad_input: &[u8]) -> binder::Result<()> {
let _wp = wd::watch_millis("IKeystoreOperation::updateAad", 500);
map_or_log_err(
self.with_locked_operation(
@@ -832,7 +830,7 @@
)
}
- fn update(&self, input: &[u8]) -> binder::public_api::Result<Option<Vec<u8>>> {
+ fn update(&self, input: &[u8]) -> binder::Result<Option<Vec<u8>>> {
let _wp = wd::watch_millis("IKeystoreOperation::update", 500);
map_or_log_err(
self.with_locked_operation(
@@ -846,7 +844,7 @@
&self,
input: Option<&[u8]>,
signature: Option<&[u8]>,
- ) -> binder::public_api::Result<Option<Vec<u8>>> {
+ ) -> binder::Result<Option<Vec<u8>>> {
let _wp = wd::watch_millis("IKeystoreOperation::finish", 500);
map_or_log_err(
self.with_locked_operation(
@@ -857,7 +855,7 @@
)
}
- fn abort(&self) -> binder::public_api::Result<()> {
+ fn abort(&self) -> binder::Result<()> {
let _wp = wd::watch_millis("IKeystoreOperation::abort", 500);
map_err_with(
self.with_locked_operation(
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index e6d61b0..22509c4 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -145,10 +145,9 @@
/// Checked when IKeystoreMaintenance::deleteAllKeys is called.
#[selinux(name = delete_all_keys)]
DeleteAllKeys,
- /// Checked when migrating any key from any namespace to any other namespace. It was
- /// introduced for migrating keys when an app leaves a sharedUserId.
- #[selinux(name = migrate_any_key)]
- MigrateAnyKey,
+ /// Checked on calls to IRemotelyProvisionedKeyPool::getAttestationKey
+ #[selinux(name = get_attestation_key)]
+ GetAttestationKey,
}
);
diff --git a/keystore2/src/raw_device.rs b/keystore2/src/raw_device.rs
index 0ee3db0..4ce9dce 100644
--- a/keystore2/src/raw_device.rs
+++ b/keystore2/src/raw_device.rs
@@ -16,8 +16,9 @@
use crate::{
database::{
- BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, KeyEntry, KeyEntryLoadBits,
- KeyIdGuard, KeyMetaData, KeyMetaEntry, KeyType, KeystoreDB, SubComponentType, Uuid,
+ BlobInfo, BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, KeyEntry,
+ KeyEntryLoadBits, KeyIdGuard, KeyMetaData, KeyMetaEntry, KeyType, KeystoreDB,
+ SubComponentType, Uuid,
},
error::{map_km_error, Error, ErrorCode},
globals::get_keymint_device,
@@ -125,7 +126,7 @@
key_desc,
key_type,
&key_parameters,
- &(&creation_result.keyBlob, &blob_metadata),
+ &BlobInfo::new(&creation_result.keyBlob, &blob_metadata),
&CertificateInfo::new(None, None),
&key_metadata,
&self.km_uuid,
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index 66e1988..afbf475 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -30,11 +30,14 @@
};
use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
- IRemoteProvisioning::IRemoteProvisioning, ImplInfo::ImplInfo,
+ IRemoteProvisioning::IRemoteProvisioning,
+ IRemotelyProvisionedKeyPool::BnRemotelyProvisionedKeyPool,
+ IRemotelyProvisionedKeyPool::IRemotelyProvisionedKeyPool, ImplInfo::ImplInfo,
+ RemotelyProvisionedKey::RemotelyProvisionedKey,
};
use android_security_remoteprovisioning::binder::{BinderFeatures, Strong};
use android_system_keystore2::aidl::android::system::keystore2::{
- Domain::Domain, KeyDescriptor::KeyDescriptor,
+ Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
};
use anyhow::{Context, Result};
use keystore2_crypto::parse_subject_from_certificate;
@@ -42,11 +45,12 @@
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, Ordering};
-use crate::database::{CertificateChain, KeystoreDB, Uuid};
+use crate::database::{CertificateChain, KeyIdGuard, KeystoreDB, Uuid};
use crate::error::{self, map_or_log_err, map_rem_prov_error, Error};
use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
use crate::metrics_store::log_rkp_error_stats;
-use crate::utils::watchdog as wd;
+use crate::permission::KeystorePerm;
+use crate::utils::{check_keystore_permission, watchdog as wd};
use android_security_metrics::aidl::android::security::metrics::RkpError::RkpError as MetricsRkpError;
/// Contains helper functions to check if remote provisioning is enabled on the system and, if so,
@@ -56,6 +60,7 @@
security_level: SecurityLevel,
km_uuid: Uuid,
is_hal_present: AtomicBool,
+ is_rkp_only: bool,
}
static COSE_KEY_XCOORD: Value = Value::Integer(-2);
@@ -66,7 +71,30 @@
impl RemProvState {
/// Creates a RemProvState struct.
pub fn new(security_level: SecurityLevel, km_uuid: Uuid) -> Self {
- Self { security_level, km_uuid, is_hal_present: AtomicBool::new(true) }
+ Self {
+ security_level,
+ km_uuid,
+ is_hal_present: AtomicBool::new(true),
+ is_rkp_only: Self::read_is_rkp_only_property(security_level),
+ }
+ }
+
+ /// Returns the uuid for the KM instance attached to this RemProvState struct.
+ pub fn get_uuid(&self) -> Uuid {
+ self.km_uuid
+ }
+
+ fn read_is_rkp_only_property(security_level: SecurityLevel) -> bool {
+ let default_value = false;
+
+ let property_name = match security_level {
+ SecurityLevel::STRONGBOX => "ro.remote_provisioning.strongbox.rkp_only",
+ SecurityLevel::TRUSTED_ENVIRONMENT => "ro.remote_provisioning.tee.rkp_only",
+ _ => return default_value,
+ };
+
+ rustutils::system_properties::read_bool(property_name, default_value)
+ .unwrap_or(default_value)
}
/// Checks if remote provisioning is enabled and partially caches the result. On a hybrid system
@@ -90,70 +118,6 @@
Ok(pool_status.total != 0)
}
- /// Fetches a remote provisioning attestation key and certificate chain inside of the
- /// returned `CertificateChain` struct if one exists for the given caller_uid. If one has not
- /// been assigned, this function will assign it. If there are no signed attestation keys
- /// available to be assigned, it will return the ResponseCode `OUT_OF_KEYS`
- fn get_rem_prov_attest_key(
- &self,
- key: &KeyDescriptor,
- caller_uid: u32,
- db: &mut KeystoreDB,
- ) -> Result<Option<CertificateChain>> {
- match key.domain {
- Domain::APP => {
- // Attempt to get an Attestation Key once. If it fails, then the app doesn't
- // have a valid chain assigned to it. The helper function will return None after
- // attempting to assign a key. An error will be thrown if the pool is simply out
- // of usable keys. Then another attempt to fetch the just-assigned key will be
- // made. If this fails too, something is very wrong.
- self.get_rem_prov_attest_key_helper(key, caller_uid, db)
- .context("In get_rem_prov_attest_key: Failed to get a key")?
- .map_or_else(
- || self.get_rem_prov_attest_key_helper(key, caller_uid, db),
- |v| Ok(Some(v)),
- )
- .context(concat!(
- "In get_rem_prov_attest_key: Failed to get a key after",
- "attempting to assign one."
- ))?
- .map_or_else(
- || {
- Err(Error::sys()).context(concat!(
- "In get_rem_prov_attest_key: Attempted to assign a ",
- "key and failed silently. Something is very wrong."
- ))
- },
- |cert_chain| Ok(Some(cert_chain)),
- )
- }
- _ => Ok(None),
- }
- }
-
- /// Returns None if an AttestationKey fails to be assigned. Errors if no keys are available.
- fn get_rem_prov_attest_key_helper(
- &self,
- key: &KeyDescriptor,
- caller_uid: u32,
- db: &mut KeystoreDB,
- ) -> Result<Option<CertificateChain>> {
- let cert_chain = db
- .retrieve_attestation_key_and_cert_chain(key.domain, caller_uid as i64, &self.km_uuid)
- .context("In get_rem_prov_attest_key_helper: Failed to retrieve a key + cert chain")?;
- match cert_chain {
- Some(cert_chain) => Ok(Some(cert_chain)),
- // Either this app needs to be assigned a key, or the pool is empty. An error will
- // be thrown if there is no key available to assign. This will indicate that the app
- // should be nudged to provision more keys so keystore can retry.
- None => {
- db.assign_attestation_key(key.domain, caller_uid as i64, &self.km_uuid)
- .context("In get_rem_prov_attest_key_helper: Failed to assign a key")?;
- Ok(None)
- }
- }
- }
-
fn is_asymmetric_key(&self, params: &[KeyParameter]) -> bool {
params.iter().any(|kp| {
matches!(
@@ -181,7 +145,7 @@
caller_uid: u32,
params: &[KeyParameter],
db: &mut KeystoreDB,
- ) -> Result<Option<(AttestationKey, Certificate)>> {
+ ) -> Result<Option<(KeyIdGuard, AttestationKey, Certificate)>> {
if !self.is_asymmetric_key(params) || !self.check_rem_prov_enabled(db)? {
// There is no remote provisioning component for this security level on the
// device. Return None so the underlying KM instance knows to use its
@@ -189,20 +153,21 @@
// and therefore will not be attested.
Ok(None)
} else {
- match self.get_rem_prov_attest_key(key, caller_uid, db) {
+ match get_rem_prov_attest_key(key.domain, caller_uid, db, &self.km_uuid) {
Err(e) => {
log::error!(
- concat!(
- "In get_remote_provisioning_key_and_certs: Failed to get ",
- "attestation key. {:?}"
- ),
+ "In get_remote_provisioning_key_and_certs: Error occurred: {:?}",
e
);
+ if self.is_rkp_only {
+ return Err(e);
+ }
log_rkp_error_stats(MetricsRkpError::FALL_BACK_DURING_HYBRID);
Ok(None)
}
Ok(v) => match v {
- Some(cert_chain) => Ok(Some((
+ Some((guard, cert_chain)) => Ok(Some((
+ guard,
AttestationKey {
keyBlob: cert_chain.private_key.to_vec(),
attestKeyParams: vec![],
@@ -233,9 +198,9 @@
fn get_dev_by_sec_level(
&self,
sec_level: &SecurityLevel,
- ) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
+ ) -> Result<&dyn IRemotelyProvisionedComponent> {
if let Some(dev) = self.device_by_sec_level.get(sec_level) {
- Ok(dev.clone())
+ Ok(dev.as_ref())
} else {
Err(error::Error::sys()).context(concat!(
"In get_dev_by_sec_level: Remote instance for requested security level",
@@ -346,23 +311,21 @@
/// here.
pub fn provision_cert_chain(
&self,
+ db: &mut KeystoreDB,
public_key: &[u8],
batch_cert: &[u8],
certs: &[u8],
expiration_date: i64,
sec_level: SecurityLevel,
) -> Result<()> {
- DB.with::<_, Result<()>>(|db| {
- let mut db = db.borrow_mut();
- let (_, _, uuid) = get_keymint_device(&sec_level)?;
- db.store_signed_attestation_certificate_chain(
- public_key,
- batch_cert,
- certs, /* DER encoded certificate chain */
- expiration_date,
- &uuid,
- )
- })
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ db.store_signed_attestation_certificate_chain(
+ public_key,
+ batch_cert,
+ certs, /* DER encoded certificate chain */
+ expiration_date,
+ &uuid,
+ )
}
fn parse_cose_mac0_for_coords(data: &[u8]) -> Result<Vec<u8>> {
@@ -429,19 +392,25 @@
/// `is_test_mode` indicates whether or not the returned public key should be marked as being
/// for testing in order to differentiate them from private keys. If the call is successful,
/// the key pair is then added to the database.
- pub fn generate_key_pair(&self, is_test_mode: bool, sec_level: SecurityLevel) -> Result<()> {
+ pub fn generate_key_pair(
+ &self,
+ db: &mut KeystoreDB,
+ is_test_mode: bool,
+ sec_level: SecurityLevel,
+ ) -> Result<()> {
let (_, _, uuid) = get_keymint_device(&sec_level)?;
- let dev = self.get_dev_by_sec_level(&sec_level)?;
+ let dev = self.get_dev_by_sec_level(&sec_level).context(format!(
+ "In generate_key_pair: Failed to get device for security level {:?}",
+ sec_level
+ ))?;
let mut maced_key = MacedPublicKey { macedKey: Vec::new() };
let priv_key =
map_rem_prov_error(dev.generateEcdsaP256KeyPair(is_test_mode, &mut maced_key))
.context("In generate_key_pair: Failed to generated ECDSA keypair.")?;
let raw_key = Self::parse_cose_mac0_for_coords(&maced_key.macedKey)
.context("In generate_key_pair: Failed to parse raw key")?;
- DB.with::<_, Result<()>>(|db| {
- let mut db = db.borrow_mut();
- db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)
- })
+ db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)
+ .context("In generate_key_pair: Failed to insert attestation key entry")
}
/// Checks the security level of each available IRemotelyProvisionedComponent hal and returns
@@ -480,6 +449,70 @@
})
}
+/// Fetches a remote provisioning attestation key and certificate chain inside of the
+/// returned `CertificateChain` struct if one exists for the given caller_uid. If one has not
+/// been assigned, this function will assign it. If there are no signed attestation keys
+/// available to be assigned, it will return the ResponseCode `OUT_OF_KEYS`
+fn get_rem_prov_attest_key(
+ domain: Domain,
+ caller_uid: u32,
+ db: &mut KeystoreDB,
+ km_uuid: &Uuid,
+) -> Result<Option<(KeyIdGuard, CertificateChain)>> {
+ match domain {
+ Domain::APP => {
+ // Attempt to get an Attestation Key once. If it fails, then the app doesn't
+ // have a valid chain assigned to it. The helper function will return None after
+ // attempting to assign a key. An error will be thrown if the pool is simply out
+ // of usable keys. Then another attempt to fetch the just-assigned key will be
+ // made. If this fails too, something is very wrong.
+ get_rem_prov_attest_key_helper(domain, caller_uid, db, km_uuid)
+ .context("In get_rem_prov_attest_key: Failed to get a key")?
+ .map_or_else(
+ || get_rem_prov_attest_key_helper(domain, caller_uid, db, km_uuid),
+ |v| Ok(Some(v)),
+ )
+ .context(concat!(
+ "In get_rem_prov_attest_key: Failed to get a key after ",
+ "attempting to assign one."
+ ))?
+ .map_or_else(
+ || {
+ Err(Error::sys()).context(concat!(
+ "In get_rem_prov_attest_key: Attempted to assign a ",
+ "key and failed silently. Something is very wrong."
+ ))
+ },
+ |(guard, cert_chain)| Ok(Some((guard, cert_chain))),
+ )
+ }
+ _ => Ok(None),
+ }
+}
+
+/// Returns None if an AttestationKey fails to be assigned. Errors if no keys are available.
+fn get_rem_prov_attest_key_helper(
+ domain: Domain,
+ caller_uid: u32,
+ db: &mut KeystoreDB,
+ km_uuid: &Uuid,
+) -> Result<Option<(KeyIdGuard, CertificateChain)>> {
+ let guard_and_chain = db
+ .retrieve_attestation_key_and_cert_chain(domain, caller_uid as i64, km_uuid)
+ .context("In get_rem_prov_attest_key_helper: Failed to retrieve a key + cert chain")?;
+ match guard_and_chain {
+ Some((guard, cert_chain)) => Ok(Some((guard, cert_chain))),
+ // Either this app needs to be assigned a key, or the pool is empty. An error will
+ // be thrown if there is no key available to assign. This will indicate that the app
+ // should be nudged to provision more keys so keystore can retry.
+ None => {
+ db.assign_attestation_key(domain, caller_uid as i64, km_uuid)
+ .context("In get_rem_prov_attest_key_helper: Failed to assign a key")?;
+ Ok(None)
+ }
+ }
+}
+
impl binder::Interface for RemoteProvisioningService {}
// Implementation of IRemoteProvisioning. See AIDL spec at
@@ -489,7 +522,7 @@
&self,
expired_by: i64,
sec_level: SecurityLevel,
- ) -> binder::public_api::Result<AttestationPoolStatus> {
+ ) -> binder::Result<AttestationPoolStatus> {
let _wp = wd::watch_millis("IRemoteProvisioning::getPoolStatus", 500);
map_or_log_err(get_pool_status(expired_by, sec_level), Ok)
}
@@ -503,7 +536,7 @@
sec_level: SecurityLevel,
protected_data: &mut ProtectedData,
device_info: &mut DeviceInfo,
- ) -> binder::public_api::Result<Vec<u8>> {
+ ) -> binder::Result<Vec<u8>> {
let _wp = wd::watch_millis("IRemoteProvisioning::generateCsr", 500);
map_or_log_err(
self.generate_csr(
@@ -526,39 +559,242 @@
certs: &[u8],
expiration_date: i64,
sec_level: SecurityLevel,
- ) -> binder::public_api::Result<()> {
+ ) -> binder::Result<()> {
let _wp = wd::watch_millis("IRemoteProvisioning::provisionCertChain", 500);
- map_or_log_err(
- self.provision_cert_chain(public_key, batch_cert, certs, expiration_date, sec_level),
- Ok,
- )
+ DB.with::<_, binder::Result<()>>(|db| {
+ map_or_log_err(
+ self.provision_cert_chain(
+ &mut db.borrow_mut(),
+ public_key,
+ batch_cert,
+ certs,
+ expiration_date,
+ sec_level,
+ ),
+ Ok,
+ )
+ })
}
- fn generateKeyPair(
- &self,
- is_test_mode: bool,
- sec_level: SecurityLevel,
- ) -> binder::public_api::Result<()> {
+ fn generateKeyPair(&self, is_test_mode: bool, sec_level: SecurityLevel) -> binder::Result<()> {
let _wp = wd::watch_millis("IRemoteProvisioning::generateKeyPair", 500);
- map_or_log_err(self.generate_key_pair(is_test_mode, sec_level), Ok)
+ DB.with::<_, binder::Result<()>>(|db| {
+ map_or_log_err(
+ self.generate_key_pair(&mut db.borrow_mut(), is_test_mode, sec_level),
+ Ok,
+ )
+ })
}
- fn getImplementationInfo(&self) -> binder::public_api::Result<Vec<ImplInfo>> {
+ fn getImplementationInfo(&self) -> binder::Result<Vec<ImplInfo>> {
let _wp = wd::watch_millis("IRemoteProvisioning::getSecurityLevels", 500);
map_or_log_err(self.get_implementation_info(), Ok)
}
- fn deleteAllKeys(&self) -> binder::public_api::Result<i64> {
+ fn deleteAllKeys(&self) -> binder::Result<i64> {
let _wp = wd::watch_millis("IRemoteProvisioning::deleteAllKeys", 500);
map_or_log_err(self.delete_all_keys(), Ok)
}
}
+/// Implementation of the IRemotelyProvisionedKeyPool service.
+#[derive(Default)]
+pub struct RemotelyProvisionedKeyPoolService {
+ unique_id_to_sec_level: HashMap<String, SecurityLevel>,
+}
+
+impl RemotelyProvisionedKeyPoolService {
+ /// Fetches a remotely provisioned certificate chain and key for the given client uid that
+ /// was provisioned using the IRemotelyProvisionedComponent with the given id. The same key
+ /// will be returned for a given caller_uid on every request. If there are no attestation keys
+ /// available, `OUT_OF_KEYS` is returned.
+ fn get_attestation_key(
+ &self,
+ db: &mut KeystoreDB,
+ caller_uid: i32,
+ irpc_id: &str,
+ ) -> Result<RemotelyProvisionedKey> {
+ log::info!("get_attestation_key(self, {}, {})", caller_uid, irpc_id);
+
+ let sec_level = self
+ .unique_id_to_sec_level
+ .get(irpc_id)
+ .ok_or(Error::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context(format!("In get_attestation_key: unknown irpc id '{}'", irpc_id))?;
+ let (_, _, km_uuid) = get_keymint_device(sec_level)?;
+
+ let guard_and_cert_chain =
+ get_rem_prov_attest_key(Domain::APP, caller_uid as u32, db, &km_uuid)
+ .context("In get_attestation_key")?;
+ match guard_and_cert_chain {
+ Some((_, chain)) => Ok(RemotelyProvisionedKey {
+ keyBlob: chain.private_key.to_vec(),
+ encodedCertChain: chain.cert_chain,
+ }),
+ // It should be impossible to get `None`, but handle it just in case as a
+ // precaution against future behavioral changes in `get_rem_prov_attest_key`.
+ None => Err(error::Error::Rc(ResponseCode::OUT_OF_KEYS))
+ .context("In get_attestation_key: No available attestation keys"),
+ }
+ }
+
+ /// Creates a new instance of the remotely provisioned key pool service, used for fetching
+ /// remotely provisioned attestation keys.
+ pub fn new_native_binder() -> Result<Strong<dyn IRemotelyProvisionedKeyPool>> {
+ let mut result: Self = Default::default();
+
+ let dev = get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
+ .context("In new_native_binder: Failed to get TEE Remote Provisioner instance.")?;
+ if let Some(id) = dev.getHardwareInfo()?.uniqueId {
+ result.unique_id_to_sec_level.insert(id, SecurityLevel::TRUSTED_ENVIRONMENT);
+ }
+
+ if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
+ if let Some(id) = dev.getHardwareInfo()?.uniqueId {
+ if result.unique_id_to_sec_level.contains_key(&id) {
+ anyhow::bail!("In new_native_binder: duplicate irpc id found: '{}'", id)
+ }
+ result.unique_id_to_sec_level.insert(id, SecurityLevel::STRONGBOX);
+ }
+ }
+
+ // If none of the remotely provisioned components have unique ids, then we shouldn't
+ // bother publishing the service, as it's impossible to match keys with their backends.
+ if result.unique_id_to_sec_level.is_empty() {
+ anyhow::bail!(
+ "In new_native_binder: No remotely provisioned components have unique ids"
+ )
+ }
+
+ Ok(BnRemotelyProvisionedKeyPool::new_binder(
+ result,
+ BinderFeatures { set_requesting_sid: true, ..BinderFeatures::default() },
+ ))
+ }
+}
+
+impl binder::Interface for RemotelyProvisionedKeyPoolService {}
+
+// Implementation of IRemotelyProvisionedKeyPool. See AIDL spec at
+// :aidl/android/security/remoteprovisioning/IRemotelyProvisionedKeyPool.aidl
+impl IRemotelyProvisionedKeyPool for RemotelyProvisionedKeyPoolService {
+ fn getAttestationKey(
+ &self,
+ caller_uid: i32,
+ irpc_id: &str,
+ ) -> binder::Result<RemotelyProvisionedKey> {
+ let _wp = wd::watch_millis("IRemotelyProvisionedKeyPool::getAttestationKey", 500);
+ map_or_log_err(check_keystore_permission(KeystorePerm::GetAttestationKey), Ok)?;
+ DB.with::<_, binder::Result<RemotelyProvisionedKey>>(|db| {
+ map_or_log_err(self.get_attestation_key(&mut db.borrow_mut(), caller_uid, irpc_id), Ok)
+ })
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
use serde_cbor::Value;
use std::collections::BTreeMap;
+ use std::sync::{Arc, Mutex};
+ use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ RpcHardwareInfo::RpcHardwareInfo,
+ };
+
+ #[derive(Default)]
+ struct MockRemotelyProvisionedComponentValues {
+ hw_info: RpcHardwareInfo,
+ private_key: Vec<u8>,
+ maced_public_key: Vec<u8>,
+ }
+
+ // binder::Interface requires the Send trait, so we have to use a Mutex even though the test
+ // is single threaded.
+ #[derive(Default)]
+ struct MockRemotelyProvisionedComponent(Arc<Mutex<MockRemotelyProvisionedComponentValues>>);
+
+ impl binder::Interface for MockRemotelyProvisionedComponent {}
+
+ impl IRemotelyProvisionedComponent for MockRemotelyProvisionedComponent {
+ fn getHardwareInfo(&self) -> binder::Result<RpcHardwareInfo> {
+ Ok(self.0.lock().unwrap().hw_info.clone())
+ }
+
+ fn generateEcdsaP256KeyPair(
+ &self,
+ test_mode: bool,
+ maced_public_key: &mut MacedPublicKey,
+ ) -> binder::Result<Vec<u8>> {
+ assert!(test_mode);
+ maced_public_key.macedKey = self.0.lock().unwrap().maced_public_key.clone();
+ Ok(self.0.lock().unwrap().private_key.clone())
+ }
+
+ fn generateCertificateRequest(
+ &self,
+ _test_mode: bool,
+ _keys_to_sign: &[MacedPublicKey],
+ _eek: &[u8],
+ _challenge: &[u8],
+ _device_info: &mut DeviceInfo,
+ _protected_data: &mut ProtectedData,
+ ) -> binder::Result<Vec<u8>> {
+ Err(binder::StatusCode::INVALID_OPERATION.into())
+ }
+ }
+
+ // Hard coded cert that can be parsed -- the content doesn't matter for testing, only that it's valid.
+ fn get_fake_cert() -> Vec<u8> {
+ vec![
+ 0x30, 0x82, 0x01, 0xbb, 0x30, 0x82, 0x01, 0x61, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02,
+ 0x14, 0x3a, 0xd5, 0x67, 0xce, 0xfe, 0x93, 0xe1, 0xea, 0xb7, 0xe4, 0xbf, 0x64, 0x19,
+ 0xa4, 0x11, 0xe1, 0x87, 0x40, 0x20, 0x37, 0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48,
+ 0xce, 0x3d, 0x04, 0x03, 0x02, 0x30, 0x33, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55,
+ 0x04, 0x06, 0x13, 0x02, 0x55, 0x54, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04,
+ 0x08, 0x0c, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31,
+ 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x30, 0x1e, 0x17, 0x0d, 0x32, 0x31, 0x31, 0x32, 0x31, 0x30, 0x32, 0x32,
+ 0x30, 0x38, 0x35, 0x32, 0x5a, 0x17, 0x0d, 0x34, 0x39, 0x30, 0x34, 0x32, 0x36, 0x32,
+ 0x32, 0x30, 0x38, 0x35, 0x32, 0x5a, 0x30, 0x33, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03,
+ 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x54, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55,
+ 0x04, 0x08, 0x0c, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x30, 0x59, 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d,
+ 0x02, 0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0x03, 0x42,
+ 0x00, 0x04, 0x1e, 0xac, 0x0c, 0xe0, 0x0d, 0xc5, 0x25, 0x84, 0x1b, 0xd2, 0x77, 0x2d,
+ 0xe7, 0xba, 0xf1, 0xde, 0xa7, 0xf6, 0x39, 0x7f, 0x38, 0x91, 0xbf, 0xa4, 0x58, 0xf5,
+ 0x62, 0x6b, 0xce, 0x06, 0xcf, 0xb9, 0x73, 0x91, 0x0d, 0x8a, 0x60, 0xa0, 0xc6, 0xa2,
+ 0x22, 0xe6, 0x51, 0x2e, 0x58, 0xd6, 0x43, 0x02, 0x80, 0x43, 0x44, 0x29, 0x38, 0x9a,
+ 0x99, 0xf3, 0xa4, 0xdd, 0xd0, 0xb4, 0x6f, 0x8b, 0x44, 0x2d, 0xa3, 0x53, 0x30, 0x51,
+ 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xdb, 0x13, 0x68,
+ 0xe0, 0x0e, 0x47, 0x10, 0xf8, 0xcb, 0x88, 0x83, 0xfe, 0x42, 0x3c, 0xd9, 0x3f, 0x1a,
+ 0x33, 0xe9, 0xaa, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16,
+ 0x80, 0x14, 0xdb, 0x13, 0x68, 0xe0, 0x0e, 0x47, 0x10, 0xf8, 0xcb, 0x88, 0x83, 0xfe,
+ 0x42, 0x3c, 0xd9, 0x3f, 0x1a, 0x33, 0xe9, 0xaa, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x1d,
+ 0x13, 0x01, 0x01, 0xff, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0a, 0x06,
+ 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x03, 0x02, 0x03, 0x48, 0x00, 0x30, 0x45,
+ 0x02, 0x20, 0x10, 0xdf, 0x40, 0xc3, 0x20, 0x54, 0x36, 0xb5, 0xc9, 0x3c, 0x70, 0xe3,
+ 0x55, 0x37, 0xd2, 0x04, 0x51, 0xeb, 0x0f, 0x18, 0x83, 0xd0, 0x58, 0xa1, 0x08, 0x77,
+ 0x8d, 0x4d, 0xa4, 0x20, 0xee, 0x33, 0x02, 0x21, 0x00, 0x8d, 0xe3, 0xa6, 0x6c, 0x0d,
+ 0x86, 0x25, 0xdc, 0x59, 0x0d, 0x21, 0x43, 0x22, 0x3a, 0xb9, 0xa1, 0x73, 0x28, 0xc9,
+ 0x16, 0x9e, 0x91, 0x15, 0xc4, 0xc3, 0xd7, 0xeb, 0xe5, 0xce, 0xdc, 0x1c, 0x1b,
+ ]
+ }
+
+ // Generate a fake COSE_Mac0 with a key that's just `byte` repeated
+ fn generate_maced_pubkey(byte: u8) -> Vec<u8> {
+ vec![
+ 0x84, 0x43, 0xA1, 0x01, 0x05, 0xA0, 0x58, 0x4D, 0xA5, 0x01, 0x02, 0x03, 0x26, 0x20,
+ 0x01, 0x21, 0x58, 0x20, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte,
+ byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte,
+ byte, byte, byte, byte, byte, byte, byte, byte, 0x22, 0x58, 0x20, byte, byte, byte,
+ byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte,
+ byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte,
+ byte, 0x58, 0x20, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte,
+ byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte, byte,
+ byte, byte, byte, byte, byte, byte, byte,
+ ]
+ }
#[test]
fn test_parse_cose_mac0_for_coords_raw_bytes() -> Result<()> {
@@ -657,4 +893,182 @@
assert!(extracted_payload.is_err());
Ok(())
}
+
+ #[test]
+ #[ignore] // b/215746308
+ fn test_get_attestation_key_no_keys_provisioned() {
+ let mut db = crate::database::tests::new_test_db().unwrap();
+ let mock_rpc = Box::<MockRemotelyProvisionedComponent>::default();
+ mock_rpc.0.lock().unwrap().hw_info.uniqueId = Some(String::from("mallory"));
+
+ let mut service: RemotelyProvisionedKeyPoolService = Default::default();
+ service
+ .unique_id_to_sec_level
+ .insert(String::from("mallory"), SecurityLevel::TRUSTED_ENVIRONMENT);
+
+ assert_eq!(
+ service
+ .get_attestation_key(&mut db, 0, "mallory")
+ .unwrap_err()
+ .downcast::<error::Error>()
+ .unwrap(),
+ error::Error::Rc(ResponseCode::OUT_OF_KEYS)
+ );
+ }
+
+ #[test]
+ #[ignore] // b/215746308
+ fn test_get_attestation_key() {
+ let mut db = crate::database::tests::new_test_db().unwrap();
+ let sec_level = SecurityLevel::TRUSTED_ENVIRONMENT;
+ let irpc_id = "paul";
+ let caller_uid = 0;
+
+ let mock_rpc = Box::<MockRemotelyProvisionedComponent>::default();
+ let mock_values = mock_rpc.0.clone();
+ let mut remote_provisioning: RemoteProvisioningService = Default::default();
+ remote_provisioning.device_by_sec_level.insert(sec_level, Strong::new(mock_rpc));
+ let mut key_pool: RemotelyProvisionedKeyPoolService = Default::default();
+ key_pool.unique_id_to_sec_level.insert(String::from(irpc_id), sec_level);
+
+ mock_values.lock().unwrap().hw_info.uniqueId = Some(String::from(irpc_id));
+ mock_values.lock().unwrap().private_key = vec![8, 6, 7, 5, 3, 0, 9];
+ mock_values.lock().unwrap().maced_public_key = generate_maced_pubkey(0x11);
+ remote_provisioning.generate_key_pair(&mut db, true, sec_level).unwrap();
+
+ let public_key = RemoteProvisioningService::parse_cose_mac0_for_coords(
+ mock_values.lock().unwrap().maced_public_key.as_slice(),
+ )
+ .unwrap();
+ let batch_cert = get_fake_cert();
+ let certs = &[5, 6, 7, 8];
+ assert!(remote_provisioning
+ .provision_cert_chain(
+ &mut db,
+ public_key.as_slice(),
+ batch_cert.as_slice(),
+ certs,
+ 0,
+ sec_level
+ )
+ .is_ok());
+
+ // ensure we got the key we expected
+ let first_key = key_pool
+ .get_attestation_key(&mut db, caller_uid, irpc_id)
+ .context("get first key")
+ .unwrap();
+ assert_eq!(first_key.keyBlob, mock_values.lock().unwrap().private_key);
+ assert_eq!(first_key.encodedCertChain, certs);
+
+ // ensure that multiple calls get the same key
+ assert_eq!(
+ first_key,
+ key_pool
+ .get_attestation_key(&mut db, caller_uid, irpc_id)
+ .context("get second key")
+ .unwrap()
+ );
+
+ // no more keys for new clients
+ assert_eq!(
+ key_pool
+ .get_attestation_key(&mut db, caller_uid + 1, irpc_id)
+ .unwrap_err()
+ .downcast::<error::Error>()
+ .unwrap(),
+ error::Error::Rc(ResponseCode::OUT_OF_KEYS)
+ );
+ }
+
+ #[test]
+ #[ignore] // b/215746308
+ fn test_get_attestation_key_gets_different_key_for_different_client() {
+ let mut db = crate::database::tests::new_test_db().unwrap();
+ let sec_level = SecurityLevel::TRUSTED_ENVIRONMENT;
+ let irpc_id = "ringo";
+ let first_caller = 0;
+ let second_caller = first_caller + 1;
+
+ let mock_rpc = Box::<MockRemotelyProvisionedComponent>::default();
+ let mock_values = mock_rpc.0.clone();
+ let mut remote_provisioning: RemoteProvisioningService = Default::default();
+ remote_provisioning.device_by_sec_level.insert(sec_level, Strong::new(mock_rpc));
+ let mut key_pool: RemotelyProvisionedKeyPoolService = Default::default();
+ key_pool.unique_id_to_sec_level.insert(String::from(irpc_id), sec_level);
+
+ // generate two distinct keys and provision them with certs
+ mock_values.lock().unwrap().hw_info.uniqueId = Some(String::from(irpc_id));
+ mock_values.lock().unwrap().private_key = vec![3, 1, 4, 1, 5];
+ mock_values.lock().unwrap().maced_public_key = generate_maced_pubkey(0x11);
+ assert!(remote_provisioning.generate_key_pair(&mut db, true, sec_level).is_ok());
+ let public_key = RemoteProvisioningService::parse_cose_mac0_for_coords(
+ mock_values.lock().unwrap().maced_public_key.as_slice(),
+ )
+ .unwrap();
+ assert!(remote_provisioning
+ .provision_cert_chain(
+ &mut db,
+ public_key.as_slice(),
+ get_fake_cert().as_slice(),
+ &[1],
+ 0,
+ sec_level
+ )
+ .is_ok());
+
+ mock_values.lock().unwrap().hw_info.uniqueId = Some(String::from(irpc_id));
+ mock_values.lock().unwrap().private_key = vec![9, 0, 2, 1, 0];
+ mock_values.lock().unwrap().maced_public_key = generate_maced_pubkey(0x22);
+ assert!(remote_provisioning.generate_key_pair(&mut db, true, sec_level).is_ok());
+ let public_key = RemoteProvisioningService::parse_cose_mac0_for_coords(
+ mock_values.lock().unwrap().maced_public_key.as_slice(),
+ )
+ .unwrap();
+ assert!(remote_provisioning
+ .provision_cert_chain(
+ &mut db,
+ public_key.as_slice(),
+ get_fake_cert().as_slice(),
+ &[2],
+ 0,
+ sec_level
+ )
+ .is_ok());
+
+ // make sure each caller gets a distinct key
+ assert_ne!(
+ key_pool
+ .get_attestation_key(&mut db, first_caller, irpc_id)
+ .context("get first key")
+ .unwrap(),
+ key_pool
+ .get_attestation_key(&mut db, second_caller, irpc_id)
+ .context("get second key")
+ .unwrap()
+ );
+
+ // repeated calls should return the same key for a given caller
+ assert_eq!(
+ key_pool
+ .get_attestation_key(&mut db, first_caller, irpc_id)
+ .context("first caller a")
+ .unwrap(),
+ key_pool
+ .get_attestation_key(&mut db, first_caller, irpc_id)
+ .context("first caller b")
+ .unwrap(),
+ );
+
+ assert_eq!(
+ key_pool
+ .get_attestation_key(&mut db, second_caller, irpc_id)
+ .context("second caller a")
+ .unwrap(),
+ key_pool
+ .get_attestation_key(&mut db, second_caller, irpc_id)
+ .context("second caller b")
+ .unwrap()
+ );
+ }
}
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 31bf294..28de1ec 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -18,16 +18,17 @@
use crate::audit_log::{
log_key_deleted, log_key_generated, log_key_imported, log_key_integrity_violation,
};
-use crate::database::{CertificateInfo, KeyIdGuard};
+use crate::database::{BlobInfo, CertificateInfo, KeyIdGuard};
use crate::error::{self, map_km_error, map_or_log_err, Error, ErrorCode};
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_IMPORTER, SUPER_KEY};
use crate::key_parameter::KeyParameter as KsKeyParam;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
use crate::metrics_store::log_key_creation_event_stats;
use crate::remote_provisioning::RemProvState;
use crate::super_key::{KeyBlob, SuperKeyManager};
use crate::utils::{
- check_device_attestation_permissions, check_key_permission, is_device_id_attestation_tag,
+ check_device_attestation_permissions, check_key_permission,
+ check_unique_id_attestation_permissions, is_device_id_attestation_tag,
key_characteristics_to_internal, uid_to_android_user, watchdog as wd,
};
use crate::{
@@ -132,8 +133,7 @@
_ => Some(
certificate_chain
.iter()
- .map(|c| c.encodedCertificate.iter())
- .flatten()
+ .flat_map(|c| c.encodedCertificate.iter())
.copied()
.collect(),
),
@@ -160,9 +160,11 @@
let mut db = db.borrow_mut();
let (key_blob, mut blob_metadata) = SUPER_KEY
+ .read()
+ .unwrap()
.handle_super_encryption_on_key_init(
&mut db,
- &LEGACY_MIGRATOR,
+ &LEGACY_IMPORTER,
&(key.domain),
&key_parameters,
flags,
@@ -180,7 +182,7 @@
&key,
KeyType::Client,
&key_parameters,
- &(&key_blob, &blob_metadata),
+ &BlobInfo::new(&key_blob, &blob_metadata),
&cert_info,
&key_metadata,
&self.km_uuid,
@@ -241,9 +243,13 @@
)
}
_ => {
+ let super_key = SUPER_KEY
+ .read()
+ .unwrap()
+ .get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
let (key_id_guard, mut key_entry) = DB
.with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
- LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
key,
KeyType::Client,
@@ -303,6 +309,8 @@
.context("In create_operation.")?;
let km_blob = SUPER_KEY
+ .read()
+ .unwrap()
.unwrap_key_if_required(&blob_metadata, km_blob)
.context("In create_operation. Failed to handle super encryption.")?;
@@ -311,7 +319,7 @@
&*self.keymint,
key_id_guard,
&km_blob,
- &blob_metadata,
+ blob_metadata.km_uuid().copied(),
operation_parameters,
|blob| loop {
match map_km_error({
@@ -368,7 +376,7 @@
}
};
- let op_binder: binder::public_api::Strong<dyn IKeystoreOperation> =
+ let op_binder: binder::Strong<dyn IKeystoreOperation> =
KeystoreOperation::new_native_binder(operation)
.as_binder()
.into_interface()
@@ -445,10 +453,14 @@
}
if params.iter().any(|kp| kp.tag == Tag::INCLUDE_UNIQUE_ID) {
- check_key_permission(KeyPerm::GenUniqueId, key, &None).context(concat!(
- "In add_required_parameters: ",
- "Caller does not have the permission to generate a unique ID"
- ))?;
+ if check_key_permission(KeyPerm::GenUniqueId, key, &None).is_err()
+ && check_unique_id_attestation_permissions().is_err()
+ {
+ return Err(Error::perm()).context(
+ "In add_required_parameters: \
+ Caller does not have the permission to generate a unique ID",
+ );
+ }
if self.id_rotation_state.had_factory_reset_since_id_rotation().context(
"In add_required_parameters: Call to had_factory_reset_since_id_rotation failed.",
)? {
@@ -549,7 +561,7 @@
&*self.keymint,
Some(key_id_guard),
&KeyBlob::Ref(&blob),
- &blob_metadata,
+ blob_metadata.km_uuid().copied(),
¶ms,
|blob| {
let attest_key = Some(AttestationKey {
@@ -571,23 +583,40 @@
)
.context("In generate_key: Using user generated attestation key.")
.map(|(result, _)| result),
- Some(AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }) => {
- map_km_error({
- let _wp = self.watch_millis(
- concat!(
- "In KeystoreSecurityLevel::generate_key (RemoteProvisioned): ",
- "calling generate_key.",
- ),
- 5000, // Generate can take a little longer.
- );
- self.keymint.generateKey(¶ms, Some(&attestation_key))
- })
+ Some(AttestationKeyInfo::RemoteProvisioned {
+ key_id_guard,
+ attestation_key,
+ attestation_certs,
+ }) => self
+ .upgrade_keyblob_if_required_with(
+ &*self.keymint,
+ Some(key_id_guard),
+ &KeyBlob::Ref(&attestation_key.keyBlob),
+ Some(self.rem_prov_state.get_uuid()),
+ &[],
+ |blob| {
+ map_km_error({
+ let _wp = self.watch_millis(
+ concat!(
+ "In KeystoreSecurityLevel::generate_key (RemoteProvisioned): ",
+ "calling generate_key.",
+ ),
+ 5000, // Generate can take a little longer.
+ );
+ let dynamic_attest_key = Some(AttestationKey {
+ keyBlob: blob.to_vec(),
+ attestKeyParams: vec![],
+ issuerSubjectName: attestation_key.issuerSubjectName.clone(),
+ });
+ self.keymint.generateKey(¶ms, dynamic_attest_key.as_ref())
+ })
+ },
+ )
.context("While generating Key with remote provisioned attestation key.")
- .map(|mut creation_result| {
- creation_result.certificateChain.push(attestation_certs);
- creation_result
- })
- }
+ .map(|(mut result, _)| {
+ result.certificateChain.push(attestation_certs);
+ result
+ }),
None => map_km_error({
let _wp = self.watch_millis(
concat!(
@@ -717,9 +746,11 @@
// Import_wrapped_key requires the rebind permission for the new key.
check_key_permission(KeyPerm::Rebind, &key, &None).context("In import_wrapped_key.")?;
+ let super_key = SUPER_KEY.read().unwrap().get_per_boot_key_by_user_id(user_id);
+
let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(&key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
wrapping_key,
KeyType::Client,
@@ -736,8 +767,11 @@
.ok_or_else(error::Error::sys)
.context("No km_blob after successfully loading key. This should never happen.")?;
- let wrapping_key_blob =
- SUPER_KEY.unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob).context(
+ let wrapping_key_blob = SUPER_KEY
+ .read()
+ .unwrap()
+ .unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob)
+ .context(
"In import_wrapped_key. Failed to handle super encryption for wrapping key.",
)?;
@@ -768,7 +802,7 @@
&*self.keymint,
Some(wrapping_key_id_guard),
&wrapping_key_blob,
- &wrapping_blob_metadata,
+ wrapping_blob_metadata.km_uuid().copied(),
&[],
|wrapping_blob| {
let _wp = self.watch_millis(
@@ -794,7 +828,7 @@
fn store_upgraded_keyblob(
key_id_guard: KeyIdGuard,
- km_uuid: Option<&Uuid>,
+ km_uuid: Option<Uuid>,
key_blob: &KeyBlob,
upgraded_blob: &[u8],
) -> Result<()> {
@@ -804,7 +838,7 @@
let mut new_blob_metadata = new_blob_metadata.unwrap_or_default();
if let Some(uuid) = km_uuid {
- new_blob_metadata.add(BlobMetaEntry::KmUuid(*uuid));
+ new_blob_metadata.add(BlobMetaEntry::KmUuid(uuid));
}
DB.with(|db| {
@@ -822,69 +856,46 @@
fn upgrade_keyblob_if_required_with<T, F>(
&self,
km_dev: &dyn IKeyMintDevice,
- key_id_guard: Option<KeyIdGuard>,
+ mut key_id_guard: Option<KeyIdGuard>,
key_blob: &KeyBlob,
- blob_metadata: &BlobMetaData,
+ km_uuid: Option<Uuid>,
params: &[KeyParameter],
f: F,
) -> Result<(T, Option<Vec<u8>>)>
where
F: Fn(&[u8]) -> Result<T, Error>,
{
- match f(key_blob) {
- Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
- let upgraded_blob = {
- let _wp = self.watch_millis(
- concat!(
- "In KeystoreSecurityLevel::upgrade_keyblob_if_required_with: ",
- "calling upgradeKey."
- ),
- 500,
- );
- map_km_error(km_dev.upgradeKey(key_blob, params))
- }
- .context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
-
- if let Some(kid) = key_id_guard {
- Self::store_upgraded_keyblob(
- kid,
- blob_metadata.km_uuid(),
- key_blob,
- &upgraded_blob,
- )
- .context(
+ let (v, upgraded_blob) = crate::utils::upgrade_keyblob_if_required_with(
+ km_dev,
+ key_blob,
+ params,
+ f,
+ |upgraded_blob| {
+ if key_id_guard.is_some() {
+ // Unwrap cannot panic, because the is_some was true.
+ let kid = key_id_guard.take().unwrap();
+ Self::store_upgraded_keyblob(kid, km_uuid, key_blob, upgraded_blob).context(
"In upgrade_keyblob_if_required_with: store_upgraded_keyblob failed",
- )?;
+ )
+ } else {
+ Ok(())
}
+ },
+ )
+ .context("In KeystoreSecurityLevel::upgrade_keyblob_if_required_with.")?;
- match f(&upgraded_blob) {
- Ok(v) => Ok((v, Some(upgraded_blob))),
- Err(e) => Err(e).context(concat!(
- "In upgrade_keyblob_if_required_with: ",
- "Failed to perform operation on second try."
- )),
- }
- }
- result => {
- if let Some(kid) = key_id_guard {
- if key_blob.force_reencrypt() {
- Self::store_upgraded_keyblob(
- kid,
- blob_metadata.km_uuid(),
- key_blob,
- key_blob,
- )
- .context(concat!(
- "In upgrade_keyblob_if_required_with: ",
- "store_upgraded_keyblob failed in forced reencrypt"
- ))?;
- }
- }
- result
- .map(|v| (v, None))
- .context("In upgrade_keyblob_if_required_with: Called closure failed.")
+ // If no upgrade was needed, use the opportunity to reencrypt the blob if required
+ // and if the a key_id_guard is held. Note: key_id_guard can only be Some if no
+ // upgrade was performed above and if one was given in the first place.
+ if key_blob.force_reencrypt() {
+ if let Some(kid) = key_id_guard {
+ Self::store_upgraded_keyblob(kid, km_uuid, key_blob, key_blob).context(concat!(
+ "In upgrade_keyblob_if_required_with: ",
+ "store_upgraded_keyblob failed in forced reencrypt"
+ ))?;
}
}
+ Ok((v, upgraded_blob))
}
fn convert_storage_key_to_ephemeral(
@@ -985,7 +996,7 @@
key: &KeyDescriptor,
operation_parameters: &[KeyParameter],
forced: bool,
- ) -> binder::public_api::Result<CreateOperationResponse> {
+ ) -> binder::Result<CreateOperationResponse> {
let _wp = self.watch_millis("IKeystoreSecurityLevel::createOperation", 500);
map_or_log_err(self.create_operation(key, operation_parameters, forced), Ok)
}
@@ -996,7 +1007,7 @@
params: &[KeyParameter],
flags: i32,
entropy: &[u8],
- ) -> binder::public_api::Result<KeyMetadata> {
+ ) -> binder::Result<KeyMetadata> {
// Duration is set to 5 seconds, because generateKey - especially for RSA keys, takes more
// time than other operations
let _wp = self.watch_millis("IKeystoreSecurityLevel::generateKey", 5000);
@@ -1012,7 +1023,7 @@
params: &[KeyParameter],
flags: i32,
key_data: &[u8],
- ) -> binder::public_api::Result<KeyMetadata> {
+ ) -> binder::Result<KeyMetadata> {
let _wp = self.watch_millis("IKeystoreSecurityLevel::importKey", 500);
let result = self.import_key(key, attestation_key, params, flags, key_data);
log_key_creation_event_stats(self.security_level, params, &result);
@@ -1026,7 +1037,7 @@
masking_key: Option<&[u8]>,
params: &[KeyParameter],
authenticators: &[AuthenticatorSpec],
- ) -> binder::public_api::Result<KeyMetadata> {
+ ) -> binder::Result<KeyMetadata> {
let _wp = self.watch_millis("IKeystoreSecurityLevel::importWrappedKey", 500);
let result =
self.import_wrapped_key(key, wrapping_key, masking_key, params, authenticators);
@@ -1037,11 +1048,11 @@
fn convertStorageKeyToEphemeral(
&self,
storage_key: &KeyDescriptor,
- ) -> binder::public_api::Result<EphemeralStorageKeyResponse> {
+ ) -> binder::Result<EphemeralStorageKeyResponse> {
let _wp = self.watch_millis("IKeystoreSecurityLevel::convertStorageKeyToEphemeral", 500);
map_or_log_err(self.convert_storage_key_to_ephemeral(storage_key), Ok)
}
- fn deleteKey(&self, key: &KeyDescriptor) -> binder::public_api::Result<()> {
+ fn deleteKey(&self, key: &KeyDescriptor) -> binder::Result<()> {
let _wp = self.watch_millis("IKeystoreSecurityLevel::deleteKey", 500);
let result = self.delete_key(key);
log_key_deleted(key, ThreadState::get_calling_uid(), result.is_ok());
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index 13723f0..79e7692 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -22,11 +22,11 @@
use crate::security_level::KeystoreSecurityLevel;
use crate::utils::{
check_grant_permission, check_key_permission, check_keystore_permission,
- key_parameters_to_authorizations, list_key_entries, watchdog as wd,
+ key_parameters_to_authorizations, list_key_entries, uid_to_android_user, watchdog as wd,
};
use crate::{
database::Uuid,
- globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+ globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_IMPORTER, SUPER_KEY},
};
use crate::{database::KEYSTORE_UUID, permission};
use crate::{
@@ -81,7 +81,7 @@
}
let uuid_by_sec_level = result.uuid_by_sec_level.clone();
- LEGACY_MIGRATOR
+ LEGACY_IMPORTER
.set_init(move || {
(create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
})
@@ -130,9 +130,13 @@
fn get_key_entry(&self, key: &KeyDescriptor) -> Result<KeyEntryResponse> {
let caller_uid = ThreadState::get_calling_uid();
+
+ let super_key =
+ SUPER_KEY.read().unwrap().get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
let (key_id_guard, mut key_entry) = DB
.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
key,
KeyType::Client,
@@ -182,8 +186,11 @@
certificate_chain: Option<&[u8]>,
) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key =
+ SUPER_KEY.read().unwrap().get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with::<_, Result<()>>(|db| {
- let entry = match LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ let entry = match LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
key,
KeyType::Client,
@@ -291,8 +298,11 @@
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key =
+ SUPER_KEY.read().unwrap().get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().unbind_key(key, KeyType::Client, caller_uid, |k, av| {
check_key_permission(KeyPerm::Delete, k, &av).context("During delete_key.")
})
@@ -309,8 +319,11 @@
access_vector: permission::KeyPermSet,
) -> Result<KeyDescriptor> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key =
+ SUPER_KEY.read().unwrap().get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().grant(
key,
caller_uid,
@@ -341,13 +354,13 @@
fn getSecurityLevel(
&self,
security_level: SecurityLevel,
- ) -> binder::public_api::Result<Strong<dyn IKeystoreSecurityLevel>> {
+ ) -> binder::Result<Strong<dyn IKeystoreSecurityLevel>> {
let _wp = wd::watch_millis_with("IKeystoreService::getSecurityLevel", 500, move || {
format!("security_level: {}", security_level.0)
});
map_or_log_err(self.get_security_level(security_level), Ok)
}
- fn getKeyEntry(&self, key: &KeyDescriptor) -> binder::public_api::Result<KeyEntryResponse> {
+ fn getKeyEntry(&self, key: &KeyDescriptor) -> binder::Result<KeyEntryResponse> {
let _wp = wd::watch_millis("IKeystoreService::get_key_entry", 500);
map_or_log_err(self.get_key_entry(key), Ok)
}
@@ -356,19 +369,15 @@
key: &KeyDescriptor,
public_cert: Option<&[u8]>,
certificate_chain: Option<&[u8]>,
- ) -> binder::public_api::Result<()> {
+ ) -> binder::Result<()> {
let _wp = wd::watch_millis("IKeystoreService::updateSubcomponent", 500);
map_or_log_err(self.update_subcomponent(key, public_cert, certificate_chain), Ok)
}
- fn listEntries(
- &self,
- domain: Domain,
- namespace: i64,
- ) -> binder::public_api::Result<Vec<KeyDescriptor>> {
+ fn listEntries(&self, domain: Domain, namespace: i64) -> binder::Result<Vec<KeyDescriptor>> {
let _wp = wd::watch_millis("IKeystoreService::listEntries", 500);
map_or_log_err(self.list_entries(domain, namespace), Ok)
}
- fn deleteKey(&self, key: &KeyDescriptor) -> binder::public_api::Result<()> {
+ fn deleteKey(&self, key: &KeyDescriptor) -> binder::Result<()> {
let _wp = wd::watch_millis("IKeystoreService::deleteKey", 500);
let result = self.delete_key(key);
log_key_deleted(key, ThreadState::get_calling_uid(), result.is_ok());
@@ -379,11 +388,11 @@
key: &KeyDescriptor,
grantee_uid: i32,
access_vector: i32,
- ) -> binder::public_api::Result<KeyDescriptor> {
+ ) -> binder::Result<KeyDescriptor> {
let _wp = wd::watch_millis("IKeystoreService::grant", 500);
map_or_log_err(self.grant(key, grantee_uid, access_vector.into()), Ok)
}
- fn ungrant(&self, key: &KeyDescriptor, grantee_uid: i32) -> binder::public_api::Result<()> {
+ fn ungrant(&self, key: &KeyDescriptor, grantee_uid: i32) -> binder::Result<()> {
let _wp = wd::watch_millis("IKeystoreService::ungrant", 500);
map_or_log_err(self.ungrant(key, grantee_uid), Ok)
}
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index ca5e593..74e3e56 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -26,11 +26,9 @@
error::ResponseCode,
key_parameter::{KeyParameter, KeyParameterValue},
legacy_blob::LegacyBlobLoader,
- legacy_migrator::LegacyMigrator,
+ legacy_importer::LegacyImporter,
raw_device::KeyMintDevice,
- try_insert::TryInsert,
- utils::watchdog as wd,
- utils::AID_KEYSTORE,
+ utils::{watchdog as wd, AesGcm, AID_KEYSTORE},
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
Algorithm::Algorithm, BlockMode::BlockMode, HardwareAuthToken::HardwareAuthToken,
@@ -50,7 +48,7 @@
use std::{
collections::HashMap,
sync::Arc,
- sync::{Mutex, Weak},
+ sync::{Mutex, RwLock, Weak},
};
use std::{convert::TryFrom, ops::Deref};
@@ -76,7 +74,7 @@
/// different purpose, distinguished by alias. Each is associated with a static
/// constant of this type.
pub struct SuperKeyType<'a> {
- /// Alias used to look the key up in the `persistent.keyentry` table.
+ /// Alias used to look up the key in the `persistent.keyentry` table.
pub alias: &'a str,
/// Encryption algorithm
pub algorithm: SuperEncryptionAlgorithm,
@@ -155,15 +153,22 @@
reencrypt_with: Option<Arc<SuperKey>>,
}
-impl SuperKey {
- /// For most purposes `unwrap_key` handles decryption,
- /// but legacy handling and some tests need to assume AES and decrypt directly.
- pub fn aes_gcm_decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
+impl AesGcm for SuperKey {
+ fn decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
if self.algorithm == SuperEncryptionAlgorithm::Aes256Gcm {
aes_gcm_decrypt(data, iv, tag, &self.key)
- .context("In aes_gcm_decrypt: decryption failed")
+ .context("In SuperKey::decrypt: Decryption failed.")
} else {
- Err(Error::sys()).context("In aes_gcm_decrypt: Key is not an AES key")
+ Err(Error::sys()).context("In SuperKey::decrypt: Key is not an AES key.")
+ }
+ }
+
+ fn encrypt(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
+ if self.algorithm == SuperEncryptionAlgorithm::Aes256Gcm {
+ aes_gcm_encrypt(plaintext, &self.key)
+ .context("In SuperKey::encrypt: Encryption failed.")
+ } else {
+ Err(Error::sys()).context("In SuperKey::encrypt: Key is not an AES key.")
}
}
}
@@ -256,7 +261,7 @@
struct SkmState {
user_keys: HashMap<UserId, UserSuperKeys>,
key_index: HashMap<i64, Weak<SuperKey>>,
- boot_level_key_cache: Option<BootLevelKeyCache>,
+ boot_level_key_cache: Option<Mutex<BootLevelKeyCache>>,
}
impl SkmState {
@@ -275,24 +280,24 @@
#[derive(Default)]
pub struct SuperKeyManager {
- data: Mutex<SkmState>,
+ data: SkmState,
}
impl SuperKeyManager {
- pub fn set_up_boot_level_cache(self: &Arc<Self>, db: &mut KeystoreDB) -> Result<()> {
- let mut data = self.data.lock().unwrap();
- if data.boot_level_key_cache.is_some() {
+ pub fn set_up_boot_level_cache(skm: &Arc<RwLock<Self>>, db: &mut KeystoreDB) -> Result<()> {
+ let mut skm_guard = skm.write().unwrap();
+ if skm_guard.data.boot_level_key_cache.is_some() {
log::info!("In set_up_boot_level_cache: called for a second time");
return Ok(());
}
let level_zero_key = get_level_zero_key(db)
.context("In set_up_boot_level_cache: get_level_zero_key failed")?;
- data.boot_level_key_cache = Some(BootLevelKeyCache::new(level_zero_key));
+ skm_guard.data.boot_level_key_cache =
+ Some(Mutex::new(BootLevelKeyCache::new(level_zero_key)));
log::info!("Starting boot level watcher.");
- let clone = self.clone();
+ let clone = skm.clone();
std::thread::spawn(move || {
- clone
- .watch_boot_level()
+ Self::watch_boot_level(clone)
.unwrap_or_else(|e| log::error!("watch_boot_level failed:\n{:?}", e));
});
Ok(())
@@ -300,32 +305,40 @@
/// Watch the `keystore.boot_level` system property, and keep boot level up to date.
/// Blocks waiting for system property changes, so must be run in its own thread.
- fn watch_boot_level(&self) -> Result<()> {
+ fn watch_boot_level(skm: Arc<RwLock<Self>>) -> Result<()> {
let mut w = PropertyWatcher::new("keystore.boot_level")
.context("In watch_boot_level: PropertyWatcher::new failed")?;
loop {
let level = w
.read(|_n, v| v.parse::<usize>().map_err(std::convert::Into::into))
.context("In watch_boot_level: read of property failed")?;
- // watch_boot_level should only be called once data.boot_level_key_cache is Some,
- // so it's safe to unwrap in the branches below.
- if level < MAX_MAX_BOOT_LEVEL {
- log::info!("Read keystore.boot_level value {}", level);
- let mut data = self.data.lock().unwrap();
- data.boot_level_key_cache
+
+ // This scope limits the skm_guard life, so we don't hold the skm_guard while
+ // waiting.
+ {
+ let mut skm_guard = skm.write().unwrap();
+ let boot_level_key_cache = skm_guard
+ .data
+ .boot_level_key_cache
.as_mut()
- .unwrap()
- .advance_boot_level(level)
- .context("In watch_boot_level: advance_boot_level failed")?;
- } else {
- log::info!(
- "keystore.boot_level {} hits maximum {}, finishing.",
- level,
- MAX_MAX_BOOT_LEVEL
- );
- let mut data = self.data.lock().unwrap();
- data.boot_level_key_cache.as_mut().unwrap().finish();
- break;
+ .ok_or_else(Error::sys)
+ .context("In watch_boot_level: Boot level cache not initialized")?
+ .get_mut()
+ .unwrap();
+ if level < MAX_MAX_BOOT_LEVEL {
+ log::info!("Read keystore.boot_level value {}", level);
+ boot_level_key_cache
+ .advance_boot_level(level)
+ .context("In watch_boot_level: advance_boot_level failed")?;
+ } else {
+ log::info!(
+ "keystore.boot_level {} hits maximum {}, finishing.",
+ level,
+ MAX_MAX_BOOT_LEVEL
+ );
+ boot_level_key_cache.finish();
+ break;
+ }
}
w.wait().context("In watch_boot_level: property wait failed")?;
}
@@ -334,34 +347,37 @@
pub fn level_accessible(&self, boot_level: i32) -> bool {
self.data
- .lock()
- .unwrap()
.boot_level_key_cache
.as_ref()
- .map_or(false, |c| c.level_accessible(boot_level as usize))
+ .map_or(false, |c| c.lock().unwrap().level_accessible(boot_level as usize))
}
- pub fn forget_all_keys_for_user(&self, user: UserId) {
- let mut data = self.data.lock().unwrap();
- data.user_keys.remove(&user);
+ pub fn forget_all_keys_for_user(&mut self, user: UserId) {
+ self.data.user_keys.remove(&user);
}
- fn install_per_boot_key_for_user(&self, user: UserId, super_key: Arc<SuperKey>) -> Result<()> {
- let mut data = self.data.lock().unwrap();
- data.add_key_to_key_index(&super_key)
+ fn install_per_boot_key_for_user(
+ &mut self,
+ user: UserId,
+ super_key: Arc<SuperKey>,
+ ) -> Result<()> {
+ self.data
+ .add_key_to_key_index(&super_key)
.context("In install_per_boot_key_for_user: add_key_to_key_index failed")?;
- data.user_keys.entry(user).or_default().per_boot = Some(super_key);
+ self.data.user_keys.entry(user).or_default().per_boot = Some(super_key);
Ok(())
}
fn lookup_key(&self, key_id: &SuperKeyIdentifier) -> Result<Option<Arc<SuperKey>>> {
- let mut data = self.data.lock().unwrap();
Ok(match key_id {
- SuperKeyIdentifier::DatabaseId(id) => data.key_index.get(id).and_then(|k| k.upgrade()),
- SuperKeyIdentifier::BootLevel(level) => data
+ SuperKeyIdentifier::DatabaseId(id) => {
+ self.data.key_index.get(id).and_then(|k| k.upgrade())
+ }
+ SuperKeyIdentifier::BootLevel(level) => self
+ .data
.boot_level_key_cache
- .as_mut()
- .map(|b| b.aes_key(*level as usize))
+ .as_ref()
+ .map(|b| b.lock().unwrap().aes_key(*level as usize))
.transpose()
.context("In lookup_key: aes_key failed")?
.flatten()
@@ -376,9 +392,16 @@
})
}
- pub fn get_per_boot_key_by_user_id(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
- let data = self.data.lock().unwrap();
- data.user_keys.get(&user_id).and_then(|e| e.per_boot.as_ref().cloned())
+ pub fn get_per_boot_key_by_user_id(
+ &self,
+ user_id: UserId,
+ ) -> Option<Arc<dyn AesGcm + Send + Sync>> {
+ self.get_per_boot_key_by_user_id_internal(user_id)
+ .map(|sk| -> Arc<dyn AesGcm + Send + Sync> { sk })
+ }
+
+ fn get_per_boot_key_by_user_id_internal(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
+ self.data.user_keys.get(&user_id).and_then(|e| e.per_boot.as_ref().cloned())
}
/// This function unlocks the super keys for a given user.
@@ -386,7 +409,7 @@
/// super key cache. If there is no such key a new key is created, encrypted with
/// a key derived from the given password and stored in the database.
pub fn unlock_user_key(
- &self,
+ &mut self,
db: &mut KeystoreDB,
user: UserId,
pw: &Password,
@@ -455,7 +478,7 @@
match key.algorithm {
SuperEncryptionAlgorithm::Aes256Gcm => match (metadata.iv(), metadata.aead_tag()) {
(Some(iv), Some(tag)) => key
- .aes_gcm_decrypt(blob, iv, tag)
+ .decrypt(blob, iv, tag)
.context("In unwrap_key_with_key: Failed to decrypt the key blob."),
(iv, tag) => Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
concat!(
@@ -493,9 +516,12 @@
}
/// Checks if user has setup LSKF, even when super key cache is empty for the user.
- pub fn super_key_exists_in_db_for_user(
+ /// The reference to self is unused but it is required to prevent calling this function
+ /// concurrently with skm state database changes.
+ fn super_key_exists_in_db_for_user(
+ &self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
) -> Result<bool> {
let key_in_db = db
@@ -505,7 +531,7 @@
if key_in_db {
Ok(key_in_db)
} else {
- legacy_migrator
+ legacy_importer
.has_super_key(user_id)
.context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
}
@@ -515,15 +541,15 @@
/// legacy database). If not, return Uninitialized state.
/// Otherwise, decrypt the super key from the password and return LskfUnlocked state.
pub fn check_and_unlock_super_key(
- &self,
+ &mut self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
pw: &Password,
) -> Result<UserState> {
let alias = &USER_SUPER_KEY;
- let result = legacy_migrator
- .with_try_migrate_super_key(user_id, pw, || db.load_super_key(alias, user_id))
+ let result = legacy_importer
+ .with_try_import_super_key(user_id, pw, || db.load_super_key(alias, user_id))
.context("In check_and_unlock_super_key. Failed to load super key")?;
match result {
@@ -544,24 +570,23 @@
/// and return LskfUnlocked state.
/// If the password is not provided, return Uninitialized state.
pub fn check_and_initialize_super_key(
- &self,
+ &mut self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
pw: Option<&Password>,
) -> Result<UserState> {
- let super_key_exists_in_db =
- Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
- "In check_and_initialize_super_key. Failed to check if super key exists.",
- )?;
+ let super_key_exists_in_db = self
+ .super_key_exists_in_db_for_user(db, legacy_importer, user_id)
+ .context("In check_and_initialize_super_key. Failed to check if super key exists.")?;
if super_key_exists_in_db {
Ok(UserState::LskfLocked)
} else if let Some(pw) = pw {
- //generate a new super key.
+ // Generate a new super key.
let super_key = generate_aes256_key()
.context("In check_and_initialize_super_key: Failed to generate AES 256 key.")?;
- //derive an AES256 key from the password and re-encrypt the super key
- //before we insert it in the database.
+ // Derive an AES256 key from the password and re-encrypt the super key
+ // before we insert it in the database.
let (encrypted_super_key, blob_metadata) = Self::encrypt_with_password(&super_key, pw)
.context("In check_and_initialize_super_key.")?;
@@ -589,9 +614,9 @@
}
}
- //helper function to populate super key cache from the super key blob loaded from the database
+ // Helper function to populate super key cache from the super key blob loaded from the database.
fn populate_cache_from_super_key_blob(
- &self,
+ &mut self,
user_id: UserId,
algorithm: SuperEncryptionAlgorithm,
entry: KeyEntry,
@@ -605,7 +630,7 @@
Ok(super_key)
}
- /// Extracts super key from the entry loaded from the database
+ /// Extracts super key from the entry loaded from the database.
pub fn extract_super_key_from_key_entry(
algorithm: SuperEncryptionAlgorithm,
entry: KeyEntry,
@@ -620,7 +645,7 @@
metadata.aead_tag(),
) {
(Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
- // Note that password encryption is AES no matter the value of algorithm
+ // Note that password encryption is AES no matter the value of algorithm.
let key = pw.derive_key(Some(salt), AES_256_KEY_LENGTH).context(
"In extract_super_key_from_key_entry: Failed to generate key from password.",
)?;
@@ -680,11 +705,12 @@
fn super_encrypt_on_key_init(
&self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
key_blob: &[u8],
) -> Result<(Vec<u8>, BlobMetaData)> {
- match UserState::get(db, legacy_migrator, self, user_id)
+ match self
+ .get_user_state(db, legacy_importer, user_id)
.context("In super_encrypt. Failed to get user state.")?
{
UserState::LskfUnlocked(super_key) => {
@@ -699,9 +725,9 @@
}
}
- //Helper function to encrypt a key with the given super key. Callers should select which super
- //key to be used. This is called when a key is super encrypted at its creation as well as at its
- //upgrade.
+ // Helper function to encrypt a key with the given super key. Callers should select which super
+ // key to be used. This is called when a key is super encrypted at its creation as well as at
+ // its upgrade.
fn encrypt_with_aes_super_key(
key_blob: &[u8],
super_key: &SuperKey,
@@ -725,7 +751,7 @@
pub fn handle_super_encryption_on_key_init(
&self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
domain: &Domain,
key_parameters: &[KeyParameter],
flags: Option<i32>,
@@ -735,15 +761,15 @@
match Enforcements::super_encryption_required(domain, key_parameters, flags) {
SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
SuperEncryptionType::LskfBound => self
- .super_encrypt_on_key_init(db, legacy_migrator, user_id, key_blob)
+ .super_encrypt_on_key_init(db, legacy_importer, user_id, key_blob)
.context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to super encrypt with LskfBound key."
)),
SuperEncryptionType::ScreenLockBound => {
- let mut data = self.data.lock().unwrap();
- let entry = data.user_keys.entry(user_id).or_default();
- if let Some(super_key) = entry.screen_lock_bound.as_ref() {
+ let entry =
+ self.data.user_keys.get(&user_id).and_then(|e| e.screen_lock_bound.as_ref());
+ if let Some(super_key) = entry {
Self::encrypt_with_aes_super_key(key_blob, super_key).context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to encrypt with ScreenLockBound key."
@@ -813,6 +839,7 @@
/// When this is called, the caller must hold the lock on the SuperKeyManager.
/// So it's OK that the check and creation are different DB transactions.
fn get_or_create_super_key(
+ &mut self,
db: &mut KeystoreDB,
user_id: UserId,
key_type: &SuperKeyType,
@@ -847,8 +874,8 @@
)
}
};
- //derive an AES256 key from the password and re-encrypt the super key
- //before we insert it in the database.
+ // Derive an AES256 key from the password and re-encrypt the super key
+ // before we insert it in the database.
let (encrypted_super_key, blob_metadata) =
Self::encrypt_with_password(&super_key, password)
.context("In get_or_create_super_key.")?;
@@ -876,52 +903,64 @@
/// Decrypt the screen-lock bound keys for this user using the password and store in memory.
pub fn unlock_screen_lock_bound_key(
- &self,
+ &mut self,
db: &mut KeystoreDB,
user_id: UserId,
password: &Password,
) -> Result<()> {
- let mut data = self.data.lock().unwrap();
- let entry = data.user_keys.entry(user_id).or_default();
- let aes = entry
- .screen_lock_bound
- .get_or_try_to_insert_with(|| {
- Self::get_or_create_super_key(
- db,
- user_id,
- &USER_SCREEN_LOCK_BOUND_KEY,
- password,
- None,
- )
- })?
- .clone();
- let ecdh = entry
- .screen_lock_bound_private
- .get_or_try_to_insert_with(|| {
- Self::get_or_create_super_key(
- db,
- user_id,
- &USER_SCREEN_LOCK_BOUND_P521_KEY,
- password,
- Some(aes.clone()),
- )
- })?
- .clone();
- data.add_key_to_key_index(&aes)?;
- data.add_key_to_key_index(&ecdh)?;
+ let (screen_lock_bound, screen_lock_bound_private) = self
+ .data
+ .user_keys
+ .get(&user_id)
+ .map(|e| (e.screen_lock_bound.clone(), e.screen_lock_bound_private.clone()))
+ .unwrap_or((None, None));
+
+ if screen_lock_bound.is_some() && screen_lock_bound_private.is_some() {
+ // Already unlocked.
+ return Ok(());
+ }
+
+ let aes = if let Some(screen_lock_bound) = screen_lock_bound {
+ // This is weird. If this point is reached only one of the screen locked keys was
+ // initialized. This should never happen.
+ screen_lock_bound
+ } else {
+ self.get_or_create_super_key(db, user_id, &USER_SCREEN_LOCK_BOUND_KEY, password, None)
+ .context("In unlock_screen_lock_bound_key: Trying to get or create symmetric key.")?
+ };
+
+ let ecdh = if let Some(screen_lock_bound_private) = screen_lock_bound_private {
+ // This is weird. If this point is reached only one of the screen locked keys was
+ // initialized. This should never happen.
+ screen_lock_bound_private
+ } else {
+ self.get_or_create_super_key(
+ db,
+ user_id,
+ &USER_SCREEN_LOCK_BOUND_P521_KEY,
+ password,
+ Some(aes.clone()),
+ )
+ .context("In unlock_screen_lock_bound_key: Trying to get or create asymmetric key.")?
+ };
+
+ self.data.add_key_to_key_index(&aes)?;
+ self.data.add_key_to_key_index(&ecdh)?;
+ let entry = self.data.user_keys.entry(user_id).or_default();
+ entry.screen_lock_bound = Some(aes);
+ entry.screen_lock_bound_private = Some(ecdh);
Ok(())
}
/// Wipe the screen-lock bound keys for this user from memory.
pub fn lock_screen_lock_bound_key(
- &self,
+ &mut self,
db: &mut KeystoreDB,
user_id: UserId,
unlocking_sids: &[i64],
) {
log::info!("Locking screen bound for user {} sids {:?}", user_id, unlocking_sids);
- let mut data = self.data.lock().unwrap();
- let mut entry = data.user_keys.entry(user_id).or_default();
+ let mut entry = self.data.user_keys.entry(user_id).or_default();
if !unlocking_sids.is_empty() {
if let (Some(aes), Some(ecdh)) = (
entry.screen_lock_bound.as_ref().cloned(),
@@ -993,12 +1032,11 @@
/// User has unlocked, not using a password. See if any of our stored auth tokens can be used
/// to unlock the keys protecting UNLOCKED_DEVICE_REQUIRED keys.
pub fn try_unlock_user_with_biometric(
- &self,
+ &mut self,
db: &mut KeystoreDB,
user_id: UserId,
) -> Result<()> {
- let mut data = self.data.lock().unwrap();
- let mut entry = data.user_keys.entry(user_id).or_default();
+ let mut entry = self.data.user_keys.entry(user_id).or_default();
if let Some(biometric) = entry.biometric_unlock.as_ref() {
let (key_id_guard, key_entry) = db
.load_key_entry(
@@ -1038,8 +1076,8 @@
Ok((slb, slbp)) => {
entry.screen_lock_bound = Some(slb.clone());
entry.screen_lock_bound_private = Some(slbp.clone());
- data.add_key_to_key_index(&slb)?;
- data.add_key_to_key_index(&slbp)?;
+ self.data.add_key_to_key_index(&slb)?;
+ self.data.add_key_to_key_index(&slbp)?;
log::info!(concat!(
"In try_unlock_user_with_biometric: ",
"Successfully unlocked with biometric"
@@ -1055,6 +1093,122 @@
}
Ok(())
}
+
+    /// Returns the keystore locked state of the given user. It requires the thread local
+    /// keystore database and a reference to the legacy importer because it may need to
+    /// import the super key from the legacy blob database to the keystore database.
+ pub fn get_user_state(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_importer: &LegacyImporter,
+ user_id: UserId,
+ ) -> Result<UserState> {
+ match self.get_per_boot_key_by_user_id_internal(user_id) {
+ Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
+ None => {
+ // Check if a super key exists in the database or legacy database.
+ // If so, return locked user state.
+ if self
+ .super_key_exists_in_db_for_user(db, legacy_importer, user_id)
+ .context("In get_user_state.")?
+ {
+ Ok(UserState::LskfLocked)
+ } else {
+ Ok(UserState::Uninitialized)
+ }
+ }
+ }
+ }
+
+ /// If the given user is unlocked:
+ /// * and `password` is None, the user is reset, all authentication bound keys are deleted and
+ /// `Ok(UserState::Uninitialized)` is returned.
+ /// * and `password` is Some, `Ok(UserState::LskfUnlocked)` is returned.
+ /// If the given user is locked:
+    /// * and the user was initialized before, `Ok(UserState::LskfLocked)` is returned.
+ /// * and the user was not initialized before:
+ /// * and `password` is None, `Ok(Uninitialized)` is returned.
+ /// * and `password` is Some, super keys are generated and `Ok(UserState::LskfUnlocked)` is
+ /// returned.
+ pub fn reset_or_init_user_and_get_user_state(
+ &mut self,
+ db: &mut KeystoreDB,
+ legacy_importer: &LegacyImporter,
+ user_id: UserId,
+ password: Option<&Password>,
+ ) -> Result<UserState> {
+ match self.get_per_boot_key_by_user_id_internal(user_id) {
+ Some(_) if password.is_none() => {
+ // Transitioning to swiping, delete only the super key in database and cache,
+ // and super-encrypted keys in database (and in KM).
+ self.reset_user(db, legacy_importer, user_id, true).context(
+ "In reset_or_init_user_and_get_user_state: Trying to delete keys from the db.",
+ )?;
+ // Lskf is now removed in Keystore.
+ Ok(UserState::Uninitialized)
+ }
+ Some(super_key) => {
+ // Keystore won't be notified when changing to a new password when LSKF is
+ // already setup. Therefore, ideally this path wouldn't be reached.
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ None => {
+ // Check if a super key exists in the database or legacy database.
+ // If so, return LskfLocked state.
+ // Otherwise, i) if the password is provided, initialize the super key and return
+ // LskfUnlocked state ii) if password is not provided, return Uninitialized state.
+ self.check_and_initialize_super_key(db, legacy_importer, user_id, password)
+ }
+ }
+ }
+
+ /// Unlocks the given user with the given password. If the key was already unlocked or unlocking
+ /// was successful, `Ok(UserState::LskfUnlocked)` is returned.
+ /// If the user was never initialized `Ok(UserState::Uninitialized)` is returned.
+ pub fn unlock_and_get_user_state(
+ &mut self,
+ db: &mut KeystoreDB,
+ legacy_importer: &LegacyImporter,
+ user_id: UserId,
+ password: &Password,
+ ) -> Result<UserState> {
+ match self.get_per_boot_key_by_user_id_internal(user_id) {
+ Some(super_key) => {
+ log::info!("In unlock_and_get_user_state. Trying to unlock when already unlocked.");
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ None => {
+ // Check if a super key exists in the database or legacy database.
+ // If not, return Uninitialized state.
+ // Otherwise, try to unlock the super key and if successful,
+ // return LskfUnlocked.
+ self.check_and_unlock_super_key(db, legacy_importer, user_id, password)
+ .context("In unlock_and_get_user_state. Failed to unlock super key.")
+ }
+ }
+ }
+
+ /// Delete all the keys created on behalf of the user.
+ /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
+ /// encrypted keys.
+ pub fn reset_user(
+ &mut self,
+ db: &mut KeystoreDB,
+ legacy_importer: &LegacyImporter,
+ user_id: UserId,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ // Mark keys created on behalf of the user as unreferenced.
+ legacy_importer
+ .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
+ .context("In reset_user: Trying to delete legacy keys.")?;
+ db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
+ .context("In reset user. Error in unbinding keys.")?;
+
+ // Delete super key in cache, if exists.
+ self.forget_all_keys_for_user(user_id);
+ Ok(())
+ }
}
/// This enum represents different states of the user's life cycle in the device.
@@ -1072,110 +1226,6 @@
Uninitialized,
}
-impl UserState {
- pub fn get(
- db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
- skm: &SuperKeyManager,
- user_id: UserId,
- ) -> Result<UserState> {
- match skm.get_per_boot_key_by_user_id(user_id) {
- Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
- None => {
- //Check if a super key exists in the database or legacy database.
- //If so, return locked user state.
- if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
- .context("In get.")?
- {
- Ok(UserState::LskfLocked)
- } else {
- Ok(UserState::Uninitialized)
- }
- }
- }
- }
-
- /// Queries user state when serving password change requests.
- pub fn get_with_password_changed(
- db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
- skm: &SuperKeyManager,
- user_id: UserId,
- password: Option<&Password>,
- ) -> Result<UserState> {
- match skm.get_per_boot_key_by_user_id(user_id) {
- Some(super_key) => {
- if password.is_none() {
- //transitioning to swiping, delete only the super key in database and cache, and
- //super-encrypted keys in database (and in KM)
- Self::reset_user(db, skm, legacy_migrator, user_id, true).context(
- "In get_with_password_changed: Trying to delete keys from the db.",
- )?;
- //Lskf is now removed in Keystore
- Ok(UserState::Uninitialized)
- } else {
- //Keystore won't be notified when changing to a new password when LSKF is
- //already setup. Therefore, ideally this path wouldn't be reached.
- Ok(UserState::LskfUnlocked(super_key))
- }
- }
- None => {
- //Check if a super key exists in the database or legacy database.
- //If so, return LskfLocked state.
- //Otherwise, i) if the password is provided, initialize the super key and return
- //LskfUnlocked state ii) if password is not provided, return Uninitialized state.
- skm.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
- }
- }
- }
-
- /// Queries user state when serving password unlock requests.
- pub fn get_with_password_unlock(
- db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
- skm: &SuperKeyManager,
- user_id: UserId,
- password: &Password,
- ) -> Result<UserState> {
- match skm.get_per_boot_key_by_user_id(user_id) {
- Some(super_key) => {
- log::info!("In get_with_password_unlock. Trying to unlock when already unlocked.");
- Ok(UserState::LskfUnlocked(super_key))
- }
- None => {
- //Check if a super key exists in the database or legacy database.
- //If not, return Uninitialized state.
- //Otherwise, try to unlock the super key and if successful,
- //return LskfUnlocked state
- skm.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
- .context("In get_with_password_unlock. Failed to unlock super key.")
- }
- }
- }
-
- /// Delete all the keys created on behalf of the user.
- /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
- /// encrypted keys.
- pub fn reset_user(
- db: &mut KeystoreDB,
- skm: &SuperKeyManager,
- legacy_migrator: &LegacyMigrator,
- user_id: UserId,
- keep_non_super_encrypted_keys: bool,
- ) -> Result<()> {
- // mark keys created on behalf of the user as unreferenced.
- legacy_migrator
- .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
- .context("In reset_user: Trying to delete legacy keys.")?;
- db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
- .context("In reset user. Error in unbinding keys.")?;
-
- //delete super key in cache, if exists
- skm.forget_all_keys_for_user(user_id);
- Ok(())
- }
-}
-
/// This enum represents three states a KeyMint Blob can be in, w.r.t super encryption.
/// `Sensitive` holds the non encrypted key and a reference to its super key.
/// `NonSensitive` holds a non encrypted key that is never supposed to be encrypted.
diff --git a/keystore2/src/try_insert.rs b/keystore2/src/try_insert.rs
deleted file mode 100644
index 6dd3962..0000000
--- a/keystore2/src/try_insert.rs
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! The TryInsert trait adds to Option<T> the method
-//! get_or_try_to_insert_with, which is analogous to
-//! get_or_insert_with, but allows the called function to fail and propagates the failure.
-
-/// The TryInsert trait adds to Option<T> the method
-/// get_or_try_to_insert_with, which is analogous to
-/// get_or_insert_with, but allows the called function to fail and propagates the failure.
-pub trait TryInsert {
- /// Type of the Ok branch of the Result
- type Item;
- /// Inserts a value computed from `f` into the option if it is [`None`],
- /// then returns a mutable reference to the contained value. If `f`
- /// returns Err, the Option is unchanged.
- ///
- /// # Examples
- ///
- /// ```
- /// let mut x = None;
- /// assert_eq!(x.get_or_try_to_insert_with(Err("oops".to_string())), Err("oops".to_string()))
- /// {
- /// let y: &mut u32 = x.get_or_try_to_insert_with(|| Ok(5))?;
- /// assert_eq!(y, &5);
- ///
- /// *y = 7;
- /// }
- ///
- /// assert_eq!(x, Some(7));
- /// ```
- fn get_or_try_to_insert_with<E, F: FnOnce() -> Result<Self::Item, E>>(
- &mut self,
- f: F,
- ) -> Result<&mut Self::Item, E>;
-}
-
-impl<T> TryInsert for Option<T> {
- type Item = T;
- fn get_or_try_to_insert_with<E, F: FnOnce() -> Result<Self::Item, E>>(
- &mut self,
- f: F,
- ) -> Result<&mut Self::Item, E> {
- if self.is_none() {
- *self = Some(f()?);
- }
-
- match self {
- Some(v) => Ok(v),
- // SAFETY: a `None` variant for `self` would have been replaced by a `Some`
- // variant in the code above.
- None => unsafe { std::hint::unreachable_unchecked() },
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use super::*;
-
- fn fails() -> Result<i32, String> {
- Err("fail".to_string())
- }
-
- fn succeeds() -> Result<i32, String> {
- Ok(99)
- }
-
- #[test]
- fn test() {
- let mut x = None;
- assert_eq!(x.get_or_try_to_insert_with(fails), Err("fail".to_string()));
- assert_eq!(x, None);
- assert_eq!(*x.get_or_try_to_insert_with(succeeds).unwrap(), 99);
- assert_eq!(x, Some(99));
- x = Some(42);
- assert_eq!(*x.get_or_try_to_insert_with(fails).unwrap(), 42);
- assert_eq!(x, Some(42));
- assert_eq!(*x.get_or_try_to_insert_with(succeeds).unwrap(), 42);
- assert_eq!(x, Some(42));
- *x.get_or_try_to_insert_with(fails).unwrap() = 2;
- assert_eq!(x, Some(2));
- *x.get_or_try_to_insert_with(succeeds).unwrap() = 3;
- assert_eq!(x, Some(3));
- x = None;
- *x.get_or_try_to_insert_with(succeeds).unwrap() = 5;
- assert_eq!(x, Some(5));
- }
-}
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 82e6700..9db2eb9 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -15,15 +15,17 @@
//! This module implements utility functions used by the Keystore 2.0 service
//! implementation.
-use crate::error::{map_binder_status, Error, ErrorCode};
+use crate::error::{map_binder_status, map_km_error, Error, ErrorCode};
+use crate::key_parameter::KeyParameter;
use crate::permission;
use crate::permission::{KeyPerm, KeyPermSet, KeystorePerm};
use crate::{
database::{KeyType, KeystoreDB},
- globals::LEGACY_MIGRATOR,
+ globals::LEGACY_IMPORTER,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- KeyCharacteristics::KeyCharacteristics, Tag::Tag,
+ IKeyMintDevice::IKeyMintDevice, KeyCharacteristics::KeyCharacteristics,
+ KeyParameter::KeyParameter as KmKeyParameter, Tag::Tag,
};
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_security_apc::aidl::android::security::apc::{
@@ -40,6 +42,8 @@
APC_COMPAT_ERROR_IGNORED, APC_COMPAT_ERROR_OK, APC_COMPAT_ERROR_OPERATION_PENDING,
APC_COMPAT_ERROR_SYSTEM_ERROR,
};
+use keystore2_crypto::{aes_gcm_decrypt, aes_gcm_encrypt, ZVec};
+use std::iter::IntoIterator;
/// This function uses its namesake in the permission module and in
/// combination with with_calling_sid from the binder crate to check
@@ -103,9 +107,20 @@
}
/// This function checks whether the calling app has the Android permissions needed to attest device
-/// identifiers. It throws an error if the permissions cannot be verified, or if the caller doesn't
-/// have the right permissions, and returns silently otherwise.
+/// identifiers. It throws an error if the permissions cannot be verified or if the caller doesn't
+/// have the right permissions. Otherwise it returns silently.
pub fn check_device_attestation_permissions() -> anyhow::Result<()> {
+ check_android_permission("android.permission.READ_PRIVILEGED_PHONE_STATE")
+}
+
+/// This function checks whether the calling app has the Android permissions needed to attest the
+/// device-unique identifier. It throws an error if the permissions cannot be verified or if the
+/// caller doesn't have the right permissions. Otherwise it returns silently.
+pub fn check_unique_id_attestation_permissions() -> anyhow::Result<()> {
+ check_android_permission("android.permission.REQUEST_UNIQUE_ID_ATTESTATION")
+}
+
+fn check_android_permission(permission: &str) -> anyhow::Result<()> {
let permission_controller: Strong<dyn IPermissionController::IPermissionController> =
binder::get_interface("permission")?;
@@ -115,7 +130,7 @@
500,
);
permission_controller.checkPermission(
- "android.permission.READ_PRIVILEGED_PHONE_STATE",
+ permission,
ThreadState::get_calling_pid(),
ThreadState::get_calling_uid() as i32,
)
@@ -135,18 +150,60 @@
/// representation of the keystore service.
pub fn key_characteristics_to_internal(
key_characteristics: Vec<KeyCharacteristics>,
-) -> Vec<crate::key_parameter::KeyParameter> {
+) -> Vec<KeyParameter> {
key_characteristics
.into_iter()
.flat_map(|aidl_key_char| {
let sec_level = aidl_key_char.securityLevel;
- aidl_key_char.authorizations.into_iter().map(move |aidl_kp| {
- crate::key_parameter::KeyParameter::new(aidl_kp.into(), sec_level)
- })
+ aidl_key_char
+ .authorizations
+ .into_iter()
+ .map(move |aidl_kp| KeyParameter::new(aidl_kp.into(), sec_level))
})
.collect()
}
+/// This function can be used to upgrade key blobs on demand. The return value of
+/// `km_op` is inspected and if ErrorCode::KEY_REQUIRES_UPGRADE is encountered,
+/// an attempt is made to upgrade the key blob. On success `new_blob_handler` is called
+/// with the upgraded blob as argument. Then `km_op` is called a second time with the
+/// upgraded blob as argument. On success a tuple of the `km_op`s result and the
+/// optional upgraded blob is returned.
+pub fn upgrade_keyblob_if_required_with<T, KmOp, NewBlobHandler>(
+ km_dev: &dyn IKeyMintDevice,
+ key_blob: &[u8],
+ upgrade_params: &[KmKeyParameter],
+ km_op: KmOp,
+ new_blob_handler: NewBlobHandler,
+) -> Result<(T, Option<Vec<u8>>)>
+where
+ KmOp: Fn(&[u8]) -> Result<T, Error>,
+ NewBlobHandler: FnOnce(&[u8]) -> Result<()>,
+{
+ match km_op(key_blob) {
+ Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
+ let upgraded_blob = {
+ let _wp = watchdog::watch_millis(
+ "In utils::upgrade_keyblob_if_required_with: calling upgradeKey.",
+ 500,
+ );
+ map_km_error(km_dev.upgradeKey(key_blob, upgrade_params))
+ }
+ .context("In utils::upgrade_keyblob_if_required_with: Upgrade failed.")?;
+
+ new_blob_handler(&upgraded_blob)
+ .context("In utils::upgrade_keyblob_if_required_with: calling new_blob_handler.")?;
+
+ km_op(&upgraded_blob)
+ .map(|v| (v, Some(upgraded_blob)))
+ .context("In utils::upgrade_keyblob_if_required_with: Calling km_op after upgrade.")
+ }
+ r => r
+ .map(|v| (v, None))
+ .context("In utils::upgrade_keyblob_if_required_with: Calling km_op."),
+ }
+}
+
/// Converts a set of key characteristics from the internal representation into a set of
/// Authorizations as they are used to convey key characteristics to the clients of keystore.
pub fn key_parameters_to_authorizations(
@@ -211,7 +268,7 @@
) -> Result<Vec<KeyDescriptor>> {
let mut result = Vec::new();
result.append(
- &mut LEGACY_MIGRATOR
+ &mut LEGACY_IMPORTER
.list_uid(domain, namespace)
.context("In list_key_entries: Trying to list legacy keys.")?,
);
@@ -255,6 +312,36 @@
}
}
+/// Trait implemented by objects that can be used to decrypt cipher text using AES-GCM.
+pub trait AesGcm {
+ /// Deciphers `data` using the initialization vector `iv` and AEAD tag `tag`
+ /// and AES-GCM. The implementation provides the key material and selects
+ /// the implementation variant, e.g., AES128 or AES256.
+ fn decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec>;
+
+ /// Encrypts `data` and returns the ciphertext, the initialization vector `iv`
+ /// and AEAD tag `tag`. The implementation provides the key material and selects
+ /// the implementation variant, e.g., AES128 or AES256.
+ fn encrypt(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)>;
+}
+
+/// Marks an object as AES-GCM key.
+pub trait AesGcmKey {
+ /// Provides access to the raw key material.
+ fn key(&self) -> &[u8];
+}
+
+impl<T: AesGcmKey> AesGcm for T {
+ fn decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
+ aes_gcm_decrypt(data, iv, tag, self.key())
+ .context("In AesGcm<T>::decrypt: Decryption failed")
+ }
+
+ fn encrypt(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
+ aes_gcm_encrypt(plaintext, self.key()).context("In AesGcm<T>::encrypt: Encryption failed.")
+ }
+}
+
/// This module provides empty/noop implementations of the watch dog utility functions.
#[cfg(not(feature = "watchdog"))]
pub mod watchdog {
diff --git a/keystore2/src/watchdog.rs b/keystore2/src/watchdog.rs
index 9cca171..a26b632 100644
--- a/keystore2/src/watchdog.rs
+++ b/keystore2/src/watchdog.rs
@@ -111,11 +111,44 @@
}
self.last_report = Instant::now();
self.has_overdue = has_overdue;
- log::warn!("Keystore Watchdog report:");
- log::warn!("Overdue records:");
+ log::warn!("### Keystore Watchdog report - BEGIN ###");
+
let now = Instant::now();
- for (i, r) in self.records.iter() {
- if r.deadline.saturating_duration_since(now) == Duration::new(0, 0) {
+ let mut overdue_records: Vec<(&Index, &Record)> = self
+ .records
+ .iter()
+ .filter(|(_, r)| r.deadline.saturating_duration_since(now) == Duration::new(0, 0))
+ .collect();
+
+ log::warn!("When extracting from a bug report, please include this header");
+ log::warn!("and all {} records below.", overdue_records.len());
+
+ // Watch points can be nested, i.e., a single thread may have multiple armed
+ // watch points. And the most recent on each thread (thread recent) is closest to the point
+ // where something is blocked. Furthermore, keystore2 has various critical section
+ // and common backend resources KeyMint that can only be entered serialized. So if one
+ // thread hangs, the others will soon follow suite. Thus the oldest "thread recent" watch
+ // point is most likely pointing toward the culprit.
+ // Thus, sort by start time first.
+ overdue_records.sort_unstable_by(|(_, r1), (_, r2)| r1.started.cmp(&r2.started));
+ // Then we group all of the watch points per thread, preserving the order within
+ // groups.
+ let groups = overdue_records.iter().fold(
+ HashMap::<thread::ThreadId, Vec<(&Index, &Record)>>::new(),
+ |mut acc, (i, r)| {
+ acc.entry(i.tid).or_default().push((i, r));
+ acc
+ },
+ );
+ // Put the groups back into a vector.
+ let mut groups: Vec<Vec<(&Index, &Record)>> = groups.into_iter().map(|(_, v)| v).collect();
+ // Sort the groups by start time of the most recent (.last()) of each group.
+ // It is panic safe to use unwrap() here because we never add empty vectors to
+ // the map.
+ groups.sort_by(|v1, v2| v1.last().unwrap().1.started.cmp(&v2.last().unwrap().1.started));
+
+ for g in groups.iter() {
+ for (i, r) in g.iter() {
match &r.callback {
Some(cb) => {
log::warn!(
@@ -139,6 +172,7 @@
}
}
}
+ log::warn!("### Keystore Watchdog report - END ###");
true
}
diff --git a/keystore2/test_utils/authorizations.rs b/keystore2/test_utils/authorizations.rs
new file mode 100644
index 0000000..4fbe124
--- /dev/null
+++ b/keystore2/test_utils/authorizations.rs
@@ -0,0 +1,88 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements test utils to create Authorizations.
+
+use std::ops::Deref;
+
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, Digest::Digest, EcCurve::EcCurve, KeyParameter::KeyParameter,
+ KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, Tag::Tag,
+};
+
+/// Helper struct to create set of Authorizations.
+pub struct AuthSetBuilder(Vec<KeyParameter>);
+
+impl Default for AuthSetBuilder {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl AuthSetBuilder {
+ /// Creates new Authorizations list.
+ pub fn new() -> Self {
+ Self(Vec::new())
+ }
+
+ /// Add Purpose.
+ pub fn purpose(mut self, p: KeyPurpose) -> Self {
+ self.0.push(KeyParameter { tag: Tag::PURPOSE, value: KeyParameterValue::KeyPurpose(p) });
+ self
+ }
+
+ /// Add Digest.
+ pub fn digest(mut self, d: Digest) -> Self {
+ self.0.push(KeyParameter { tag: Tag::DIGEST, value: KeyParameterValue::Digest(d) });
+ self
+ }
+
+ /// Add Algorithm.
+ pub fn algorithm(mut self, a: Algorithm) -> Self {
+ self.0.push(KeyParameter { tag: Tag::ALGORITHM, value: KeyParameterValue::Algorithm(a) });
+ self
+ }
+
+ /// Add EC-Curve.
+ pub fn ec_curve(mut self, e: EcCurve) -> Self {
+ self.0.push(KeyParameter { tag: Tag::EC_CURVE, value: KeyParameterValue::EcCurve(e) });
+ self
+ }
+
+ /// Add Attestation-Challenge.
+ pub fn attestation_challenge(mut self, b: Vec<u8>) -> Self {
+ self.0.push(KeyParameter {
+ tag: Tag::ATTESTATION_CHALLENGE,
+ value: KeyParameterValue::Blob(b),
+ });
+ self
+ }
+
+ /// Add Attestation-ID.
+ pub fn attestation_app_id(mut self, b: Vec<u8>) -> Self {
+ self.0.push(KeyParameter {
+ tag: Tag::ATTESTATION_APPLICATION_ID,
+ value: KeyParameterValue::Blob(b),
+ });
+ self
+ }
+}
+
+impl Deref for AuthSetBuilder {
+ type Target = Vec<KeyParameter>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
diff --git a/keystore2/test_utils/key_generations.rs b/keystore2/test_utils/key_generations.rs
new file mode 100644
index 0000000..f49aa9f
--- /dev/null
+++ b/keystore2/test_utils/key_generations.rs
@@ -0,0 +1,68 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements test utils to generate various types of keys.
+
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, Digest::Digest, EcCurve::EcCurve, KeyPurpose::KeyPurpose,
+};
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel, KeyDescriptor::KeyDescriptor,
+ KeyMetadata::KeyMetadata,
+};
+
+use crate::authorizations::AuthSetBuilder;
+
+const SELINUX_SHELL_NAMESPACE: i64 = 1;
+
+/// Generate attested EC Key blob using given security level with below key parameters -
+/// Purposes: SIGN and VERIFY
+/// Digest: SHA_2_256
+/// Curve: P_256
+pub fn generate_ec_p256_signing_key_with_attestation(
+ sec_level: &binder::Strong<dyn IKeystoreSecurityLevel>,
+) -> binder::Result<KeyMetadata> {
+ let att_challenge: &[u8] = b"foo";
+ let att_app_id: &[u8] = b"bar";
+ let gen_params = AuthSetBuilder::new()
+ .algorithm(Algorithm::EC)
+ .purpose(KeyPurpose::SIGN)
+ .purpose(KeyPurpose::VERIFY)
+ .digest(Digest::SHA_2_256)
+ .ec_curve(EcCurve::P_256)
+ .attestation_challenge(att_challenge.to_vec())
+ .attestation_app_id(att_app_id.to_vec());
+
+ match sec_level.generateKey(
+ &KeyDescriptor {
+ domain: Domain::BLOB,
+ nspace: SELINUX_SHELL_NAMESPACE,
+ alias: None,
+ blob: None,
+ },
+ None,
+ &gen_params,
+ 0,
+ b"entropy",
+ ) {
+ Ok(key_metadata) => {
+ assert!(key_metadata.certificate.is_some());
+ assert!(key_metadata.certificateChain.is_some());
+ assert!(key_metadata.key.blob.is_some());
+
+ Ok(key_metadata)
+ }
+ Err(e) => Err(e),
+ }
+}
diff --git a/keystore2/test_utils/lib.rs b/keystore2/test_utils/lib.rs
index a355544..c63bfac 100644
--- a/keystore2/test_utils/lib.rs
+++ b/keystore2/test_utils/lib.rs
@@ -19,8 +19,14 @@
use std::path::{Path, PathBuf};
use std::{env::temp_dir, ops::Deref};
+use android_system_keystore2::aidl::android::system::keystore2::IKeystoreService::IKeystoreService;
+
+pub mod authorizations;
+pub mod key_generations;
pub mod run_as;
+static KS2_SERVICE_NAME: &str = "android.system.keystore2.IKeystoreService/default";
+
/// Represents the lifecycle of a temporary directory for testing.
#[derive(Debug)]
pub struct TempDir {
@@ -104,3 +110,8 @@
&self.0
}
}
+
+/// Get Keystore2 service.
+pub fn get_keystore_service() -> binder::Strong<dyn IKeystoreService> {
+ binder::get_interface(KS2_SERVICE_NAME).unwrap()
+}
diff --git a/keystore2/test_utils/run_as.rs b/keystore2/test_utils/run_as.rs
index d42303d..2485ab5 100644
--- a/keystore2/test_utils/run_as.rs
+++ b/keystore2/test_utils/run_as.rs
@@ -30,9 +30,11 @@
use nix::sys::wait::{waitpid, WaitStatus};
use nix::unistd::{
close, fork, pipe as nix_pipe, read as nix_read, setgid, setuid, write as nix_write,
- ForkResult, Gid, Uid,
+ ForkResult, Gid, Pid, Uid,
};
use serde::{de::DeserializeOwned, Serialize};
+use std::io::{Read, Write};
+use std::marker::PhantomData;
use std::os::unix::io::RawFd;
fn transition(se_context: selinux::Context, uid: Uid, gid: Gid) {
@@ -48,17 +50,10 @@
/// reads from the pipe into an expending vector, until no more data can be read.
struct PipeReader(RawFd);
-impl PipeReader {
- pub fn read_all(&self) -> Result<Vec<u8>, nix::Error> {
- let mut buffer = [0u8; 128];
- let mut result = Vec::<u8>::new();
- loop {
- let bytes = nix_read(self.0, &mut buffer)?;
- if bytes == 0 {
- return Ok(result);
- }
- result.extend_from_slice(&buffer[0..bytes]);
- }
+impl Read for PipeReader {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ let bytes = nix_read(self.0, buf)?;
+ Ok(bytes)
}
}
@@ -73,46 +68,264 @@
/// writes the given buffer into the pipe, returning the number of bytes written.
struct PipeWriter(RawFd);
-impl PipeWriter {
- pub fn write(&self, data: &[u8]) -> Result<usize, nix::Error> {
- nix_write(self.0, data)
- }
-}
-
impl Drop for PipeWriter {
fn drop(&mut self) {
close(self.0).expect("Failed to close writer pipe fd.");
}
}
+impl Write for PipeWriter {
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+ let written = nix_write(self.0, buf)?;
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> std::io::Result<()> {
+ // Flush is a NO-OP.
+ Ok(())
+ }
+}
+
+/// Denotes the sender side of a serializing channel.
+pub struct ChannelWriter<T: Serialize + DeserializeOwned>(PipeWriter, PhantomData<T>);
+
+impl<T: Serialize + DeserializeOwned> ChannelWriter<T> {
+ /// Sends a serializable object to the corresponding ChannelReader.
+ /// Sending is always non blocking. Panics if any error occurs during io or serialization.
+ pub fn send(&mut self, value: &T) {
+ let serialized = serde_cbor::to_vec(value)
+ .expect("In ChannelWriter::send: Failed to serialize to vector.");
+ let size = serialized.len().to_be_bytes();
+ match self.0.write(&size).expect("In ChannelWriter::send: Failed to write serialized size.")
+ {
+ w if w != std::mem::size_of::<usize>() => {
+ panic!(
+ "In ChannelWriter::send: Failed to write serialized size. (written: {}).",
+ w
+ );
+ }
+ _ => {}
+ };
+ match self
+ .0
+ .write(&serialized)
+ .expect("In ChannelWriter::send: Failed to write serialized data.")
+ {
+ w if w != serialized.len() => {
+ panic!(
+ "In ChannelWriter::send: Failed to write serialized data. (written: {}).",
+ w
+ );
+ }
+ _ => {}
+ };
+ }
+}
+
+/// Represents the receiving end of a serializing channel.
+pub struct ChannelReader<T>(PipeReader, PhantomData<T>);
+
+impl<T: Serialize + DeserializeOwned> ChannelReader<T> {
+ /// Receives a serializable object from the corresponding ChannelWriter.
+ /// Receiving blocks until an object of type T has been read from the channel.
+ /// Panics if an error occurs during io or deserialization.
+ pub fn recv(&mut self) -> T {
+ let mut size_buffer = [0u8; std::mem::size_of::<usize>()];
+ match self.0.read(&mut size_buffer).expect("In ChannelReader::recv: Failed to read size.") {
+ r if r != size_buffer.len() => {
+ panic!("In ChannelReader::recv: Failed to read size. Insufficient data: {}", r);
+ }
+ _ => {}
+ };
+ let size = usize::from_be_bytes(size_buffer);
+ let mut data_buffer = vec![0u8; size];
+ match self
+ .0
+ .read(&mut data_buffer)
+ .expect("In ChannelReader::recv: Failed to read serialized data.")
+ {
+ r if r != data_buffer.len() => {
+ panic!(
+ "In ChannelReader::recv: Failed to read serialized data. Insufficient data: {}",
+ r
+ );
+ }
+ _ => {}
+ };
+
+ serde_cbor::from_slice(&data_buffer)
+ .expect("In ChannelReader::recv: Failed to deserialize data.")
+ }
+}
+
fn pipe() -> Result<(PipeReader, PipeWriter), nix::Error> {
let (read_fd, write_fd) = nix_pipe()?;
Ok((PipeReader(read_fd), PipeWriter(write_fd)))
}
+fn pipe_channel<T>() -> Result<(ChannelReader<T>, ChannelWriter<T>), nix::Error>
+where
+ T: Serialize + DeserializeOwned,
+{
+ let (reader, writer) = pipe()?;
+ Ok((
+ ChannelReader::<T>(reader, Default::default()),
+ ChannelWriter::<T>(writer, Default::default()),
+ ))
+}
+
+/// Handle for handling child processes.
+pub struct ChildHandle<R: Serialize + DeserializeOwned, M: Serialize + DeserializeOwned> {
+ pid: Pid,
+ result_reader: ChannelReader<R>,
+ cmd_writer: ChannelWriter<M>,
+ response_reader: ChannelReader<M>,
+ exit_status: Option<WaitStatus>,
+}
+
+impl<R: Serialize + DeserializeOwned, M: Serialize + DeserializeOwned> ChildHandle<R, M> {
+ /// Send a command message to the child.
+ pub fn send(&mut self, data: &M) {
+ self.cmd_writer.send(data)
+ }
+
+ /// Receive a response from the child.
+ pub fn recv(&mut self) -> M {
+ self.response_reader.recv()
+ }
+
+ /// Get child result. Panics if the child did not exit with status 0 or if a serialization
+ /// error occurred.
+ pub fn get_result(mut self) -> R {
+ let status =
+ waitpid(self.pid, None).expect("ChildHandle::wait: Failed while waiting for child.");
+ match status {
+ WaitStatus::Exited(pid, 0) => {
+ // Child exited successfully.
+ // Read the result from the pipe.
+ self.exit_status = Some(WaitStatus::Exited(pid, 0));
+ self.result_reader.recv()
+ }
+ WaitStatus::Exited(pid, c) => {
+ panic!("Child did not exit as expected: {:?}", WaitStatus::Exited(pid, c));
+ }
+ status => {
+ panic!("Child did not exit at all: {:?}", status);
+ }
+ }
+ }
+}
+
+impl<R: Serialize + DeserializeOwned, M: Serialize + DeserializeOwned> Drop for ChildHandle<R, M> {
+ fn drop(&mut self) {
+ if self.exit_status.is_none() {
+ panic!("Child result not checked.")
+ }
+ }
+}
+
+/// Run the given closure in a new process running with the new identity given as
+/// `uid`, `gid`, and `se_context`. Parent process will run without waiting for child status.
+///
+/// # Safety
+/// run_as_child runs the given closure in the child branch of fork. And it uses non
+/// async signal safe API. This means that calling this function in a multi threaded program
+/// yields undefined behavior in the child. As of this writing, it is safe to call this function
+/// from a Rust device test, because every test itself is spawned as a separate process.
+///
+/// # Safety Binder
+/// It is okay for the closure to use binder services, however, this does not work
+/// if the parent initialized libbinder already. So do not use binder outside of the closure
+/// in your test.
+pub unsafe fn run_as_child<F, R, M>(
+ se_context: &str,
+ uid: Uid,
+ gid: Gid,
+ f: F,
+) -> Result<ChildHandle<R, M>, nix::Error>
+where
+ R: Serialize + DeserializeOwned,
+ M: Serialize + DeserializeOwned,
+ F: 'static + Send + FnOnce(&mut ChannelReader<M>, &mut ChannelWriter<M>) -> R,
+{
+ let se_context =
+ selinux::Context::new(se_context).expect("Unable to construct selinux::Context.");
+ let (result_reader, mut result_writer) = pipe_channel().expect("Failed to create pipe.");
+ let (mut cmd_reader, cmd_writer) = pipe_channel().expect("Failed to create cmd pipe.");
+ let (response_reader, mut response_writer) =
+ pipe_channel().expect("Failed to create cmd pipe.");
+
+ match fork() {
+ Ok(ForkResult::Parent { child, .. }) => {
+ drop(response_writer);
+ drop(cmd_reader);
+ drop(result_writer);
+
+ Ok(ChildHandle::<R, M> {
+ pid: child,
+ result_reader,
+ response_reader,
+ cmd_writer,
+ exit_status: None,
+ })
+ }
+ Ok(ForkResult::Child) => {
+ drop(cmd_writer);
+ drop(response_reader);
+ drop(result_reader);
+
+ // This will panic on error or insufficient privileges.
+ transition(se_context, uid, gid);
+
+ // Run the closure.
+ let result = f(&mut cmd_reader, &mut response_writer);
+
+ // Serialize the result of the closure.
+ result_writer.send(&result);
+
+ // Set exit status to `0`.
+ std::process::exit(0);
+ }
+ Err(errno) => {
+ panic!("Failed to fork: {:?}", errno);
+ }
+ }
+}
+
/// Run the given closure in a new process running with the new identity given as
/// `uid`, `gid`, and `se_context`.
-pub fn run_as<F, R>(se_context: &str, uid: Uid, gid: Gid, f: F) -> R
+///
+/// # Safety
+/// run_as runs the given closure in the child branch of fork. And it uses non
+/// async signal safe API. This means that calling this function in a multi threaded program
+/// yields undefined behavior in the child. As of this writing, it is safe to call this function
+/// from a Rust device test, because every test itself is spawned as a separate process.
+///
+/// # Safety Binder
+/// It is okay for the closure to use binder services, however, this does not work
+/// if the parent initialized libbinder already. So do not use binder outside of the closure
+/// in your test.
+pub unsafe fn run_as<F, R>(se_context: &str, uid: Uid, gid: Gid, f: F) -> R
where
R: Serialize + DeserializeOwned,
F: 'static + Send + FnOnce() -> R,
{
let se_context =
selinux::Context::new(se_context).expect("Unable to construct selinux::Context.");
- let (reader, writer) = pipe().expect("Failed to create pipe.");
+ let (mut reader, mut writer) = pipe_channel::<R>().expect("Failed to create pipe.");
- match unsafe { fork() } {
+ match fork() {
Ok(ForkResult::Parent { child, .. }) => {
drop(writer);
let status = waitpid(child, None).expect("Failed while waiting for child.");
if let WaitStatus::Exited(_, 0) = status {
// Child exited successfully.
// Read the result from the pipe.
- let serialized_result =
- reader.read_all().expect("Failed to read result from child.");
+ // let serialized_result =
+ // reader.read_all().expect("Failed to read result from child.");
// Deserialize the result and return it.
- serde_cbor::from_slice(&serialized_result).expect("Failed to deserialize result.")
+ reader.recv()
} else {
panic!("Child did not exit as expected {:?}", status);
}
@@ -125,10 +338,7 @@
let result = f();
// Serialize the result of the closure.
- let vec = serde_cbor::to_vec(&result).expect("Result serialization failed");
-
- // Send the result to the parent using the pipe.
- writer.write(&vec).expect("Failed to send serialized result to parent.");
+ writer.send(&result);
// Set exit status to `0`.
std::process::exit(0);
@@ -151,9 +361,13 @@
#[test]
#[should_panic]
fn test_run_as_panics_on_closure_panic() {
- run_as(selinux::getcon().unwrap().to_str().unwrap(), getuid(), getgid(), || {
- panic!("Closure must panic.")
- });
+ // Safety: run_as must be called from a single threaded process.
+ // This device test is run as a separate single threaded process.
+ unsafe {
+ run_as(selinux::getcon().unwrap().to_str().unwrap(), getuid(), getgid(), || {
+ panic!("Closure must panic.")
+ })
+ };
}
static TARGET_UID: Uid = Uid::from_raw(10020);
@@ -163,11 +377,15 @@
/// Tests that the closure is running as the target identity.
#[test]
fn test_transition_to_untrusted_app() {
- run_as(TARGET_CTX, TARGET_UID, TARGET_GID, || {
- assert_eq!(TARGET_UID, getuid());
- assert_eq!(TARGET_GID, getgid());
- assert_eq!(TARGET_CTX, selinux::getcon().unwrap().to_str().unwrap());
- });
+ // Safety: run_as must be called from a single threaded process.
+ // This device test is run as a separate single threaded process.
+ unsafe {
+ run_as(TARGET_CTX, TARGET_UID, TARGET_GID, || {
+ assert_eq!(TARGET_UID, getuid());
+ assert_eq!(TARGET_GID, getgid());
+ assert_eq!(TARGET_CTX, selinux::getcon().unwrap().to_str().unwrap());
+ })
+ };
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
@@ -185,7 +403,72 @@
c: "supercalifragilisticexpialidocious".to_owned(),
};
let test_result_clone = test_result.clone();
- let result = run_as(TARGET_CTX, TARGET_UID, TARGET_GID, || test_result_clone);
+ // Safety: run_as must be called from a single threaded process.
+ // This device test is run as a separate single threaded process.
+ let result = unsafe { run_as(TARGET_CTX, TARGET_UID, TARGET_GID, || test_result_clone) };
assert_eq!(test_result, result);
}
+
+ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+ enum PingPong {
+ Ping,
+ Pong,
+ }
+
+ /// Tests that closure is running under given user identity and communicates with calling
+ /// process using pipe.
+ #[test]
+ fn test_run_as_child() {
+ let test_result = SomeResult {
+ a: 5,
+ b: 0xffffffffffffffff,
+ c: "supercalifragilisticexpialidocious".to_owned(),
+ };
+ let test_result_clone = test_result.clone();
+
+ // Safety: run_as_child must be called from a single threaded process.
+ // This device test is run as a separate single threaded process.
+ let mut child_handle: ChildHandle<SomeResult, PingPong> = unsafe {
+ run_as_child(TARGET_CTX, TARGET_UID, TARGET_GID, |cmd_reader, response_writer| {
+ assert_eq!(TARGET_UID, getuid());
+ assert_eq!(TARGET_GID, getgid());
+ assert_eq!(TARGET_CTX, selinux::getcon().unwrap().to_str().unwrap());
+
+ let ping: PingPong = cmd_reader.recv();
+ assert_eq!(ping, PingPong::Ping);
+
+ response_writer.send(&PingPong::Pong);
+
+ let ping: PingPong = cmd_reader.recv();
+ assert_eq!(ping, PingPong::Ping);
+ let pong: PingPong = cmd_reader.recv();
+ assert_eq!(pong, PingPong::Pong);
+
+ response_writer.send(&PingPong::Pong);
+ response_writer.send(&PingPong::Ping);
+
+ test_result_clone
+ })
+ .unwrap()
+ };
+
+ // Send one ping.
+ child_handle.send(&PingPong::Ping);
+
+ // Expect one pong.
+ let pong = child_handle.recv();
+ assert_eq!(pong, PingPong::Pong);
+
+ // Send ping and pong.
+ child_handle.send(&PingPong::Ping);
+ child_handle.send(&PingPong::Pong);
+
+ // Expect pong and ping.
+ let pong = child_handle.recv();
+ assert_eq!(pong, PingPong::Pong);
+ let ping = child_handle.recv();
+ assert_eq!(ping, PingPong::Ping);
+
+ assert_eq!(child_handle.get_result(), test_result);
+ }
}
diff --git a/keystore2/tests/legacy_blobs/Android.bp b/keystore2/tests/legacy_blobs/Android.bp
new file mode 100644
index 0000000..9322a41
--- /dev/null
+++ b/keystore2/tests/legacy_blobs/Android.bp
@@ -0,0 +1,51 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
+rust_test {
+    name: "keystore2_legacy_blobs_test",
+    srcs: ["keystore2_legacy_blob_tests.rs"],
+    test_suites: [
+        "general-tests",
+    ],
+    // A hand-written AndroidTest.xml is used instead of an auto-generated config
+    // so the test can run as root and with --test-threads=1 (run_as requires a
+    // single threaded process).
+    test_config: "AndroidTest.xml",
+
+    rustlibs: [
+        "libkeystore2_with_test_utils",
+        "libkeystore2_crypto_rust",
+        "android.system.keystore2-V2-rust",
+        "android.hardware.security.keymint-V2-rust",
+        "android.security.maintenance-rust",
+        "android.security.authorization-rust",
+        "librustutils",
+        "libkeystore2_test_utils",
+        "libnix",
+        "libanyhow",
+        "libbinder_rs",
+        "liblazy_static",
+        "liblibc",
+        "libserde",
+        "libthiserror",
+    ],
+    require_root: true,
+}
diff --git a/keystore2/tests/legacy_blobs/AndroidTest.xml b/keystore2/tests/legacy_blobs/AndroidTest.xml
new file mode 100644
index 0000000..ea83fbf
--- /dev/null
+++ b/keystore2/tests/legacy_blobs/AndroidTest.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2022 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Config to run keystore2_legacy_blobs_test device tests.">
+
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option
+ name="push"
+ value="keystore2_legacy_blobs_test->/data/local/tmp/keystore2_legacy_blobs_test"
+ />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
+ <option name="test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="keystore2_legacy_blobs_test" />
+ <option name="native-test-flag" value="--test-threads=1" />
+ </test>
+</configuration>
diff --git a/keystore2/tests/legacy_blobs/keystore2_legacy_blob_tests.rs b/keystore2/tests/legacy_blobs/keystore2_legacy_blob_tests.rs
new file mode 100644
index 0000000..6def39e
--- /dev/null
+++ b/keystore2/tests/legacy_blobs/keystore2_legacy_blob_tests.rs
@@ -0,0 +1,579 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use nix::unistd::{getuid, Gid, Uid};
+use rustutils::users::AID_USER_OFFSET;
+use serde::{Deserialize, Serialize};
+
+use std::ops::Deref;
+use std::path::PathBuf;
+
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel;
+
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor,
+};
+
+use android_security_maintenance::aidl::android::security::maintenance::{
+ IKeystoreMaintenance::IKeystoreMaintenance, UserState::UserState,
+};
+
+use android_security_authorization::aidl::android::security::authorization::{
+ IKeystoreAuthorization::IKeystoreAuthorization, LockScreenEvent::LockScreenEvent,
+};
+
+use keystore2::key_parameter::KeyParameter as KsKeyparameter;
+use keystore2::legacy_blob::test_utils::legacy_blob_test_vectors::*;
+use keystore2::legacy_blob::test_utils::*;
+use keystore2::legacy_blob::LegacyKeyCharacteristics;
+use keystore2::utils::AesGcm;
+use keystore2_crypto::{Password, ZVec};
+
+use keystore2_test_utils::get_keystore_service;
+use keystore2_test_utils::key_generations;
+use keystore2_test_utils::run_as;
+
+static USER_MANAGER_SERVICE_NAME: &str = "android.security.maintenance";
+static AUTH_SERVICE_NAME: &str = "android.security.authorization";
+const SELINUX_SHELL_NAMESPACE: i64 = 1;
+
+/// Connects to the keystore maintenance service over binder.
+/// Panics if the service is not available.
+fn get_maintenance() -> binder::Strong<dyn IKeystoreMaintenance> {
+    let maintenance: binder::Strong<dyn IKeystoreMaintenance> =
+        binder::get_interface(USER_MANAGER_SERVICE_NAME).unwrap();
+    maintenance
+}
+
+/// Connects to the keystore authorization service over binder.
+/// Panics if the service is not available.
+fn get_authorization() -> binder::Strong<dyn IKeystoreAuthorization> {
+    let authorization: binder::Strong<dyn IKeystoreAuthorization> =
+        binder::get_interface(AUTH_SERVICE_NAME).unwrap();
+    authorization
+}
+
+/// Result of key generation performed in the su context. Serialized across the
+/// `run_as` process boundary so the app-context process can validate the import.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+struct KeygenResult {
+    // Leaf certificate bytes of the generated key.
+    cert: Vec<u8>,
+    // Certificate chain bytes of the generated key.
+    cert_chain: Vec<u8>,
+    // Key parameters reported for the generated key.
+    key_parameters: Vec<KsKeyparameter>,
+}
+
+/// Test key material held in a `ZVec`; used with the AES-GCM helpers below.
+struct TestKey(ZVec);
+
+impl keystore2::utils::AesGcmKey for TestKey {
+    // Expose the raw key bytes to the AES-GCM encrypt/decrypt helpers.
+    fn key(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+// Allow a TestKey to be used wherever a byte slice is expected.
+impl Deref for TestKey {
+    type Target = [u8];
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+/// Restarts the keystore2 service by killing it and waiting for init to
+/// re-launch it. Used to force a re-scan of the legacy blob filesystem layout.
+fn keystore2_restart_service() {
+    let output = std::process::Command::new("pidof")
+        .arg("keystore2")
+        .output()
+        .expect("failed to execute pidof keystore2");
+
+    // `pidof` may report several whitespace separated pids. The previous
+    // implementation concatenated the digits of all of them into one bogus pid;
+    // take the first pid instead and verify it is purely numeric.
+    let stdout = String::from_utf8(output.stdout).unwrap();
+    let id = stdout.split_whitespace().next().expect("keystore2 service not running").to_string();
+    assert!(id.chars().all(|c| c.is_ascii_digit()), "unexpected pidof output: {}", stdout);
+
+    let _status = std::process::Command::new("kill").arg("-9").arg(id).status().unwrap();
+
+    // Loop till we find keystore2 service up and running, sleeping briefly
+    // between probes instead of busy-spinning a full CPU core.
+    loop {
+        let output = std::process::Command::new("pidof")
+            .arg("keystore2")
+            .output()
+            .expect("failed to execute pidof keystore2");
+
+        if output.status.code() == Some(0) {
+            break;
+        }
+        std::thread::sleep(std::time::Duration::from_millis(50));
+    }
+}
+
+/// Create legacy blobs file layout for a user with user-id 99 and app-id 10001 with
+/// user-cert, ca-certs and encrypted key-characteristics files and tries to import
+/// these legacy blobs under user context.
+///
+/// Expected File layout for user with user-id "99" and app-id "10001" and key-alias
+/// "authbound":
+///     /data/misc/keystore/user_99/.masterkey
+///     /data/misc/keystore/user_99/9910001_USRPKEY_authbound
+///     /data/misc/keystore/user_99/.9910001_chr_USRPKEY_authbound
+///     /data/misc/keystore/user_99/9910001_USRCERT_authbound
+///     /data/misc/keystore/user_99/9910001_CACERT_authbound
+///
+/// Test performs below tasks -
+/// With su context it performs following tasks -
+///     1. Remove this user if already exist.
+///     2. Generate a key-blob, user cert-blob and ca-cert-blob to store it in legacy blobs file
+///        layout.
+///     3. Prepare file layout using generated key-blob, user cert and ca certs.
+///     4. Restart the keystore2 service to make it detect the populated legacy blobs.
+///     5. Inform the keystore2 service about the user and unlock the user.
+/// With user-99 context it performs following tasks -
+///     6. To load and import the legacy key using its alias.
+///     7. After successful key import validate the user cert and cert-chain with initially
+///        generated blobs.
+///     8. Validate imported key parameters. Imported key parameters list should be the combination
+///        of the key-parameters in characteristics file and the characteristics according to
+///        the augmentation rules. There might be duplicate entries with different values for the
+///        parameters like OS_VERSION, OS_PATCHLEVEL, BOOT_PATCHLEVEL, VENDOR_PATCHLEVEL etc.
+///     9. Confirm keystore2 service cleanup the legacy blobs after successful import.
+#[test]
+fn keystore2_encrypted_characteristics() -> anyhow::Result<()> {
+    // uid/gid of app-id 10001 in Android user 99.
+    let auid = 99 * AID_USER_OFFSET + 10001;
+    let agid = 99 * AID_USER_OFFSET + 10001;
+    static TARGET_CTX: &str = "u:r:untrusted_app:s0:c91,c256,c10,c20";
+    static TARGET_SU_CTX: &str = "u:r:su:s0";
+
+    // Cleanup user directory if it exists
+    let path_buf = PathBuf::from("/data/misc/keystore/user_99");
+    if path_buf.as_path().is_dir() {
+        std::fs::remove_dir_all(path_buf.as_path()).unwrap();
+    }
+
+    // Safety: run_as must be called from a single threaded process.
+    // This device test is run as a separate single threaded process.
+    let mut gen_key_result = unsafe {
+        run_as::run_as(TARGET_SU_CTX, Uid::from_raw(0), Gid::from_raw(0), || {
+            // Remove user if already exist.
+            let maint_service = get_maintenance();
+            match maint_service.onUserRemoved(99) {
+                Ok(_) => {
+                    println!("User was existed, deleted successfully");
+                }
+                Err(e) => {
+                    println!("onUserRemoved error: {:#?}", e);
+                }
+            }
+
+            let keystore2 = get_keystore_service();
+            let sec_level = keystore2
+                .getSecurityLevel(SecurityLevel::SecurityLevel::TRUSTED_ENVIRONMENT)
+                .unwrap();
+            // Generate Key BLOB and prepare legacy keystore blob files.
+            let key_metadata =
+                key_generations::generate_ec_p256_signing_key_with_attestation(&sec_level)
+                    .expect("Failed to generate key blob");
+
+            // Create keystore file layout for user_99.
+            // Derive the password key and use it to decrypt the test super key payload.
+            let pw: Password = PASSWORD.into();
+            let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+            let super_key =
+                TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap());
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_99");
+            if !path_buf.as_path().is_dir() {
+                std::fs::create_dir(path_buf.as_path()).unwrap();
+            }
+            path_buf.push(".masterkey");
+            if !path_buf.as_path().is_file() {
+                std::fs::write(path_buf.as_path(), SUPERKEY).unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_99");
+            path_buf.push("9910001_USRPKEY_authbound");
+            if !path_buf.as_path().is_file() {
+                make_encrypted_key_file(
+                    path_buf.as_path(),
+                    &super_key,
+                    &key_metadata.key.blob.unwrap(),
+                )
+                .unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_99");
+            path_buf.push(".9910001_chr_USRPKEY_authbound");
+            if !path_buf.as_path().is_file() {
+                make_encrypted_characteristics_file(path_buf.as_path(), &super_key, KEY_PARAMETERS)
+                    .unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_99");
+            path_buf.push("9910001_USRCERT_authbound");
+            if !path_buf.as_path().is_file() {
+                make_cert_blob_file(path_buf.as_path(), key_metadata.certificate.as_ref().unwrap())
+                    .unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_99");
+            path_buf.push("9910001_CACERT_authbound");
+            if !path_buf.as_path().is_file() {
+                make_cert_blob_file(
+                    path_buf.as_path(),
+                    key_metadata.certificateChain.as_ref().unwrap(),
+                )
+                .unwrap();
+            }
+
+            // Keystore2 disables the legacy importer when it finds the legacy database empty.
+            // However, if the device boots with an empty legacy database, the optimization kicks in
+            // and keystore2 never checks the legacy file system layout.
+            // So, restart keystore2 service to detect populated legacy database.
+            keystore2_restart_service();
+
+            let auth_service = get_authorization();
+            match auth_service.onLockScreenEvent(LockScreenEvent::UNLOCK, 99, Some(PASSWORD), None)
+            {
+                Ok(result) => {
+                    println!("Unlock Result: {:?}", result);
+                }
+                Err(e) => {
+                    panic!("Unlock should have succeeded: {:?}", e);
+                }
+            }
+
+            // NOTE(review): assumes UserState(1) denotes the unlocked state —
+            // confirm against android.security.maintenance.UserState.
+            let maint_service = get_maintenance();
+            assert_eq!(Ok(UserState(1)), maint_service.getState(99));
+
+            let mut key_params: Vec<KsKeyparameter> = Vec::new();
+            for param in key_metadata.authorizations {
+                let key_param = KsKeyparameter::new(param.keyParameter.into(), param.securityLevel);
+                key_params.push(key_param);
+            }
+
+            KeygenResult {
+                cert: key_metadata.certificate.unwrap(),
+                cert_chain: key_metadata.certificateChain.unwrap(),
+                key_parameters: key_params,
+            }
+        })
+    };
+
+    // Safety: run_as must be called from a single threaded process.
+    // This device test is run as a separate single threaded process.
+    unsafe {
+        run_as::run_as(TARGET_CTX, Uid::from_raw(auid), Gid::from_raw(agid), move || {
+            println!("UID: {}", getuid());
+            println!("Android User ID: {}", rustutils::users::multiuser_get_user_id(9910001));
+            println!("Android app ID: {}", rustutils::users::multiuser_get_app_id(9910001));
+
+            let test_alias = "authbound";
+            let keystore2 = get_keystore_service();
+
+            match keystore2.getKeyEntry(&KeyDescriptor {
+                domain: Domain::APP,
+                nspace: SELINUX_SHELL_NAMESPACE,
+                alias: Some(test_alias.to_string()),
+                blob: None,
+            }) {
+                Ok(key_entry_response) => {
+                    assert_eq!(
+                        key_entry_response.metadata.certificate.unwrap(),
+                        gen_key_result.cert
+                    );
+                    assert_eq!(
+                        key_entry_response.metadata.certificateChain.unwrap(),
+                        gen_key_result.cert_chain
+                    );
+                    assert_eq!(key_entry_response.metadata.key.domain, Domain::KEY_ID);
+                    assert_ne!(key_entry_response.metadata.key.nspace, 0);
+                    assert_eq!(
+                        key_entry_response.metadata.keySecurityLevel,
+                        SecurityLevel::SecurityLevel::TRUSTED_ENVIRONMENT
+                    );
+
+                    // Prepare KsKeyParameter list from getKeyEntry response Authorizations.
+                    let mut key_params: Vec<KsKeyparameter> = Vec::new();
+                    for param in key_entry_response.metadata.authorizations {
+                        let key_param =
+                            KsKeyparameter::new(param.keyParameter.into(), param.securityLevel);
+                        key_params.push(key_param);
+                    }
+
+                    // Combine keyparameters from gen_key_result and keyparameters
+                    // from legacy key-char file.
+                    let mut legacy_file_key_params: Vec<KsKeyparameter> = Vec::new();
+                    match structured_test_params() {
+                        LegacyKeyCharacteristics::File(legacy_key_params) => {
+                            // Keep only file params whose tag is not already in the
+                            // generated set, so tags are not double counted below.
+                            for param in &legacy_key_params {
+                                let mut present_in_gen_params = false;
+                                for gen_param in &gen_key_result.key_parameters {
+                                    if param.get_tag() == gen_param.get_tag() {
+                                        present_in_gen_params = true;
+                                    }
+                                }
+                                if !present_in_gen_params {
+                                    legacy_file_key_params.push(param.clone());
+                                }
+                            }
+                        }
+                        _ => {
+                            panic!("Expecting file characteristics");
+                        }
+                    }
+
+                    // Remove Key-Params which have security levels other than TRUSTED_ENVIRONMENT
+                    gen_key_result.key_parameters.retain(|in_element| {
+                        *in_element.security_level()
+                            == SecurityLevel::SecurityLevel::TRUSTED_ENVIRONMENT
+                    });
+
+                    println!("GetKeyEntry response key params: {:#?}", key_params);
+                    println!("Generated key params: {:#?}", gen_key_result.key_parameters);
+
+                    gen_key_result.key_parameters.append(&mut legacy_file_key_params);
+
+                    println!("Combined key params: {:#?}", gen_key_result.key_parameters);
+
+                    // Validate all keyparameters present in getKeyEntry response.
+                    for param in &key_params {
+                        gen_key_result.key_parameters.retain(|in_element| *in_element != *param);
+                    }
+
+                    println!(
+                        "GetKeyEntry response unmatched key params: {:#?}",
+                        gen_key_result.key_parameters
+                    );
+                    // Every expected parameter must have been matched (and removed) above.
+                    assert_eq!(gen_key_result.key_parameters.len(), 0);
+                }
+                Err(s) => {
+                    panic!("getKeyEntry should have succeeded. {:?}", s);
+                }
+            };
+        })
+    };
+
+    // Make sure keystore2 clean up imported legacy db.
+    let path_buf = PathBuf::from("/data/misc/keystore/user_99");
+    if path_buf.as_path().is_dir() {
+        panic!("Keystore service should have deleted this dir {:?}", path_buf);
+    }
+    Ok(())
+}
+
+/// Create legacy blobs file layout for a user with user-id 98 and app-id 10001 with encrypted
+/// user-cert and ca-certs files and tries to import these legacy blobs under user context.
+///
+/// Expected File layout for user with user-id "98" and app-id "10001" and key-alias
+/// "authboundcertenc":
+///     /data/misc/keystore/user_98/.masterkey
+///     /data/misc/keystore/user_98/9810001_USRPKEY_authboundcertenc
+///     /data/misc/keystore/user_98/.9810001_chr_USRPKEY_authboundcertenc
+///     /data/misc/keystore/user_98/9810001_USRCERT_authboundcertenc
+///     /data/misc/keystore/user_98/9810001_CACERT_authboundcertenc
+///
+/// Test performs below tasks -
+/// With su context it performs following tasks -
+///     1. Remove this user if already exist.
+///     2. Generate a key-blob, user cert-blob and ca-cert-blob to store it in legacy blobs file
+///        layout.
+///     3. Prepare file layout using generated key-blob, user cert and ca certs.
+///     4. Restart the keystore2 service to make it detect the populated legacy blobs.
+///     5. Inform the keystore2 service about the user and unlock the user.
+/// With user-98 context it performs following tasks -
+///     6. To load and import the legacy key using its alias.
+///     7. After successful key import validate the user cert and cert-chain with initially
+///        generated blobs.
+///     8. Validate imported key parameters. Imported key parameters list should be the combination
+///        of the key-parameters in characteristics file and the characteristics according to
+///        the augmentation rules. There might be duplicate entries with different values for the
+///        parameters like OS_VERSION, OS_PATCHLEVEL, BOOT_PATCHLEVEL, VENDOR_PATCHLEVEL etc.
+///     9. Confirm keystore2 service cleanup the legacy blobs after successful import.
+#[test]
+fn keystore2_encrypted_certificates() -> anyhow::Result<()> {
+    // uid/gid of app-id 10001 in Android user 98.
+    let auid = 98 * AID_USER_OFFSET + 10001;
+    let agid = 98 * AID_USER_OFFSET + 10001;
+    static TARGET_CTX: &str = "u:r:untrusted_app:s0:c91,c256,c10,c20";
+    static TARGET_SU_CTX: &str = "u:r:su:s0";
+
+    // Cleanup user directory if it exists
+    let path_buf = PathBuf::from("/data/misc/keystore/user_98");
+    if path_buf.as_path().is_dir() {
+        std::fs::remove_dir_all(path_buf.as_path()).unwrap();
+    }
+
+    // Safety: run_as must be called from a single threaded process.
+    // This device test is run as a separate single threaded process.
+    let gen_key_result = unsafe {
+        run_as::run_as(TARGET_SU_CTX, Uid::from_raw(0), Gid::from_raw(0), || {
+            // Remove user if already exist.
+            let maint_service = get_maintenance();
+            match maint_service.onUserRemoved(98) {
+                Ok(_) => {
+                    println!("User was existed, deleted successfully");
+                }
+                Err(e) => {
+                    println!("onUserRemoved error: {:#?}", e);
+                }
+            }
+
+            let keystore2 = get_keystore_service();
+            let sec_level = keystore2
+                .getSecurityLevel(SecurityLevel::SecurityLevel::TRUSTED_ENVIRONMENT)
+                .unwrap();
+            // Generate Key BLOB and prepare legacy keystore blob files.
+            let key_metadata =
+                key_generations::generate_ec_p256_signing_key_with_attestation(&sec_level)
+                    .expect("Failed to generate key blob");
+
+            // Create keystore file layout for user_98.
+            // Derive the password key and use it to decrypt the test super key payload.
+            let pw: Password = PASSWORD.into();
+            let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+            let super_key =
+                TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap());
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_98");
+            if !path_buf.as_path().is_dir() {
+                std::fs::create_dir(path_buf.as_path()).unwrap();
+            }
+            path_buf.push(".masterkey");
+            if !path_buf.as_path().is_file() {
+                std::fs::write(path_buf.as_path(), SUPERKEY).unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_98");
+            path_buf.push("9810001_USRPKEY_authboundcertenc");
+            if !path_buf.as_path().is_file() {
+                make_encrypted_key_file(
+                    path_buf.as_path(),
+                    &super_key,
+                    &key_metadata.key.blob.unwrap(),
+                )
+                .unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_98");
+            path_buf.push(".9810001_chr_USRPKEY_authboundcertenc");
+            if !path_buf.as_path().is_file() {
+                std::fs::write(path_buf.as_path(), USRPKEY_AUTHBOUND_CHR).unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_98");
+            path_buf.push("9810001_USRCERT_authboundcertenc");
+            if !path_buf.as_path().is_file() {
+                make_encrypted_usr_cert_file(
+                    path_buf.as_path(),
+                    &super_key,
+                    key_metadata.certificate.as_ref().unwrap(),
+                )
+                .unwrap();
+            }
+
+            let mut path_buf = PathBuf::from("/data/misc/keystore/user_98");
+            path_buf.push("9810001_CACERT_authboundcertenc");
+            if !path_buf.as_path().is_file() {
+                make_encrypted_ca_cert_file(
+                    path_buf.as_path(),
+                    &super_key,
+                    key_metadata.certificateChain.as_ref().unwrap(),
+                )
+                .unwrap();
+            }
+
+            // Keystore2 disables the legacy importer when it finds the legacy database empty.
+            // However, if the device boots with an empty legacy database, the optimization kicks in
+            // and keystore2 never checks the legacy file system layout.
+            // So, restart keystore2 service to detect populated legacy database.
+            keystore2_restart_service();
+
+            let auth_service = get_authorization();
+            match auth_service.onLockScreenEvent(LockScreenEvent::UNLOCK, 98, Some(PASSWORD), None)
+            {
+                Ok(result) => {
+                    println!("Unlock Result: {:?}", result);
+                }
+                Err(e) => {
+                    panic!("Unlock should have succeeded: {:?}", e);
+                }
+            }
+
+            // NOTE(review): assumes UserState(1) denotes the unlocked state —
+            // confirm against android.security.maintenance.UserState.
+            let maint_service = get_maintenance();
+            assert_eq!(Ok(UserState(1)), maint_service.getState(98));
+
+            let mut key_params: Vec<KsKeyparameter> = Vec::new();
+            for param in key_metadata.authorizations {
+                let key_param = KsKeyparameter::new(param.keyParameter.into(), param.securityLevel);
+                key_params.push(key_param);
+            }
+
+            KeygenResult {
+                cert: key_metadata.certificate.unwrap(),
+                cert_chain: key_metadata.certificateChain.unwrap(),
+                key_parameters: key_params,
+            }
+        })
+    };
+
+    // Safety: run_as must be called from a single threaded process.
+    // This device test is run as a separate single threaded process.
+    unsafe {
+        run_as::run_as(TARGET_CTX, Uid::from_raw(auid), Gid::from_raw(agid), move || {
+            println!("UID: {}", getuid());
+            println!("Android User ID: {}", rustutils::users::multiuser_get_user_id(9810001));
+            println!("Android app ID: {}", rustutils::users::multiuser_get_app_id(9810001));
+
+            let test_alias = "authboundcertenc";
+            let keystore2 = get_keystore_service();
+
+            match keystore2.getKeyEntry(&KeyDescriptor {
+                domain: Domain::APP,
+                nspace: SELINUX_SHELL_NAMESPACE,
+                alias: Some(test_alias.to_string()),
+                blob: None,
+            }) {
+                Ok(key_entry_response) => {
+                    assert_eq!(
+                        key_entry_response.metadata.certificate.unwrap(),
+                        gen_key_result.cert
+                    );
+                    assert_eq!(
+                        key_entry_response.metadata.certificateChain.unwrap(),
+                        gen_key_result.cert_chain
+                    );
+
+                    // Prepare KsKeyParameter list from getKeyEntry response Authorizations.
+                    let mut key_params: Vec<KsKeyparameter> = Vec::new();
+                    for param in key_entry_response.metadata.authorizations {
+                        let key_param =
+                            KsKeyparameter::new(param.keyParameter.into(), param.securityLevel);
+                        key_params.push(key_param);
+                    }
+
+                    println!("GetKeyEntry response key params: {:#?}", key_params);
+                    println!("Generated key params: {:#?}", gen_key_result.key_parameters);
+                    match structured_test_params_cache() {
+                        LegacyKeyCharacteristics::Cache(legacy_key_params) => {
+                            println!("Legacy key-char cache: {:#?}", legacy_key_params);
+                            // Validate all keyparameters present in getKeyEntry response.
+                            for param in &legacy_key_params {
+                                key_params.retain(|in_element| *in_element != *param);
+                            }
+
+                            println!(
+                                "GetKeyEntry response unmatched key params: {:#?}",
+                                key_params
+                            );
+                            // Every response parameter must have been matched (and removed) above.
+                            assert_eq!(key_params.len(), 0);
+                        }
+                        _ => {
+                            // Fixed panic message: this arm fires when the cache
+                            // variant (not the file variant) is missing.
+                            panic!("Expecting cache characteristics");
+                        }
+                    }
+                }
+                Err(s) => {
+                    panic!("getKeyEntry should have succeeded. {:?}", s);
+                }
+            };
+        })
+    };
+
+    // Make sure keystore2 clean up imported legacy db.
+    let path_buf = PathBuf::from("/data/misc/keystore/user_98");
+    if path_buf.as_path().is_dir() {
+        panic!("Keystore service should have deleted this dir {:?}", path_buf);
+    }
+    Ok(())
+}
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index bdc94b7..d73f8fe 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -112,6 +112,7 @@
"KeystoreKey.cpp",
"KeystoreHmacKey.cpp",
"odsign_main.cpp",
+ "StatsReporter.cpp",
],
header_libs: ["odrefresh_headers"],
diff --git a/ondevice-signing/KeystoreHmacKey.cpp b/ondevice-signing/KeystoreHmacKey.cpp
index 09677d7..916cbbc 100644
--- a/ondevice-signing/KeystoreHmacKey.cpp
+++ b/ondevice-signing/KeystoreHmacKey.cpp
@@ -49,17 +49,14 @@
using android::base::unique_fd;
-// Keystore boot level that the odsign key uses
-static const int kOdsignBootLevel = 30;
-
-static KeyDescriptor getHmacKeyDescriptor() {
+static KeyDescriptor getHmacKeyDescriptor(const android::String16& keyAlias, int64_t keyNspace) {
// AIDL parcelable objects don't have constructor
static KeyDescriptor descriptor;
static std::once_flag flag;
std::call_once(flag, [&]() {
descriptor.domain = Domain::SELINUX;
- descriptor.alias = String16("ondevice-signing-hmac");
- descriptor.nspace = 101; // odsign_key
+ descriptor.alias = keyAlias + android::String16("-hmac");
+ descriptor.nspace = keyNspace;
});
return descriptor;
@@ -106,7 +103,7 @@
KeyParameter boot_level;
boot_level.tag = Tag::MAX_BOOT_LEVEL;
- boot_level.value = KeyParameterValue::make<KeyParameterValue::integer>(kOdsignBootLevel);
+ boot_level.value = KeyParameterValue::make<KeyParameterValue::integer>(mKeyBootLevel);
params.push_back(boot_level);
KeyMetadata metadata;
@@ -133,7 +130,7 @@
// Make sure this is an early boot key
for (const auto& auth : keyEntryResponse.metadata.authorizations) {
if (auth.keyParameter.tag == Tag::MAX_BOOT_LEVEL) {
- if (auth.keyParameter.value.get<KeyParameterValue::integer>() == kOdsignBootLevel) {
+ if (auth.keyParameter.value.get<KeyParameterValue::integer>() == mKeyBootLevel) {
keyValid = true;
break;
}
@@ -152,9 +149,9 @@
}
}
-KeystoreHmacKey::KeystoreHmacKey() {
- mDescriptor = getHmacKeyDescriptor();
-}
+// Builds the HMAC key descriptor from the signing key's alias ("<alias>-hmac")
+// and namespace, and records the boot level the key must be bound to.
+KeystoreHmacKey::KeystoreHmacKey(const android::String16& keyAlias, int64_t keyNspace,
+                                 int keyBootLevel)
+    : mDescriptor(getHmacKeyDescriptor(keyAlias, keyNspace)), mKeyBootLevel(keyBootLevel) {}
static std::vector<KeyParameter> getVerifyOpParameters() {
std::vector<KeyParameter> opParameters;
diff --git a/ondevice-signing/KeystoreHmacKey.h b/ondevice-signing/KeystoreHmacKey.h
index 782969a..1a815a3 100644
--- a/ondevice-signing/KeystoreHmacKey.h
+++ b/ondevice-signing/KeystoreHmacKey.h
@@ -31,7 +31,7 @@
using KeyDescriptor = ::android::system::keystore2::KeyDescriptor;
public:
- KeystoreHmacKey();
+ KeystoreHmacKey(const android::String16& keyAlias, int64_t keyNspace, int keyBootLevel);
android::base::Result<void> initialize(android::sp<IKeystoreService> service,
android::sp<IKeystoreSecurityLevel> securityLevel);
android::base::Result<std::string> sign(const std::string& message) const;
@@ -44,4 +44,6 @@
KeyDescriptor mDescriptor;
android::sp<IKeystoreService> mService;
android::sp<IKeystoreSecurityLevel> mSecurityLevel;
+
+ int mKeyBootLevel;
};
diff --git a/ondevice-signing/KeystoreKey.cpp b/ondevice-signing/KeystoreKey.cpp
index 03bb6d5..6ce65d6 100644
--- a/ondevice-signing/KeystoreKey.cpp
+++ b/ondevice-signing/KeystoreKey.cpp
@@ -50,27 +50,24 @@
using android::base::Error;
using android::base::Result;
-// Keystore boot level that the odsign key uses
-static const int kOdsignBootLevel = 30;
-
-const std::string kPublicKeySignature = "/data/misc/odsign/publickey.signature";
-
-static KeyDescriptor getKeyDescriptor() {
+static KeyDescriptor getKeyDescriptor(const android::String16& keyAlias, int64_t keyNspace) {
+    // NOTE(review): the descriptor is populated under std::call_once, so only the
+    // first call's keyAlias/keyNspace are ever used; a later call with different
+    // arguments silently receives the cached descriptor. Confirm callers never
+    // pass differing values within one process.
     // AIDL parcelable objects don't have constructor
     static KeyDescriptor descriptor;
     static std::once_flag flag;
     std::call_once(flag, [&]() {
         descriptor.domain = Domain::SELINUX;
-        descriptor.alias = String16("ondevice-signing");
-        descriptor.nspace = 101; // odsign_key
+        descriptor.alias = keyAlias;
+        descriptor.nspace = keyNspace;
     });
     return descriptor;
 }
-KeystoreKey::KeystoreKey() {
- mDescriptor = getKeyDescriptor();
-}
+// Binds the key to its keystore descriptor, its companion HMAC key, the path
+// where the signed public key is persisted, and the required keystore boot level.
+KeystoreKey::KeystoreKey(std::string signedPubKeyPath, const android::String16& keyAlias,
+                         int64_t keyNspace, int keyBootLevel)
+    : mDescriptor(getKeyDescriptor(keyAlias, keyNspace)),
+      mHmacKey(keyAlias, keyNspace, keyBootLevel), mSignedPubKeyPath(std::move(signedPubKeyPath)),
+      mKeyBootLevel(keyBootLevel) {}
Result<std::vector<uint8_t>> KeystoreKey::createKey() {
std::vector<KeyParameter> params;
@@ -113,7 +110,7 @@
KeyParameter boot_level;
boot_level.tag = Tag::MAX_BOOT_LEVEL;
- boot_level.value = KeyParameterValue::make<KeyParameterValue::integer>(kOdsignBootLevel);
+ boot_level.value = KeyParameterValue::make<KeyParameterValue::integer>(mKeyBootLevel);
params.push_back(boot_level);
KeyMetadata metadata;
@@ -137,7 +134,7 @@
return Error() << "Failed to sign public key.";
}
- if (!android::base::WriteStringToFile(*signature, kPublicKeySignature)) {
+ if (!android::base::WriteStringToFile(*signature, mSignedPubKeyPath)) {
return Error() << "Can't write public key signature.";
}
@@ -206,7 +203,7 @@
bool foundBootLevel = false;
for (const auto& auth : keyEntryResponse.metadata.authorizations) {
if (auth.keyParameter.tag == Tag::MAX_BOOT_LEVEL) {
- if (auth.keyParameter.value.get<KeyParameterValue::integer>() == kOdsignBootLevel) {
+ if (auth.keyParameter.value.get<KeyParameterValue::integer>() == mKeyBootLevel) {
foundBootLevel = true;
break;
}
@@ -232,7 +229,7 @@
std::string publicKeyString = {publicKey->begin(), publicKey->end()};
std::string signature;
- if (!android::base::ReadFileToString(kPublicKeySignature, &signature)) {
+ if (!android::base::ReadFileToString(mSignedPubKeyPath, &signature)) {
return Error() << "Can't find signature for public key.";
}
@@ -256,13 +253,15 @@
return *existingKey;
}
-Result<SigningKey*> KeystoreKey::getInstance() {
-    static KeystoreKey keystoreKey;
+Result<SigningKey*> KeystoreKey::getInstance(const std::string& signedPubKeyPath,
+                                             const android::String16& keyAlias, int64_t keyNspace,
+                                             int keyBootLevel) {
+    // NOTE(review): raw `new` with no owner — if initialize() fails below, the
+    // KeystoreKey instance is leaked. Consider std::unique_ptr + release() on
+    // the success path.
+    auto keystoreKey = new KeystoreKey(signedPubKeyPath, keyAlias, keyNspace, keyBootLevel);
 
-    if (!keystoreKey.initialize()) {
+    if (!keystoreKey->initialize()) {
         return Error() << "Failed to initialize keystore key.";
     } else {
-        return &keystoreKey;
+        return keystoreKey;
     }
diff --git a/ondevice-signing/KeystoreKey.h b/ondevice-signing/KeystoreKey.h
index f2fbb70..3c9a0ab 100644
--- a/ondevice-signing/KeystoreKey.h
+++ b/ondevice-signing/KeystoreKey.h
@@ -36,13 +36,16 @@
public:
virtual ~KeystoreKey(){};
- static android::base::Result<SigningKey*> getInstance();
+ static android::base::Result<SigningKey*> getInstance(const std::string& signedPubKeyPath,
+ const android::String16& keyAlias,
+ int64_t KeyNspace, int keyBootLevel);
virtual android::base::Result<std::string> sign(const std::string& message) const;
virtual android::base::Result<std::vector<uint8_t>> getPublicKey() const;
private:
- KeystoreKey();
+ KeystoreKey(std::string signedPubKeyPath, const android::String16& keyAlias, int64_t keyNspace,
+ int keyBootLevel);
bool initialize();
android::base::Result<std::vector<uint8_t>> verifyExistingKey();
android::base::Result<std::vector<uint8_t>> createKey();
@@ -53,4 +56,7 @@
android::sp<IKeystoreService> mService;
android::sp<IKeystoreSecurityLevel> mSecurityLevel;
std::vector<uint8_t> mPublicKey;
+
+ std::string mSignedPubKeyPath;
+ int mKeyBootLevel;
};
diff --git a/ondevice-signing/StatsReporter.cpp b/ondevice-signing/StatsReporter.cpp
new file mode 100644
index 0000000..65e645a
--- /dev/null
+++ b/ondevice-signing/StatsReporter.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StatsReporter.h"
+#include <android-base/logging.h>
+#include <stdlib.h>
+#include <string>
+#include <sys/stat.h>
+
+// Keep these constant in sync with COMPOS_METRIC_NAME & METRICS_FILE in OdsignStatsLogger.java.
+constexpr const char* kOdsignMetricsFile = "/data/misc/odsign/metrics/odsign-metrics.txt";
+constexpr const char* kComposMetricName = "comp_os_artifacts_check_record";
+
+StatsReporter::~StatsReporter() {
+ if (comp_os_artifacts_check_record_ == nullptr) {
+ LOG(INFO) << "Metrics report is empty";
+
+ // Remove the metrics file if any old version of the file already exists
+ if (std::filesystem::remove(kOdsignMetricsFile) != 0 &&
+ !(errno == ENOENT || errno == ENOTDIR)) {
+ PLOG(ERROR) << "Could not remove already present file";
+ }
+ return;
+ }
+
+ std::ofstream odsign_metrics_file_;
+ odsign_metrics_file_.open(kOdsignMetricsFile, std::ios::trunc);
+ if (!odsign_metrics_file_) {
+ PLOG(ERROR) << "Could not open file: " << kOdsignMetricsFile;
+ return;
+ }
+
+ odsign_metrics_file_ << kComposMetricName << ' ';
+ odsign_metrics_file_ << comp_os_artifacts_check_record_->current_artifacts_ok << ' ';
+ odsign_metrics_file_ << comp_os_artifacts_check_record_->comp_os_pending_artifacts_exists
+ << ' ';
+ odsign_metrics_file_ << comp_os_artifacts_check_record_->use_comp_os_generated_artifacts
+ << '\n';
+ if (chmod(kOdsignMetricsFile, 0644) != 0) {
+ PLOG(ERROR) << "Could not set correct file permissions for " << kOdsignMetricsFile;
+ return;
+ }
+ odsign_metrics_file_.close();
+ if (!odsign_metrics_file_) {
+ PLOG(ERROR) << "Failed to close the file";
+ }
+}
+
+StatsReporter::CompOsArtifactsCheckRecord* StatsReporter::GetComposArtifactsCheckRecord() {
+ if (comp_os_artifacts_check_record_ == nullptr) {
+ comp_os_artifacts_check_record_ = std::make_unique<CompOsArtifactsCheckRecord>();
+ }
+ return comp_os_artifacts_check_record_.get();
+}
diff --git a/ondevice-signing/StatsReporter.h b/ondevice-signing/StatsReporter.h
new file mode 100644
index 0000000..2682b96
--- /dev/null
+++ b/ondevice-signing/StatsReporter.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <fstream>
+
+// Class to store CompOsArtifactsCheck related metrics.
+// These are flushed to a file kOdsignMetricsFile and consumed by
+// System Server (in class OdsignStatsLogger) & sent to statsd.
+class StatsReporter {
+ public:
+ // Keep sync with EarlyBootCompOsArtifactsCheckReported
+ // definition in proto_logging/stats/atoms.proto.
+ struct CompOsArtifactsCheckRecord {
+ bool current_artifacts_ok = false;
+ bool comp_os_pending_artifacts_exists = false;
+ bool use_comp_os_generated_artifacts = false;
+ };
+
+ // The report is flushed (from buffer) into a file by the destructor.
+ ~StatsReporter();
+
+ // Get pointer to comp_os_artifacts_check_record, caller can then modify it.
+ // Note: pointer remains valid for the lifetime of this StatsReporter.
+ CompOsArtifactsCheckRecord* GetComposArtifactsCheckRecord();
+
+ private:
+ // Temporary buffer which stores the metrics.
+ std::unique_ptr<CompOsArtifactsCheckRecord> comp_os_artifacts_check_record_;
+};
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index 24a46b9..cd9a1ea 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -26,6 +26,7 @@
#include <sys/types.h>
#include <sys/wait.h>
+#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/unique_fd.h>
#include <asm/byteorder.h>
@@ -43,6 +44,11 @@
using android::base::unique_fd;
static const char* kFsVerityInitPath = "/system/bin/fsverity_init";
+static const char* kFsVerityProcPath = "/proc/sys/fs/verity";
+
+bool SupportsFsVerity() {
+ return access(kFsVerityProcPath, F_OK) == 0;
+}
static std::string toHex(std::span<const uint8_t> data) {
std::stringstream ss;
@@ -120,6 +126,19 @@
}
}
};
+
+static Result<void> measureFsVerity(int fd, const fsverity_digest* digest) {
+ if (ioctl(fd, FS_IOC_MEASURE_VERITY, digest) != 0) {
+ if (errno == ENODATA) {
+ return Error() << "File is not in fs-verity";
+ } else {
+ return ErrnoError() << "Failed to FS_IOC_MEASURE_VERITY";
+ }
+ }
+
+ return {};
+}
+
} // namespace
template <typename T> using trailing_unique_ptr = std::unique_ptr<T, DeleteAsPODArray<T>>;
@@ -165,7 +184,7 @@
return {};
}
-static Result<std::string> enableFsVerity(int fd, const SigningKey& key) {
+Result<std::string> enableFsVerity(int fd, const SigningKey& key) {
auto digest = createDigest(fd);
if (!digest.ok()) {
return Error() << digest.error();
@@ -193,14 +212,12 @@
static Result<std::string> isFileInVerity(int fd) {
auto d = makeUniqueWithTrailingData<fsverity_digest>(FS_VERITY_MAX_DIGEST_SIZE);
d->digest_size = FS_VERITY_MAX_DIGEST_SIZE;
- auto ret = ioctl(fd, FS_IOC_MEASURE_VERITY, d.get());
- if (ret < 0) {
- if (errno == ENODATA) {
- return Error() << "File is not in fs-verity";
- } else {
- return ErrnoError() << "Failed to FS_IOC_MEASURE_VERITY";
- }
+
+ const auto& status = measureFsVerity(fd, d.get());
+ if (!status.ok()) {
+ return status.error();
}
+
return toHex({&d->digest[0], &d->digest[d->digest_size]});
}
@@ -251,6 +268,31 @@
return digests;
}
+Result<void> enableFsVerity(const std::string& path, const std::string& signature_path) {
+ unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
+ if (!fd.ok()) {
+ return Error() << "Can't open " << path;
+ }
+
+ std::string signature;
+ if (!android::base::ReadFileToString(signature_path, &signature)) return Error() << "Can't read " << signature_path;
+ std::vector<uint8_t> span(signature.begin(), signature.end());
+
+ const auto& enable = enableFsVerity(fd.get(), span);
+ if (!enable.ok()) {
+ return enable.error();
+ }
+
+ auto digest = makeUniqueWithTrailingData<fsverity_digest>(FS_VERITY_MAX_DIGEST_SIZE);
+ digest->digest_size = FS_VERITY_MAX_DIGEST_SIZE;
+ const auto& measure = measureFsVerity(fd.get(), digest.get());
+ if (!measure.ok()) {
+ return measure.error();
+ }
+
+ return {};
+}
+
Result<std::map<std::string, std::string>> verifyAllFilesInVerity(const std::string& path) {
std::map<std::string, std::string> digests;
std::error_code ec;
diff --git a/ondevice-signing/include/VerityUtils.h b/ondevice-signing/include/VerityUtils.h
index 0559c35..e6e49c7 100644
--- a/ondevice-signing/include/VerityUtils.h
+++ b/ondevice-signing/include/VerityUtils.h
@@ -26,6 +26,8 @@
android::base::Result<void> addCertToFsVerityKeyring(const std::string& path, const char* keyName);
android::base::Result<std::vector<uint8_t>> createDigest(const std::string& path);
+android::base::Result<std::string> enableFsVerity(int fd, const SigningKey& key);
+bool SupportsFsVerity();
android::base::Result<std::map<std::string, std::string>>
verifyAllFilesInVerity(const std::string& path);
@@ -34,6 +36,10 @@
android::base::Result<std::map<std::string, std::string>>
addFilesToVerityRecursive(const std::string& path, const SigningKey& key);
+// Enable verity on the provided file, using the given PKCS7 signature.
+android::base::Result<void> enableFsVerity(const std::string& path,
+ const std::string& signature_path);
+
android::base::Result<void>
verifyAllFilesUsingCompOs(const std::string& directory_path,
const std::map<std::string, std::string>& digests,
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index a324857..04679a5 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -33,6 +33,7 @@
#include "CertUtils.h"
#include "KeystoreKey.h"
+#include "StatsReporter.h"
#include "VerityUtils.h"
#include "odsign_info.pb.h"
@@ -44,6 +45,12 @@
using OdsignInfo = ::odsign::proto::OdsignInfo;
+// Keystore boot level that the odsign key uses
+const int kKeyBootLevel = 30;
+const std::string kPublicKeySignature = "/data/misc/odsign/publickey.signature";
+const android::String16 kKeyAlias{"ondevice-signing"};
+constexpr int kKeyNspace = 101; // odsign_key
+
const std::string kSigningKeyCert = "/data/misc/odsign/key.cert";
const std::string kOdsignInfo = "/data/misc/odsign/odsign.info";
const std::string kOdsignInfoSignature = "/data/misc/odsign/odsign.info.signature";
@@ -51,19 +58,11 @@
const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/dalvik-cache";
constexpr const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
-constexpr const char* kCompOsVerifyPath = "/apex/com.android.compos/bin/compos_verify_key";
-constexpr const char* kFsVerityProcPath = "/proc/sys/fs/verity";
-constexpr const char* kKvmDevicePath = "/dev/kvm";
+constexpr const char* kCompOsVerifyPath = "/apex/com.android.compos/bin/compos_verify";
constexpr bool kForceCompilation = false;
constexpr bool kUseCompOs = true;
-const std::string kCompOsCert = "/data/misc/odsign/compos_key.cert";
-
-const std::string kCompOsCurrentPublicKey =
- "/data/misc/apexdata/com.android.compos/current/key.pubkey";
-const std::string kCompOsPendingPublicKey =
- "/data/misc/apexdata/com.android.compos/pending/key.pubkey";
const std::string kCompOsPendingArtifactsDir = "/data/misc/apexdata/com.android.art/compos-pending";
const std::string kCompOsInfo = kArtArtifactsDir + "/compos.info";
const std::string kCompOsInfoSignature = kCompOsInfo + ".signature";
@@ -86,12 +85,6 @@
namespace {
-std::vector<uint8_t> readBytesFromFile(const std::string& path) {
- std::string str;
- android::base::ReadFileToString(path, &str);
- return std::vector<uint8_t>(str.begin(), str.end());
-}
-
bool rename(const std::string& from, const std::string& to) {
std::error_code ec;
std::filesystem::rename(from, to, ec);
@@ -145,7 +138,8 @@
}
bool compOsPresent() {
- return access(kCompOsVerifyPath, X_OK) == 0 && access(kKvmDevicePath, F_OK) == 0;
+ // We must have the CompOS APEX
+ return access(kCompOsVerifyPath, X_OK) == 0;
}
Result<void> verifyExistingRootCert(const SigningKey& key) {
@@ -182,108 +176,6 @@
return createSelfSignedCertificate(*publicKey, keySignFunction, outPath);
}
-Result<std::vector<uint8_t>> extractRsaPublicKeyFromLeafCert(const SigningKey& key,
- const std::string& certPath,
- const std::string& expectedCn) {
- if (access(certPath.c_str(), F_OK) < 0) {
- return ErrnoError() << "Certificate not found: " << certPath;
- }
- auto trustedPublicKey = key.getPublicKey();
- if (!trustedPublicKey.ok()) {
- return Error() << "Failed to retrieve signing public key: " << trustedPublicKey.error();
- }
-
- auto existingCertInfo = verifyAndExtractCertInfoFromX509(certPath, trustedPublicKey.value());
- if (!existingCertInfo.ok()) {
- return Error() << "Failed to verify certificate at " << certPath << ": "
- << existingCertInfo.error();
- }
-
- auto& actualCn = existingCertInfo.value().subjectCn;
- if (actualCn != expectedCn) {
- return Error() << "CN of existing certificate at " << certPath << " is " << actualCn
- << ", should be " << expectedCn;
- }
-
- return existingCertInfo.value().subjectRsaPublicKey;
-}
-
-// Attempt to start a CompOS VM for the specified instance to get it to
-// verify ita public key & key blob.
-bool startCompOsAndVerifyKey(CompOsInstance instance) {
- bool isCurrent = instance == CompOsInstance::kCurrent;
- const std::string& keyPath = isCurrent ? kCompOsCurrentPublicKey : kCompOsPendingPublicKey;
- if (access(keyPath.c_str(), R_OK) != 0) {
- return false;
- }
-
- const char* const argv[] = {kCompOsVerifyPath, "--instance", isCurrent ? "current" : "pending"};
- int result =
- logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr);
- if (result == 0) {
- return true;
- }
-
- LOG(ERROR) << kCompOsVerifyPath << " returned " << result;
- return false;
-}
-
-Result<std::vector<uint8_t>> verifyCompOsKey(const SigningKey& signingKey) {
- bool verified = false;
-
- // If a pending key has been generated we don't know if it is the correct
- // one for the pending CompOS VM, so we need to start it and ask it.
- if (startCompOsAndVerifyKey(CompOsInstance::kPending)) {
- verified = true;
- }
-
- if (!verified) {
- // Alternatively if we signed a cert for the key on a previous boot, then we
- // can use that straight away.
- auto existing_key =
- extractRsaPublicKeyFromLeafCert(signingKey, kCompOsCert, kCompOsSubject.commonName);
- if (existing_key.ok()) {
- LOG(INFO) << "Found and verified existing CompOS public key certificate: "
- << kCompOsCert;
- return existing_key.value();
- }
- }
-
- // Otherwise, if there is an existing key that we haven't signed yet, then we can sign
- // it now if CompOS confirms it's OK.
- if (!verified && startCompOsAndVerifyKey(CompOsInstance::kCurrent)) {
- verified = true;
- }
-
- if (!verified) {
- return Error() << "No valid CompOS key present.";
- }
-
- // If the pending key was verified it will have been promoted to current, so
- // at this stage if there is a key it will be the current one.
- auto publicKey = readBytesFromFile(kCompOsCurrentPublicKey);
- if (publicKey.empty()) {
- // This shouldn`t really happen.
- return Error() << "Failed to read CompOS key.";
- }
-
- // One way or another we now have a valid public key. Persist a certificate so
- // we can simplify the checks on subsequent boots.
-
- auto signFunction = [&](const std::string& to_be_signed) {
- return signingKey.sign(to_be_signed);
- };
- auto certStatus = createLeafCertificate(kCompOsSubject, publicKey, signFunction,
- kSigningKeyCert, kCompOsCert);
- if (!certStatus.ok()) {
- return Error() << "Failed to create CompOS cert: " << certStatus.error();
- }
-
- LOG(INFO) << "Verified key, wrote new CompOS cert";
-
- return publicKey;
-}
-
Result<std::map<std::string, std::string>> computeDigests(const std::string& path) {
std::error_code ec;
std::map<std::string, std::string> digests;
@@ -314,7 +206,7 @@
for (const auto& path_digest : digests) {
auto path = path_digest.first;
auto digest = path_digest.second;
- if ((trusted_digests.count(path) == 0)) {
+ if (trusted_digests.count(path) == 0) {
return Error() << "Couldn't find digest for " << path;
}
if (trusted_digests.at(path) != digest) {
@@ -349,7 +241,7 @@
return verifyDigests(*result, trusted_digests);
}
-Result<OdsignInfo> getOdsignInfo(const SigningKey& key) {
+Result<OdsignInfo> getAndVerifyOdsignInfo(const SigningKey& key) {
std::string persistedSignature;
OdsignInfo odsignInfo;
@@ -383,6 +275,28 @@
return odsignInfo;
}
+std::map<std::string, std::string> getTrustedDigests(const SigningKey& key) {
+ std::map<std::string, std::string> trusted_digests;
+
+ if (access(kOdsignInfo.c_str(), F_OK) != 0) {
+ // no odsign info file, which is not necessarily an error - just return
+ // an empty list of digests.
+ LOG(INFO) << kOdsignInfo << " not found.";
+ return trusted_digests;
+ }
+ auto signInfo = getAndVerifyOdsignInfo(key);
+
+ if (signInfo.ok()) {
+ trusted_digests.insert(signInfo->file_hashes().begin(), signInfo->file_hashes().end());
+ } else {
+ // This is not expected, since the file did exist. Log an error and
+ // return an empty list of digests.
+ LOG(ERROR) << "Couldn't load trusted digests: " << signInfo.error();
+ }
+
+ return trusted_digests;
+}
+
Result<void> persistDigests(const std::map<std::string, std::string>& digests,
const SigningKey& key) {
OdsignInfo signInfo;
@@ -408,23 +322,8 @@
return {};
}
-Result<void> verifyArtifacts(const SigningKey& key, bool supportsFsVerity) {
- auto signInfo = getOdsignInfo(key);
- // Tell init we're done with the key; this is a boot time optimization
- // in particular for the no fs-verity case, where we need to do a
- // costly verification. If the files haven't been tampered with, which
- // should be the common path, the verification will succeed, and we won't
- // need the key anymore. If it turns out the artifacts are invalid (eg not
- // in fs-verity) or the hash doesn't match, we won't be able to generate
- // new artifacts without the key, so in those cases, remove the artifacts,
- // and use JIT zygote for the current boot. We should recover automatically
- // by the next boot.
- SetProperty(kOdsignKeyDoneProp, "1");
- if (!signInfo.ok()) {
- return signInfo.error();
- }
- std::map<std::string, std::string> trusted_digests(signInfo->file_hashes().begin(),
- signInfo->file_hashes().end());
+Result<void> verifyArtifactsIntegrity(const std::map<std::string, std::string>& trusted_digests,
+ bool supportsFsVerity) {
Result<void> integrityStatus;
if (supportsFsVerity) {
@@ -439,27 +338,12 @@
return {};
}
-Result<std::vector<uint8_t>> addCompOsCertToFsVerityKeyring(const SigningKey& signingKey) {
- auto publicKey = verifyCompOsKey(signingKey);
- if (!publicKey.ok()) {
- return publicKey.error();
- }
-
- auto cert_add_result = addCertToFsVerityKeyring(kCompOsCert, "fsv_compos");
- if (!cert_add_result.ok()) {
- // Best efforts only - nothing we can do if deletion fails.
- unlink(kCompOsCert.c_str());
- return Error() << "Failed to add CompOS certificate to fs-verity keyring: "
- << cert_add_result.error();
- }
-
- return publicKey;
-}
-
-Result<OdsignInfo> getComposInfo(const std::vector<uint8_t>& compos_key) {
- std::string compos_signature;
- if (!android::base::ReadFileToString(kCompOsInfoSignature, &compos_signature)) {
- return ErrnoError() << "Failed to read " << kCompOsInfoSignature;
+Result<OdsignInfo> getComposInfo() {
+ const char* const argv[] = {kCompOsVerifyPath, "--instance", "current"};
+ int result =
+ logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr);
+ if (result != 0) {
+ return Error() << kCompOsVerifyPath << " returned " << result;
}
std::string compos_info_str;
@@ -467,21 +351,12 @@
return ErrnoError() << "Failed to read " << kCompOsInfo;
}
- // Delete the files - if they're valid we don't need them any more, and
- // they'd confuse artifact verification; if they're not we never need to
- // look at them again.
+ // Delete the files - we don't need them any more, and they'd confuse
+ // artifact verification
if (unlink(kCompOsInfo.c_str()) != 0 || unlink(kCompOsInfoSignature.c_str()) != 0) {
return ErrnoError() << "Unable to delete CompOS info/signature file";
}
- // Verify the signature
- auto verified = verifyRsaPublicKeySignature(compos_info_str, compos_signature, compos_key);
- if (!verified.ok()) {
- return Error() << kCompOsInfoSignature << " does not match.";
- } else {
- LOG(INFO) << kCompOsInfoSignature << " matches.";
- }
-
OdsignInfo compos_info;
if (!compos_info.ParseFromString(compos_info_str)) {
return Error() << "Failed to parse " << kCompOsInfo;
@@ -491,19 +366,26 @@
return compos_info;
}
-art::odrefresh::ExitCode checkCompOsPendingArtifacts(const std::vector<uint8_t>& compos_key,
- const SigningKey& signing_key,
- bool* digests_verified) {
+art::odrefresh::ExitCode CheckCompOsPendingArtifacts(const SigningKey& signing_key,
+ bool* digests_verified,
+ StatsReporter* stats_reporter) {
+ StatsReporter::CompOsArtifactsCheckRecord* compos_check_record =
+ stats_reporter->GetComposArtifactsCheckRecord();
+
if (!directoryHasContent(kCompOsPendingArtifactsDir)) {
- return art::odrefresh::ExitCode::kCompilationRequired;
+ // No pending CompOS artifacts, all that matters is the current ones.
+ return checkArtifacts();
}
+ compos_check_record->comp_os_pending_artifacts_exists = true;
+
// CompOS has generated some artifacts that may, or may not, match the
// current state. But if there are already valid artifacts present the
// CompOS ones are redundant.
art::odrefresh::ExitCode odrefresh_status = checkArtifacts();
if (odrefresh_status != art::odrefresh::ExitCode::kCompilationRequired) {
if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
+ compos_check_record->current_artifacts_ok = true;
LOG(INFO) << "Current artifacts are OK, deleting pending artifacts";
removeDirectory(kCompOsPendingArtifactsDir);
}
@@ -527,7 +409,7 @@
// Make sure the artifacts we have are genuinely produced by the current
// instance of CompOS.
- auto compos_info = getComposInfo(compos_key);
+ auto compos_info = getComposInfo();
if (!compos_info.ok()) {
LOG(WARNING) << compos_info.error();
} else {
@@ -540,9 +422,17 @@
} else {
LOG(INFO) << "CompOS artifacts successfully verified.";
odrefresh_status = checkArtifacts();
- if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
- // We have digests of all the files, and they aren't going to change, so
- // we can just sign them & save them now, and skip checking them later.
+ switch (odrefresh_status) {
+ case art::odrefresh::ExitCode::kCompilationRequired:
+ // We have verified all the files, and we need to make sure
+ // we don't check them against odsign.info which will be out
+ // of date.
+ *digests_verified = true;
+ return odrefresh_status;
+ case art::odrefresh::ExitCode::kOkay: {
+ // We have digests of all the files, so we can just sign them & save them now.
+ // We need to make sure we don't check them against odsign.info which will
+ // be out of date.
auto persisted = persistDigests(compos_digests, signing_key);
if (!persisted.ok()) {
LOG(ERROR) << persisted.error();
@@ -550,10 +440,14 @@
// are pretty bad.
return art::odrefresh::ExitCode::kCleanupFailed;
}
+ compos_check_record->use_comp_os_generated_artifacts = true;
LOG(INFO) << "Persisted CompOS digests.";
*digests_verified = true;
+ return odrefresh_status;
}
- return odrefresh_status;
+ default:
+ return odrefresh_status;
+ }
}
}
@@ -570,6 +464,9 @@
} // namespace
int main(int /* argc */, char** argv) {
+ // stats_reporter is a pointer so that we can explicitly delete it
+ // instead of waiting for the program to die & its destructor being called
+ auto stats_reporter = std::make_unique<StatsReporter>();
android::base::InitLogging(argv, android::base::LogdLogger(android::base::SYSTEM));
auto errorScopeGuard = []() {
@@ -591,15 +488,15 @@
LOG(INFO) << "Device doesn't support updatable APEX, exiting.";
return 0;
}
-
- auto keystoreResult = KeystoreKey::getInstance();
+ auto keystoreResult =
+ KeystoreKey::getInstance(kPublicKeySignature, kKeyAlias, kKeyNspace, kKeyBootLevel);
if (!keystoreResult.ok()) {
LOG(ERROR) << "Could not create keystore key: " << keystoreResult.error();
return -1;
}
SigningKey* key = keystoreResult.value();
- bool supportsFsVerity = access(kFsVerityProcPath, F_OK) == 0;
+ bool supportsFsVerity = SupportsFsVerity();
if (!supportsFsVerity) {
LOG(INFO) << "Device doesn't support fsverity. Falling back to full verification.";
}
@@ -629,39 +526,67 @@
}
}
- art::odrefresh::ExitCode odrefresh_status = art::odrefresh::ExitCode::kCompilationRequired;
bool digests_verified = false;
+ art::odrefresh::ExitCode odrefresh_status =
+ useCompOs ? CheckCompOsPendingArtifacts(*key, &digests_verified, stats_reporter.get())
+ : checkArtifacts();
- if (useCompOs) {
- auto compos_key = addCompOsCertToFsVerityKeyring(*key);
- if (!compos_key.ok()) {
- LOG(WARNING) << compos_key.error();
- } else {
- odrefresh_status =
- checkCompOsPendingArtifacts(compos_key.value(), *key, &digests_verified);
+ // Explicitly reset the pointer - We rely on stats_reporter's
+ // destructor for actually writing the buffered metrics. This will otherwise not be called
+ // if the program doesn't exit normally (for ex, killed by init, which actually happens
+ // because odsign (after it finishes) sets kStopServiceProp instructing init to kill it).
+ stats_reporter.reset();
+
+ // The artifacts dir doesn't necessarily need to exist; if the existing
+ // artifacts on the system partition are valid, those can be used.
+ int err = access(kArtArtifactsDir.c_str(), F_OK);
+ // If we receive any error other than ENOENT, be suspicious
+ bool artifactsPresent = (err == 0) || (err < 0 && errno != ENOENT);
+
+ if (artifactsPresent && !digests_verified &&
+ (odrefresh_status == art::odrefresh::ExitCode::kOkay ||
+ odrefresh_status == art::odrefresh::ExitCode::kCompilationRequired)) {
+ // If we haven't verified the digests yet, we need to validate them. We
+ // need to do this both in case the existing artifacts are okay, but
+ // also if odrefresh said that a recompile is required. In the latter
+ // case, odrefresh may use partial compilation, and leave some
+ // artifacts unchanged.
+ auto trusted_digests = getTrustedDigests(*key);
+
+ if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
+ // Tell init we're done with the key; this is a boot time optimization
+ // in particular for the no fs-verity case, where we need to do a
+ // costly verification. If the files haven't been tampered with, which
+ // should be the common path, the verification will succeed, and we won't
+ // need the key anymore. If it turns out the artifacts are invalid (eg not
+ // in fs-verity) or the hash doesn't match, we won't be able to generate
+ // new artifacts without the key, so in those cases, remove the artifacts,
+ // and use JIT zygote for the current boot. We should recover automatically
+ // by the next boot.
+ SetProperty(kOdsignKeyDoneProp, "1");
+ }
+
+ auto verificationResult = verifyArtifactsIntegrity(trusted_digests, supportsFsVerity);
+ if (!verificationResult.ok()) {
+ int num_removed = removeDirectory(kArtArtifactsDir);
+ if (num_removed == 0) {
+ // If we can't remove the bad artifacts, we shouldn't continue, and
+ // instead prevent Zygote from using them (which is taken care of
+ // in the exit handler).
+ LOG(ERROR) << "Failed to remove unknown artifacts.";
+ return -1;
+ }
}
}
+ // Now that we verified existing artifacts, compile if we need to.
if (odrefresh_status == art::odrefresh::ExitCode::kCompilationRequired) {
odrefresh_status = compileArtifacts(kForceCompilation);
}
+
if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
+ // No new artifacts generated, and we verified existing ones above, nothing left to do.
LOG(INFO) << "odrefresh said artifacts are VALID";
- if (!digests_verified) {
- // A post-condition of validating artifacts is that if the ones on /system
- // are used, kArtArtifactsDir is removed. Conversely, if kArtArtifactsDir
- // exists, those are artifacts that will be used, and we should verify them.
- int err = access(kArtArtifactsDir.c_str(), F_OK);
- // If we receive any error other than ENOENT, be suspicious
- bool artifactsPresent = (err == 0) || (err < 0 && errno != ENOENT);
- if (artifactsPresent) {
- auto verificationResult = verifyArtifacts(*key, supportsFsVerity);
- if (!verificationResult.ok()) {
- LOG(ERROR) << verificationResult.error();
- return -1;
- }
- }
- }
} else if (odrefresh_status == art::odrefresh::ExitCode::kCompilationSuccess ||
odrefresh_status == art::odrefresh::ExitCode::kCompilationFailed) {
const bool compiled_all = odrefresh_status == art::odrefresh::ExitCode::kCompilationSuccess;
diff --git a/provisioner/Android.bp b/provisioner/Android.bp
index aac4878..665a9e7 100644
--- a/provisioner/Android.bp
+++ b/provisioner/Android.bp
@@ -47,8 +47,10 @@
name: "rkp_factory_extraction_tool",
vendor: true,
srcs: ["rkp_factory_extraction_tool.cpp"],
+ defaults: [
+ "keymint_use_latest_hal_aidl_ndk_shared",
+ ],
shared_libs: [
- "android.hardware.security.keymint-V1-ndk",
"libbinder",
"libbinder_ndk",
"libcrypto",
diff --git a/provisioner/rkp_factory_extraction_tool.cpp b/provisioner/rkp_factory_extraction_tool.cpp
index 9786c3d..0f45531 100644
--- a/provisioner/rkp_factory_extraction_tool.cpp
+++ b/provisioner/rkp_factory_extraction_tool.cpp
@@ -30,6 +30,7 @@
using aidl::android::hardware::security::keymint::IRemotelyProvisionedComponent;
using aidl::android::hardware::security::keymint::MacedPublicKey;
using aidl::android::hardware::security::keymint::ProtectedData;
+using aidl::android::hardware::security::keymint::RpcHardwareInfo;
using aidl::android::hardware::security::keymint::remote_prov::generateEekChain;
using aidl::android::hardware::security::keymint::remote_prov::getProdEekChain;
using aidl::android::hardware::security::keymint::remote_prov::jsonEncodeCsrWithBuild;
@@ -113,10 +114,10 @@
return certificateRequest;
}
-std::vector<uint8_t> getEekChain() {
+std::vector<uint8_t> getEekChain(uint32_t curve) {
if (FLAGS_test_mode) {
const std::vector<uint8_t> kFakeEekId = {'f', 'a', 'k', 'e', 0};
- auto eekOrErr = generateEekChain(3 /* chainlength */, kFakeEekId);
+ auto eekOrErr = generateEekChain(curve, 3 /* chainlength */, kFakeEekId);
if (!eekOrErr) {
std::cerr << "Failed to generate test EEK somehow: " << eekOrErr.message() << std::endl;
exit(-1);
@@ -128,15 +129,15 @@
return eek;
}
- return getProdEekChain();
+ return getProdEekChain(curve);
}
-void writeOutput(const Array& csr) {
+void writeOutput(const std::string& instance_name, const Array& csr) {
if (FLAGS_output_format == kBinaryCsrOutput) {
auto bytes = csr.encode();
std::copy(bytes.begin(), bytes.end(), std::ostream_iterator<char>(std::cout));
} else if (FLAGS_output_format == kBuildPlusCsr) {
- auto [json, error] = jsonEncodeCsrWithBuild(csr);
+ auto [json, error] = jsonEncodeCsrWithBuild(instance_name, csr);
if (!error.empty()) {
std::cerr << "Error JSON encoding the output: " << error;
exit(1);
@@ -169,9 +170,16 @@
std::vector<MacedPublicKey> emptyKeys;
DeviceInfo verifiedDeviceInfo;
ProtectedData protectedData;
- ::ndk::ScopedAStatus status = rkp_service->generateCertificateRequest(
- FLAGS_test_mode, emptyKeys, getEekChain(), challenge, &verifiedDeviceInfo, &protectedData,
- &keysToSignMac);
+ RpcHardwareInfo hwInfo;
+ ::ndk::ScopedAStatus status = rkp_service->getHardwareInfo(&hwInfo);
+ if (!status.isOk()) {
+ std::cerr << "Failed to get hardware info for '" << fullName
+ << "'. Error code: " << status.getServiceSpecificError() << "." << std::endl;
+ exit(-1);
+ }
+ status = rkp_service->generateCertificateRequest(
+ FLAGS_test_mode, emptyKeys, getEekChain(hwInfo.supportedEekCurve), challenge,
+ &verifiedDeviceInfo, &protectedData, &keysToSignMac);
if (!status.isOk()) {
std::cerr << "Bundle extraction failed for '" << fullName
<< "'. Error code: " << status.getServiceSpecificError() << "." << std::endl;
@@ -179,7 +187,7 @@
}
auto request =
composeCertificateRequest(protectedData, verifiedDeviceInfo, challenge, keysToSignMac);
- writeOutput(request);
+ writeOutput(std::string(name), request);
}
} // namespace