Merge "Make key names unique."
diff --git a/Android.bp b/Android.bp
index b44c296..4a0253c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1 +1,31 @@
+package {
+ default_applicable_licenses: ["system_security_license"],
+}
+
+// Added automatically by a large-scale-change that took the approach of
+// 'apply every license found to every target'. While this makes sure we respect
+// every license restriction, it may not be entirely correct.
+//
+// e.g. GPL in an MIT project might only apply to the contrib/ directory.
+//
+// Please consider splitting the single license below into multiple licenses,
+// taking care not to lose any license_kind information, and overriding the
+// default license using the 'licenses: [...]' property on targets as needed.
+//
+// For unused files, consider creating a 'fileGroup' with "//visibility:private"
+// to attach the license to, and including a comment whether the files may be
+// used in the current project.
+// See: http://go/android-license-faq
+license {
+ name: "system_security_license",
+ visibility: [":__subpackages__"],
+ license_kinds: [
+ "SPDX-license-identifier-Apache-2.0",
+ "SPDX-license-identifier-BSD",
+ ],
+ license_text: [
+ "NOTICE",
+ ],
+}
+
subdirs = ["*"]
diff --git a/OWNERS b/OWNERS
index fca66f8..bb51005 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,4 +1,6 @@
swillden@google.com
cbrubaker@google.com
jdanis@google.com
-kroot@google.com
\ No newline at end of file
+hasinitg@google.com
+kroot@google.com
+zeuthen@google.com
diff --git a/fsverity_init/Android.bp b/fsverity_init/Android.bp
index 3c9ade0..39d4e6b 100644
--- a/fsverity_init/Android.bp
+++ b/fsverity_init/Android.bp
@@ -1,3 +1,12 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_binary {
name: "fsverity_init",
srcs: [
diff --git a/identity/Android.bp b/identity/Android.bp
index dd61930..d66f4ec 100644
--- a/identity/Android.bp
+++ b/identity/Android.bp
@@ -1,3 +1,12 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_defaults {
name: "identity_defaults",
cflags: [
@@ -30,17 +39,20 @@
shared_libs: [
"libbase",
"libbinder",
- "libkeystore_aidl",
+ "libbinder_ndk",
+ "android.hardware.keymaster@4.0",
"libcredstore_aidl",
"libutils",
"libhidlbase",
"android.hardware.identity-support-lib",
"libkeymaster4support",
"libkeystore-attestation-application-id",
+ "android.hardware.security.keymint-V1-ndk_platform",
+ "android.security.authorization-ndk_platform",
],
static_libs: [
- "android.hardware.identity-unstable-cpp",
- "android.hardware.keymaster-unstable-cpp",
+ "android.hardware.identity-V3-cpp",
+ "android.hardware.keymaster-V3-cpp",
"libcppbor",
]
}
diff --git a/identity/Credential.cpp b/identity/Credential.cpp
index 4a2bae1..7c75d8a 100644
--- a/identity/Credential.cpp
+++ b/identity/Credential.cpp
@@ -14,16 +14,14 @@
* limitations under the License.
*/
-#define LOG_TAG "Credential"
+#define LOG_TAG "credstore"
#include <android-base/logging.h>
-
+#include <android/binder_manager.h>
#include <android/hardware/identity/support/IdentityCredentialSupport.h>
#include <android/security/identity/ICredentialStore.h>
-#include <android/security/keystore/BnCredstoreTokenCallback.h>
-#include <android/security/keystore/IKeystoreService.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <keymasterV4_0/keymaster_utils.h>
@@ -33,6 +31,11 @@
#include <future>
#include <tuple>
+#include <aidl/android/hardware/security/keymint/HardwareAuthToken.h>
+#include <aidl/android/hardware/security/secureclock/TimeStampToken.h>
+#include <aidl/android/security/authorization/AuthorizationTokens.h>
+#include <aidl/android/security/authorization/IKeystoreAuthorization.h>
+
#include "Credential.h"
#include "CredentialData.h"
#include "Util.h"
@@ -46,8 +49,6 @@
using std::promise;
using std::tuple;
-using android::security::keystore::IKeystoreService;
-
using ::android::hardware::identity::IWritableIdentityCredential;
using ::android::hardware::identity::support::ecKeyPairGetPkcs12;
@@ -55,11 +56,17 @@
using ::android::hardware::identity::support::ecKeyPairGetPublicKey;
using ::android::hardware::identity::support::sha256;
+using android::hardware::keymaster::SecurityLevel;
using android::hardware::keymaster::V4_0::HardwareAuthToken;
using android::hardware::keymaster::V4_0::VerificationToken;
using AidlHardwareAuthToken = android::hardware::keymaster::HardwareAuthToken;
using AidlVerificationToken = android::hardware::keymaster::VerificationToken;
+using KeyMintAuthToken = ::aidl::android::hardware::security::keymint::HardwareAuthToken;
+using ::aidl::android::hardware::security::secureclock::TimeStampToken;
+using ::aidl::android::security::authorization::AuthorizationTokens;
+using ::aidl::android::security::authorization::IKeystoreAuthorization;
+
Credential::Credential(CipherSuite cipherSuite, const std::string& dataPath,
const std::string& credentialName, uid_t callingUid,
HardwareInformation hwInfo, sp<IIdentityCredentialStore> halStoreBinder,
@@ -117,73 +124,94 @@
"Error loading data for credential");
}
- selectedAuthKey_ = data->selectAuthKey(allowUsingExhaustedKeys, allowUsingExpiredKeys);
- if (selectedAuthKey_ == nullptr) {
+    // We just check whether a key is available; we don't actually store it since we
+    // don't keep CredentialData around between binder calls.
+ const AuthKeyData* authKey =
+ data->selectAuthKey(allowUsingExhaustedKeys, allowUsingExpiredKeys);
+ if (authKey == nullptr) {
return Status::fromServiceSpecificError(
ICredentialStore::ERROR_NO_AUTHENTICATION_KEY_AVAILABLE,
"No suitable authentication key available");
}
- int64_t challenge;
- Status status = halBinder_->createAuthChallenge(&challenge);
- if (!status.isOk()) {
- return halStatusToGenericError(status);
- }
- if (challenge == 0) {
+ if (!ensureChallenge()) {
return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
- "Returned challenge is 0 (bug in HAL or TA)");
+ "Error getting challenge (bug in HAL or TA)");
}
-
- selectedChallenge_ = challenge;
- *_aidl_return = challenge;
+ *_aidl_return = selectedChallenge_;
return Status::ok();
}
-class CredstoreTokenCallback : public android::security::keystore::BnCredstoreTokenCallback,
- public promise<tuple<bool, vector<uint8_t>, vector<uint8_t>>> {
- public:
- CredstoreTokenCallback() {}
- virtual Status onFinished(bool success, const vector<uint8_t>& authToken,
- const vector<uint8_t>& verificationToken) override {
- this->set_value({success, authToken, verificationToken});
- return Status::ok();
+bool Credential::ensureChallenge() {
+ if (selectedChallenge_ != 0) {
+ return true;
}
-};
+
+ int64_t challenge;
+ Status status = halBinder_->createAuthChallenge(&challenge);
+ if (!status.isOk()) {
+ LOG(ERROR) << "Error getting challenge: " << status.exceptionMessage();
+ return false;
+ }
+ if (challenge == 0) {
+ LOG(ERROR) << "Returned challenge is 0 (bug in HAL or TA)";
+ return false;
+ }
+
+ selectedChallenge_ = challenge;
+ return true;
+}
// Returns false if an error occurred communicating with keystore.
//
-bool getTokensFromKeystore(uint64_t challenge, uint64_t secureUserId,
- unsigned int authTokenMaxAgeMillis, vector<uint8_t>& authToken,
- vector<uint8_t>& verificationToken) {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("android.security.keystore"));
- sp<IKeystoreService> keystore = interface_cast<IKeystoreService>(binder);
- if (keystore == nullptr) {
- return false;
- }
+bool getTokensFromKeystore2(uint64_t challenge, uint64_t secureUserId,
+ unsigned int authTokenMaxAgeMillis,
+ AidlHardwareAuthToken& aidlAuthToken,
+ AidlVerificationToken& aidlVerificationToken) {
+    // Try to connect to the IKeystoreAuthorization AIDL service first.
+ AIBinder* authzAIBinder = AServiceManager_checkService("android.security.authorization");
+ ::ndk::SpAIBinder authzBinder(authzAIBinder);
+ auto authzService = IKeystoreAuthorization::fromBinder(authzBinder);
+ if (authzService) {
+ AuthorizationTokens authzTokens;
+ auto result = authzService->getAuthTokensForCredStore(challenge, secureUserId,
+ authTokenMaxAgeMillis, &authzTokens);
+        // Convert the KeyMint auth token to a KeyMaster auth token, but only if
+        // tokens were returned.
+ if (result.isOk()) {
+ KeyMintAuthToken keymintAuthToken = authzTokens.authToken;
+ aidlAuthToken.challenge = keymintAuthToken.challenge;
+ aidlAuthToken.userId = keymintAuthToken.userId;
+ aidlAuthToken.authenticatorId = keymintAuthToken.authenticatorId;
+ aidlAuthToken.authenticatorType =
+ ::android::hardware::keymaster::HardwareAuthenticatorType(
+ int32_t(keymintAuthToken.authenticatorType));
+ aidlAuthToken.timestamp.milliSeconds = keymintAuthToken.timestamp.milliSeconds;
+ aidlAuthToken.mac = keymintAuthToken.mac;
- sp<CredstoreTokenCallback> callback = new CredstoreTokenCallback();
- auto future = callback->get_future();
-
- Status status =
- keystore->getTokensForCredstore(challenge, secureUserId, authTokenMaxAgeMillis, callback);
- if (!status.isOk()) {
+ // Convert timestamp token to KeyMaster verification token
+ TimeStampToken timestampToken = authzTokens.timestampToken;
+ aidlVerificationToken.challenge = timestampToken.challenge;
+ aidlVerificationToken.timestamp.milliSeconds = timestampToken.timestamp.milliSeconds;
+ // Legacy verification tokens were always minted by TEE.
+ aidlVerificationToken.securityLevel = SecurityLevel::TRUSTED_ENVIRONMENT;
+ aidlVerificationToken.mac = timestampToken.mac;
+ } else {
+ if (result.getServiceSpecificError() == 0) {
+                // Here we differentiate errors that occurred during communication
+                // from service-specific errors.
+ LOG(ERROR) << "Error getting tokens from keystore2: " << result.getDescription();
+ return false;
+ } else {
+ // Log the reason for not receiving auth tokens from keystore2.
+ LOG(INFO) << "Auth tokens were not received due to: " << result.getDescription();
+ }
+ }
+ return true;
+ } else {
+ LOG(ERROR) << "Error connecting to IKeystoreAuthorization service";
return false;
}
-
- auto fstatus = future.wait_for(std::chrono::milliseconds(5000));
- if (fstatus != std::future_status::ready) {
- LOG(ERROR) << "Waited 5 seconds from tokens for credstore, aborting";
- return false;
- }
- auto [success, returnedAuthToken, returnedVerificationToken] = future.get();
- if (!success) {
- LOG(ERROR) << "Error getting tokens from credstore";
- return false;
- }
- authToken = returnedAuthToken;
- verificationToken = returnedVerificationToken;
- return true;
}
Status Credential::getEntries(const vector<uint8_t>& requestMessage,
@@ -279,13 +307,6 @@
}
}
- // If requesting a challenge-based authToken the idea is that authentication
- // happens as part of the transaction. As such, authTokenMaxAgeMillis should
- // be nearly zero. We'll use 10 seconds for this.
- if (userAuthNeeded && selectedChallenge_ != 0) {
- authTokenMaxAgeMillis = 10 * 1000;
- }
-
// Reset tokens and only get them if they're actually needed, e.g. if user authentication
// is needed in any of the access control profiles for data items being requested.
//
@@ -303,63 +324,58 @@
aidlVerificationToken.securityLevel = ::android::hardware::keymaster::SecurityLevel::SOFTWARE;
aidlVerificationToken.mac.clear();
if (userAuthNeeded) {
- vector<uint8_t> authTokenBytes;
- vector<uint8_t> verificationTokenBytes;
- if (!getTokensFromKeystore(selectedChallenge_, data->getSecureUserId(),
- authTokenMaxAgeMillis, authTokenBytes, verificationTokenBytes)) {
- LOG(ERROR) << "Error getting tokens from keystore";
+ // If user authentication is needed, always get a challenge from the
+ // HAL/TA since it'll need it to check the returned VerificationToken
+ // for freshness.
+ if (!ensureChallenge()) {
return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
- "Error getting tokens from keystore");
+ "Error getting challenge (bug in HAL or TA)");
}
- // It's entirely possible getTokensFromKeystore() succeeded but didn't
- // return any tokens (in which case the returned byte-vectors are
- // empty). For example, this can happen if no auth token is available
- // which satifies e.g. |authTokenMaxAgeMillis|.
+ // Note: if all selected profiles require auth-on-every-presentation
+ // then authTokenMaxAgeMillis will be 0 (because timeoutMillis for each
+ // profile is 0). Which means that keystore will only return an
+ // AuthToken if its challenge matches what we pass, regardless of its
+        // age. This is intended because the HAL/TA will not care about
+        // the age in this case; it only cares that the challenge matches.
//
- if (authTokenBytes.size() > 0) {
- HardwareAuthToken authToken =
- android::hardware::keymaster::V4_0::support::hidlVec2AuthToken(authTokenBytes);
- // Convert from HIDL to AIDL...
- aidlAuthToken.challenge = int64_t(authToken.challenge);
- aidlAuthToken.userId = int64_t(authToken.userId);
- aidlAuthToken.authenticatorId = int64_t(authToken.authenticatorId);
- aidlAuthToken.authenticatorType =
- ::android::hardware::keymaster::HardwareAuthenticatorType(
- int32_t(authToken.authenticatorType));
- aidlAuthToken.timestamp.milliSeconds = int64_t(authToken.timestamp);
- aidlAuthToken.mac = authToken.mac;
- }
+ // Otherwise, if one or more of the profiles is auth-with-a-timeout then
+ // authTokenMaxAgeMillis will be set to the largest of those
+ // timeouts. We'll get an AuthToken which satisfies this deadline if it
+ // exists. This authToken _may_ have the requested challenge but it's
+ // not a guarantee and it's also not required.
+ //
- if (verificationTokenBytes.size() > 0) {
- optional<VerificationToken> token =
- android::hardware::keymaster::V4_0::support::deserializeVerificationToken(
- verificationTokenBytes);
- if (!token) {
- LOG(ERROR) << "Error deserializing verification token";
- return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
- "Error deserializing verification token");
- }
- aidlVerificationToken.challenge = token->challenge;
- aidlVerificationToken.timestamp.milliSeconds = token->timestamp;
- aidlVerificationToken.securityLevel =
- ::android::hardware::keymaster::SecurityLevel(token->securityLevel);
- aidlVerificationToken.mac = token->mac;
+ if (!getTokensFromKeystore2(selectedChallenge_, data->getSecureUserId(),
+ authTokenMaxAgeMillis, aidlAuthToken, aidlVerificationToken)) {
+ LOG(ERROR) << "Error getting tokens from keystore2";
+ return Status::fromServiceSpecificError(ICredentialStore::ERROR_GENERIC,
+ "Error getting tokens from keystore2");
}
}
// Note that the selectAuthKey() method is only called if a CryptoObject is involved at
// the Java layer. So we could end up with no previously selected auth key and we may
// need one.
- const AuthKeyData* authKey = selectedAuthKey_;
- if (sessionTranscript.size() > 0) {
- if (authKey == nullptr) {
- authKey = data->selectAuthKey(allowUsingExhaustedKeys, allowUsingExpiredKeys);
- if (authKey == nullptr) {
- return Status::fromServiceSpecificError(
- ICredentialStore::ERROR_NO_AUTHENTICATION_KEY_AVAILABLE,
- "No suitable authentication key available");
- }
+ //
+ const AuthKeyData* authKey =
+ data->selectAuthKey(allowUsingExhaustedKeys, allowUsingExpiredKeys);
+ if (authKey == nullptr) {
+ // If no authKey is available, consider it an error only when a
+ // SessionTranscript was provided.
+ //
+ // We allow no SessionTranscript to be provided because it makes
+        // the API simpler to deal with insofar as it can be used without having
+ // to generate any authentication keys.
+ //
+ // In this "no SessionTranscript is provided" mode we don't return
+ // DeviceNameSpaces nor a MAC over DeviceAuthentication so we don't
+ // need a device key.
+ //
+ if (sessionTranscript.size() > 0) {
+ return Status::fromServiceSpecificError(
+ ICredentialStore::ERROR_NO_AUTHENTICATION_KEY_AVAILABLE,
+ "No suitable authentication key available and one is needed");
}
}
vector<uint8_t> signingKeyBlob;
@@ -750,31 +766,36 @@
//
// It is because of this we need to set the CredentialKey certificate chain,
// keyCount, and maxUsesPerKey below.
- sp<WritableCredential> writableCredential =
- new WritableCredential(dataPath_, credentialName_, docType.value(), true, hwInfo_,
- halWritableCredential, halApiVersion_);
+ sp<WritableCredential> writableCredential = new WritableCredential(
+ dataPath_, credentialName_, docType.value(), true, hwInfo_, halWritableCredential);
writableCredential->setAttestationCertificate(data->getAttestationCertificate());
auto [keyCount, maxUsesPerKey] = data->getAvailableAuthenticationKeys();
writableCredential->setAvailableAuthenticationKeys(keyCount, maxUsesPerKey);
- // Because its data has changed, we need to reconnect to the HAL when the
- // credential has been updated... otherwise the remote object will have
- // stale data for future calls (e.g. getAuthKeysNeedingCertification().
+ // Because its data has changed, we need to replace the binder for the
+ // IIdentityCredential when the credential has been updated... otherwise the
+ // remote object will have stale data for future calls, for example
+ // getAuthKeysNeedingCertification().
//
- // The joys and pitfalls of mutable objects...
+ // The way this is implemented is that setCredentialToReloadWhenUpdated()
+ // instructs the WritableCredential to call writableCredentialPersonalized()
+ // on |this|.
//
- writableCredential->setCredentialUpdatedCallback([this] {
- Status status = this->ensureOrReplaceHalBinder();
- if (!status.isOk()) {
- LOG(ERROR) << "Error loading credential";
- }
- });
+ //
+ writableCredential->setCredentialToReloadWhenUpdated(this);
*_aidl_return = writableCredential;
return Status::ok();
}
+void Credential::writableCredentialPersonalized() {
+ Status status = ensureOrReplaceHalBinder();
+ if (!status.isOk()) {
+ LOG(ERROR) << "Error reloading credential";
+ }
+}
+
} // namespace identity
} // namespace security
} // namespace android
diff --git a/identity/Credential.h b/identity/Credential.h
index 7f08515..a76f3cc 100644
--- a/identity/Credential.h
+++ b/identity/Credential.h
@@ -50,6 +50,7 @@
~Credential();
Status ensureOrReplaceHalBinder();
+ void writableCredentialPersonalized();
// ICredential overrides
Status createEphemeralKeyPair(vector<uint8_t>* _aidl_return) override;
@@ -94,12 +95,13 @@
HardwareInformation hwInfo_;
sp<IIdentityCredentialStore> halStoreBinder_;
- const AuthKeyData* selectedAuthKey_ = nullptr;
uint64_t selectedChallenge_ = 0;
sp<IIdentityCredential> halBinder_;
int halApiVersion_;
+ bool ensureChallenge();
+
ssize_t
calcExpectedDeviceNameSpacesSize(const vector<uint8_t>& requestMessage,
const vector<RequestNamespaceParcel>& requestNamespaces,
diff --git a/identity/CredentialData.cpp b/identity/CredentialData.cpp
index 96c436a..d95c1ac 100644
--- a/identity/CredentialData.cpp
+++ b/identity/CredentialData.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "CredentialData"
+#define LOG_TAG "credstore"
#include <chrono>
diff --git a/identity/CredentialData.h b/identity/CredentialData.h
index b037997..24b55d3 100644
--- a/identity/CredentialData.h
+++ b/identity/CredentialData.h
@@ -55,7 +55,7 @@
vector<uint8_t> certificate;
vector<uint8_t> keyBlob;
- int64_t expirationDateMillisSinceEpoch;
+ int64_t expirationDateMillisSinceEpoch = 0;
vector<uint8_t> staticAuthenticationData;
vector<uint8_t> pendingCertificate;
vector<uint8_t> pendingKeyBlob;
diff --git a/identity/CredentialStore.cpp b/identity/CredentialStore.cpp
index f77294e..071cf24 100644
--- a/identity/CredentialStore.cpp
+++ b/identity/CredentialStore.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "CredentialStore"
+#define LOG_TAG "credstore"
#include <algorithm>
@@ -90,7 +90,7 @@
}
sp<IWritableCredential> writableCredential = new WritableCredential(
- dataPath_, credentialName, docType, false, hwInfo_, halWritableCredential, halApiVersion_);
+ dataPath_, credentialName, docType, false, hwInfo_, halWritableCredential);
*_aidl_return = writableCredential;
return Status::ok();
}
diff --git a/identity/CredentialStoreFactory.cpp b/identity/CredentialStoreFactory.cpp
index 5c3bf36..0e901ba 100644
--- a/identity/CredentialStoreFactory.cpp
+++ b/identity/CredentialStoreFactory.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "CredentialStoreFactory"
+#define LOG_TAG "credstore"
#include <android-base/logging.h>
diff --git a/identity/TEST_MAPPING b/identity/TEST_MAPPING
new file mode 100644
index 0000000..87707a8
--- /dev/null
+++ b/identity/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsIdentityTestCases"
+ }
+ ]
+}
diff --git a/identity/Util.cpp b/identity/Util.cpp
index cd29017..3a46bca 100644
--- a/identity/Util.cpp
+++ b/identity/Util.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "Util"
+#define LOG_TAG "credstore"
#include <fcntl.h>
#include <stdlib.h>
diff --git a/identity/WritableCredential.cpp b/identity/WritableCredential.cpp
index d0688b8..9827d75 100644
--- a/identity/WritableCredential.cpp
+++ b/identity/WritableCredential.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "WritableCredential"
+#define LOG_TAG "credstore"
#include <android-base/logging.h>
#include <android/hardware/identity/support/IdentityCredentialSupport.h>
@@ -41,15 +41,14 @@
WritableCredential::WritableCredential(const string& dataPath, const string& credentialName,
const string& docType, bool isUpdate,
HardwareInformation hwInfo,
- sp<IWritableIdentityCredential> halBinder, int halApiVersion)
+ sp<IWritableIdentityCredential> halBinder)
: dataPath_(dataPath), credentialName_(credentialName), docType_(docType), isUpdate_(isUpdate),
- hwInfo_(std::move(hwInfo)), halBinder_(halBinder), halApiVersion_(halApiVersion) {}
+ hwInfo_(std::move(hwInfo)), halBinder_(halBinder) {}
WritableCredential::~WritableCredential() {}
-void WritableCredential::setCredentialUpdatedCallback(
- std::function<void()>&& onCredentialUpdatedCallback) {
- onCredentialUpdatedCallback_ = onCredentialUpdatedCallback;
+void WritableCredential::setCredentialToReloadWhenUpdated(sp<Credential> credential) {
+ credentialToReloadWhenUpdated_ = credential;
}
Status WritableCredential::ensureAttestationCertificateExists(const vector<uint8_t>& challenge) {
@@ -268,7 +267,10 @@
"Error saving credential data to disk");
}
- onCredentialUpdatedCallback_();
+ if (credentialToReloadWhenUpdated_) {
+ credentialToReloadWhenUpdated_->writableCredentialPersonalized();
+ credentialToReloadWhenUpdated_.clear();
+ }
*_aidl_return = proofOfProvisioningSignature;
return Status::ok();
diff --git a/identity/WritableCredential.h b/identity/WritableCredential.h
index 6ff31ae..838b956 100644
--- a/identity/WritableCredential.h
+++ b/identity/WritableCredential.h
@@ -24,6 +24,8 @@
#include <android/hardware/identity/IIdentityCredentialStore.h>
+#include "Credential.h"
+
namespace android {
namespace security {
namespace identity {
@@ -38,13 +40,15 @@
public:
WritableCredential(const string& dataPath, const string& credentialName, const string& docType,
bool isUpdate, HardwareInformation hwInfo,
- sp<IWritableIdentityCredential> halBinder, int halApiVersion);
+ sp<IWritableIdentityCredential> halBinder);
~WritableCredential();
// Used when updating a credential
void setAttestationCertificate(const vector<uint8_t>& attestationCertificate);
void setAvailableAuthenticationKeys(int keyCount, int maxUsesPerKey);
- void setCredentialUpdatedCallback(std::function<void()>&& onCredentialUpdatedCallback);
+
+ // Used by Credential::update()
+ void setCredentialToReloadWhenUpdated(sp<Credential> credential);
// IWritableCredential overrides
Status getCredentialKeyCertificateChain(const vector<uint8_t>& challenge,
@@ -61,13 +65,12 @@
bool isUpdate_;
HardwareInformation hwInfo_;
sp<IWritableIdentityCredential> halBinder_;
- int halApiVersion_;
vector<uint8_t> attestationCertificate_;
int keyCount_ = 0;
int maxUsesPerKey_ = 1;
- std::function<void()> onCredentialUpdatedCallback_ = []() {};
+ sp<Credential> credentialToReloadWhenUpdated_;
ssize_t calcExpectedProofOfProvisioningSize(
const vector<AccessControlProfileParcel>& accessControlProfiles,
diff --git a/identity/main.cpp b/identity/main.cpp
index 8f4968d..2559789 100644
--- a/identity/main.cpp
+++ b/identity/main.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "android.security.identity"
+#define LOG_TAG "credstore"
#include <filesystem>
@@ -40,7 +40,7 @@
using ::android::security::identity::CredentialStoreFactory;
int main(int argc, char* argv[]) {
- InitLogging(argv, StderrLogger);
+ InitLogging(argv);
CHECK(argc == 2) << "A directory must be specified";
string data_dir = string(argv[1]);
@@ -51,11 +51,10 @@
auto ret = sm->addService(String16("android.security.identity"), factory);
CHECK(ret == ::android::OK) << "Couldn't register binder service";
- LOG(ERROR) << "Registered binder service";
+ LOG(INFO) << "Registered binder service";
- // This is needed for binder callbacks from keystore on a ICredstoreTokenCallback binder.
- android::ProcessState::self()->startThreadPool();
-
+    // Credstore is a single-threaded process, so devote the main thread
+    // to handling binder messages.
IPCThreadState::self()->joinThreadPool();
return 0;
diff --git a/keystore-engine/Android.bp b/keystore-engine/Android.bp
index 6512c66..9980765 100644
--- a/keystore-engine/Android.bp
+++ b/keystore-engine/Android.bp
@@ -12,12 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-BSD
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_library_shared {
name: "libkeystore-engine",
srcs: [
"android_engine.cpp",
"keystore_backend_binder.cpp",
+ "keystore2_engine.cpp",
],
cflags: [
@@ -27,7 +37,9 @@
],
shared_libs: [
+ "android.system.keystore2-V1-ndk_platform",
"libbinder",
+ "libbinder_ndk",
"libcrypto",
"libcutils",
"libhidlbase",
@@ -49,6 +61,7 @@
srcs: [
"android_engine.cpp",
"keystore_backend_hidl.cpp",
+ "keystore2_engine.cpp",
],
cflags: [
@@ -59,7 +72,10 @@
],
shared_libs: [
+ "android.system.keystore2-V1-ndk_platform",
"android.system.wifi.keystore@1.0",
+ "libbase",
+ "libbinder_ndk",
"libcrypto",
"liblog",
"libhidlbase",
diff --git a/keystore-engine/android_engine.cpp b/keystore-engine/android_engine.cpp
index e3525b2..5881523 100644
--- a/keystore-engine/android_engine.cpp
+++ b/keystore-engine/android_engine.cpp
@@ -23,10 +23,7 @@
#define LOG_TAG "keystore-engine"
#include <pthread.h>
-#include <sys/socket.h>
-#include <stdarg.h>
#include <string.h>
-#include <unistd.h>
#include <log/log.h>
@@ -41,6 +38,8 @@
#include <memory>
+#include "keystore2_engine.h"
+
#ifndef BACKEND_WIFI_HIDL
#include "keystore_backend_binder.h"
#else
@@ -335,6 +334,10 @@
EVP_PKEY* EVP_PKEY_from_keystore(const char* key_id) {
ALOGV("EVP_PKEY_from_keystore(\"%s\")", key_id);
+ if (auto ks2_key = EVP_PKEY_from_keystore2(key_id)) {
+ return ks2_key;
+ }
+
ensure_keystore_engine();
uint8_t *pubkey = nullptr;
diff --git a/keystore-engine/keystore2_engine.cpp b/keystore-engine/keystore2_engine.cpp
new file mode 100644
index 0000000..69d2ca6
--- /dev/null
+++ b/keystore-engine/keystore2_engine.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "keystore2_engine.h"
+
+#include <aidl/android/system/keystore2/IKeystoreService.h>
+#include <android-base/logging.h>
+#include <android-base/strings.h>
+#include <android/binder_manager.h>
+
+#include <private/android_filesystem_config.h>
+
+#include <openssl/bn.h>
+#include <openssl/ec.h>
+#include <openssl/ec_key.h>
+#include <openssl/ecdsa.h>
+#include <openssl/engine.h>
+#include <openssl/rsa.h>
+#include <openssl/x509.h>
+
+#define AT __func__ << ":" << __LINE__ << " "
+
+constexpr const char keystore2_service_name[] = "android.system.keystore2.IKeystoreService/default";
+const std::string keystore2_grant_id_prefix("ks2_keystore-engine_grant_id:");
+
+/**
+ * Keystore 2.0 namespace identifiers.
+ * Keep in sync with system/sepolicy/private/keystore2_key_contexts.
+ */
+constexpr const int64_t KS2_NAMESPACE_WIFI = 102;
+
+namespace ks2 = ::aidl::android::system::keystore2;
+namespace KMV1 = ::aidl::android::hardware::security::keymint;
+
+namespace {
+
+int64_t getNamespaceforCurrentUid() {
+ auto uid = getuid();
+ switch (uid) {
+ case AID_WIFI:
+ return KS2_NAMESPACE_WIFI;
+ // 0 is the super user namespace, and nothing has access to this namespace on user builds.
+ // So this will always fail.
+ default:
+ return 0;
+ }
+}
+
+struct Keystore2KeyBackend {
+ ks2::KeyDescriptor descriptor_;
+ std::shared_ptr<ks2::IKeystoreSecurityLevel> i_keystore_security_level_;
+};
+
+/* key_backend_dup is called when one of the RSA or EC_KEY objects is duplicated. */
+extern "C" int key_backend_dup(CRYPTO_EX_DATA* /* to */, const CRYPTO_EX_DATA* /* from */,
+ void** from_d, int /* index */, long /* argl */, void* /* argp */) {
+ auto key_backend = reinterpret_cast<std::shared_ptr<Keystore2KeyBackend>*>(*from_d);
+ if (key_backend != nullptr) {
+ *from_d = new std::shared_ptr<Keystore2KeyBackend>(*key_backend);
+ }
+ return 1;
+}
+
+/* key_backend_free is called when one of the RSA, DSA or EC_KEY objects is freed. */
+extern "C" void key_backend_free(void* /* parent */, void* ptr, CRYPTO_EX_DATA* /* ad */,
+ int /* index */, long /* argl */, void* /* argp */) {
+ delete reinterpret_cast<std::shared_ptr<Keystore2KeyBackend>*>(ptr);
+}
+
+extern "C" int rsa_private_transform(RSA* rsa, uint8_t* out, const uint8_t* in, size_t len);
+extern "C" int ecdsa_sign(const uint8_t* digest, size_t digest_len, uint8_t* sig,
+ unsigned int* sig_len, EC_KEY* ec_key);
+/* Keystore2Engine is a BoringSSL ENGINE that implements RSA and ECDSA by
+ * forwarding the requested operations to Keystore. */
+class Keystore2Engine {
+ public:
+ Keystore2Engine()
+ : rsa_index_(RSA_get_ex_new_index(0 /* argl */, nullptr /* argp */, nullptr /* new_func */,
+ key_backend_dup, key_backend_free)),
+ ec_key_index_(EC_KEY_get_ex_new_index(0 /* argl */, nullptr /* argp */,
+ nullptr /* new_func */, key_backend_dup,
+ key_backend_free)),
+ engine_(ENGINE_new()) {
+ memset(&rsa_method_, 0, sizeof(rsa_method_));
+ rsa_method_.common.is_static = 1;
+ rsa_method_.private_transform = rsa_private_transform;
+ rsa_method_.flags = RSA_FLAG_OPAQUE;
+ ENGINE_set_RSA_method(engine_, &rsa_method_, sizeof(rsa_method_));
+
+ memset(&ecdsa_method_, 0, sizeof(ecdsa_method_));
+ ecdsa_method_.common.is_static = 1;
+ ecdsa_method_.sign = ecdsa_sign;
+ ecdsa_method_.flags = ECDSA_FLAG_OPAQUE;
+ ENGINE_set_ECDSA_method(engine_, &ecdsa_method_, sizeof(ecdsa_method_));
+ }
+
+ int rsa_ex_index() const { return rsa_index_; }
+ int ec_key_ex_index() const { return ec_key_index_; }
+
+ const ENGINE* engine() const { return engine_; }
+
+ static const Keystore2Engine& get() {
+ static Keystore2Engine engine;
+ return engine;
+ }
+
+ private:
+ const int rsa_index_;
+ const int ec_key_index_;
+ RSA_METHOD rsa_method_;
+ ECDSA_METHOD ecdsa_method_;
+ ENGINE* const engine_;
+};
+
+#define OWNERSHIP_TRANSFERRED(x) x.release()
+
+/* wrap_rsa returns an |EVP_PKEY| that contains an RSA key where the public
+ * part is taken from |public_rsa| and the private operations are forwarded to
+ * KeyStore and operate on the key named |key_id|. */
+bssl::UniquePtr<EVP_PKEY> wrap_rsa(std::shared_ptr<Keystore2KeyBackend> key_backend,
+ const RSA* public_rsa) {
+ bssl::UniquePtr<RSA> rsa(RSA_new_method(Keystore2Engine::get().engine()));
+ if (rsa.get() == nullptr) {
+ return nullptr;
+ }
+
+ auto key_backend_copy = new decltype(key_backend)(key_backend);
+
+ if (!RSA_set_ex_data(rsa.get(), Keystore2Engine::get().rsa_ex_index(), key_backend_copy)) {
+ delete key_backend_copy;
+ return nullptr;
+ }
+
+ rsa->n = BN_dup(public_rsa->n);
+ rsa->e = BN_dup(public_rsa->e);
+ if (rsa->n == nullptr || rsa->e == nullptr) {
+ return nullptr;
+ }
+
+ bssl::UniquePtr<EVP_PKEY> result(EVP_PKEY_new());
+ if (result.get() == nullptr || !EVP_PKEY_assign_RSA(result.get(), rsa.get())) {
+ return nullptr;
+ }
+ OWNERSHIP_TRANSFERRED(rsa);
+
+ return result;
+}
+
+/* wrap_ecdsa returns an |EVP_PKEY| that contains an ECDSA key where the public
+ * part is taken from |public_ecdsa| and the private operations are forwarded to
+ * KeyStore and operate on the key named |key_id|. */
+bssl::UniquePtr<EVP_PKEY> wrap_ecdsa(std::shared_ptr<Keystore2KeyBackend> key_backend,
+ const EC_KEY* public_ecdsa) {
+ bssl::UniquePtr<EC_KEY> ec(EC_KEY_new_method(Keystore2Engine::get().engine()));
+ if (ec.get() == nullptr) {
+ return nullptr;
+ }
+
+ if (!EC_KEY_set_group(ec.get(), EC_KEY_get0_group(public_ecdsa)) ||
+ !EC_KEY_set_public_key(ec.get(), EC_KEY_get0_public_key(public_ecdsa))) {
+ return nullptr;
+ }
+
+ auto key_backend_copy = new decltype(key_backend)(key_backend);
+
+ if (!EC_KEY_set_ex_data(ec.get(), Keystore2Engine::get().ec_key_ex_index(), key_backend_copy)) {
+ delete key_backend_copy;
+ return nullptr;
+ }
+
+ bssl::UniquePtr<EVP_PKEY> result(EVP_PKEY_new());
+ if (result.get() == nullptr || !EVP_PKEY_assign_EC_KEY(result.get(), ec.get())) {
+ return nullptr;
+ }
+ OWNERSHIP_TRANSFERRED(ec);
+
+ return result;
+}
+
+std::optional<std::vector<uint8_t>> keystore2_sign(const Keystore2KeyBackend& key_backend,
+ std::vector<uint8_t> input,
+ KMV1::Algorithm algorithm) {
+ auto sec_level = key_backend.i_keystore_security_level_;
+ ks2::CreateOperationResponse response;
+
+ std::vector<KMV1::KeyParameter> op_params(4);
+ op_params[0] = KMV1::KeyParameter{
+ .tag = KMV1::Tag::PURPOSE,
+ .value = KMV1::KeyParameterValue::make<KMV1::KeyParameterValue::keyPurpose>(
+ KMV1::KeyPurpose::SIGN)};
+ op_params[1] = KMV1::KeyParameter{
+ .tag = KMV1::Tag::ALGORITHM,
+ .value = KMV1::KeyParameterValue::make<KMV1::KeyParameterValue::algorithm>(algorithm)};
+ op_params[2] = KMV1::KeyParameter{
+ .tag = KMV1::Tag::PADDING,
+ .value = KMV1::KeyParameterValue::make<KMV1::KeyParameterValue::paddingMode>(
+ KMV1::PaddingMode::NONE)};
+ op_params[3] =
+ KMV1::KeyParameter{.tag = KMV1::Tag::DIGEST,
+ .value = KMV1::KeyParameterValue::make<KMV1::KeyParameterValue::digest>(
+ KMV1::Digest::NONE)};
+
+ auto rc = sec_level->createOperation(key_backend.descriptor_, op_params, false /* forced */,
+ &response);
+ if (!rc.isOk()) {
+ auto exception_code = rc.getExceptionCode();
+ if (exception_code == EX_SERVICE_SPECIFIC) {
+ LOG(ERROR) << AT << "Keystore createOperation returned service specific error: "
+ << rc.getServiceSpecificError();
+ } else {
+ LOG(ERROR) << AT << "Communication with Keystore createOperation failed error: "
+ << exception_code;
+ }
+ return std::nullopt;
+ }
+
+ auto op = response.iOperation;
+
+ std::optional<std::vector<uint8_t>> output = std::nullopt;
+ rc = op->finish(std::move(input), {}, &output);
+ if (!rc.isOk()) {
+ auto exception_code = rc.getExceptionCode();
+ if (exception_code == EX_SERVICE_SPECIFIC) {
+ LOG(ERROR) << AT << "Keystore finish returned service specific error: "
+ << rc.getServiceSpecificError();
+ } else {
+ LOG(ERROR) << AT
+ << "Communication with Keystore finish failed error: " << exception_code;
+ }
+ return std::nullopt;
+ }
+
+ if (!output) {
+ LOG(ERROR) << AT << "We did not get a signature from Keystore.";
+ }
+
+ return output;
+}
+
+/* rsa_private_transform takes a big-endian integer from |in|, calculates the
+ * d'th power of it, modulo the RSA modulus, and writes the result as a
+ * big-endian integer to |out|. Both |in| and |out| are |len| bytes long. It
+ * returns one on success and zero otherwise. */
+extern "C" int rsa_private_transform(RSA* rsa, uint8_t* out, const uint8_t* in, size_t len) {
+ auto key_backend = reinterpret_cast<std::shared_ptr<Keystore2KeyBackend>*>(
+ RSA_get_ex_data(rsa, Keystore2Engine::get().rsa_ex_index()));
+
+ if (key_backend == nullptr) {
+ LOG(ERROR) << AT << "Invalid key.";
+ return 0;
+ }
+
+ auto output =
+ keystore2_sign(**key_backend, std::vector<uint8_t>(in, in + len), KMV1::Algorithm::RSA);
+ if (!output) {
+ return 0;
+ }
+
+ if (output->size() > len) {
+ /* The result of the RSA operation can never be larger than the size of
+ * the modulus so we assume that the result has extra zeros on the
+ * left. This provides attackers with an oracle, but there's nothing
+ * that we can do about it here. */
+ LOG(WARNING) << "Reply len " << output->size() << " greater than expected " << len;
+ memcpy(out, &output->data()[output->size() - len], len);
+ } else if (output->size() < len) {
+ /* If the Keystore implementation returns a short value we assume that
+ * it's because it removed leading zeros from the left side. This is
+ * bad because it provides attackers with an oracle but we cannot do
+ * anything about a broken Keystore implementation here. */
+ LOG(WARNING) << "Reply len " << output->size() << " less than expected " << len;
+ memset(out, 0, len);
+ memcpy(out + len - output->size(), output->data(), output->size());
+ } else {
+ memcpy(out, output->data(), len);
+ }
+
+ return 1;
+}
+
+/* ecdsa_sign signs |digest_len| bytes from |digest| with |ec_key| and writes
+ * the resulting signature (an ASN.1 encoded blob) to |sig|. It returns one on
+ * success and zero otherwise. */
+extern "C" int ecdsa_sign(const uint8_t* digest, size_t digest_len, uint8_t* sig,
+ unsigned int* sig_len, EC_KEY* ec_key) {
+ auto key_backend = reinterpret_cast<std::shared_ptr<Keystore2KeyBackend>*>(
+ EC_KEY_get_ex_data(ec_key, Keystore2Engine::get().ec_key_ex_index()));
+
+ if (key_backend == nullptr) {
+ LOG(ERROR) << AT << "Invalid key.";
+ return 0;
+ }
+
+ size_t ecdsa_size = ECDSA_size(ec_key);
+
+ auto output = keystore2_sign(**key_backend, std::vector<uint8_t>(digest, digest + digest_len),
+ KMV1::Algorithm::EC);
+ if (!output) {
+ LOG(ERROR) << "There was an error during ecdsa_sign.";
+ return 0;
+ }
+
+ if (output->size() == 0) {
+ LOG(ERROR) << "No valid signature returned";
+ return 0;
+ } else if (output->size() > ecdsa_size) {
+ LOG(ERROR) << "Signature is too large";
+ return 0;
+ }
+
+ memcpy(sig, output->data(), output->size());
+ *sig_len = output->size();
+
+ return 1;
+}
+
+} // namespace
+
+/* EVP_PKEY_from_keystore2 returns an |EVP_PKEY| that contains either an RSA or
+ * ECDSA key where the public part of the key reflects the value of the key
+ * named |key_id| in Keystore and the private operations are forwarded onto
+ * KeyStore. */
+extern "C" EVP_PKEY* EVP_PKEY_from_keystore2(const char* key_id) {
+ ::ndk::SpAIBinder keystoreBinder(AServiceManager_checkService(keystore2_service_name));
+ auto keystore2 = ks2::IKeystoreService::fromBinder(keystoreBinder);
+
+ if (!keystore2) {
+ LOG(ERROR) << AT << "Unable to connect to Keystore 2.0.";
+ return nullptr;
+ }
+
+ std::string alias = key_id;
+ if (android::base::StartsWith(alias, "USRPKEY_")) {
+ LOG(WARNING) << AT << "Keystore backend used with legacy alias prefix - ignoring.";
+ alias = alias.substr(8);
+ }
+
+ ks2::KeyDescriptor descriptor = {
+ .domain = ks2::Domain::SELINUX,
+ .nspace = getNamespaceforCurrentUid(),
+ .alias = alias,
+ .blob = std::nullopt,
+ };
+
+ // If the key_id starts with the grant id prefix, we parse the following string as numeric
+ // grant id. We can then use the grant domain without alias to load the designated key.
+ if (alias.find(keystore2_grant_id_prefix) == 0) {
+ std::stringstream s(alias.substr(keystore2_grant_id_prefix.size()));
+ s >> std::hex >> reinterpret_cast<uint64_t&>(descriptor.nspace);
+ descriptor.domain = ks2::Domain::GRANT;
+ descriptor.alias = std::nullopt;
+ }
+
+ ks2::KeyEntryResponse response;
+ auto rc = keystore2->getKeyEntry(descriptor, &response);
+ if (!rc.isOk()) {
+ auto exception_code = rc.getExceptionCode();
+ if (exception_code == EX_SERVICE_SPECIFIC) {
+ LOG(ERROR) << AT << "Keystore getKeyEntry returned service specific error: "
+ << rc.getServiceSpecificError();
+ } else {
+ LOG(ERROR) << AT << "Communication with Keystore getKeyEntry failed error: "
+ << exception_code;
+ }
+ return nullptr;
+ }
+
+ if (!response.metadata.certificate) {
+ LOG(ERROR) << AT << "No public key found.";
+ return nullptr;
+ }
+
+ const uint8_t* p = response.metadata.certificate->data();
+ bssl::UniquePtr<X509> x509(d2i_X509(nullptr, &p, response.metadata.certificate->size()));
+ if (!x509) {
+ LOG(ERROR) << AT << "Failed to parse x509 certificate.";
+ return nullptr;
+ }
+ bssl::UniquePtr<EVP_PKEY> pkey(X509_get_pubkey(x509.get()));
+ if (!pkey) {
+ LOG(ERROR) << AT << "Failed to extract public key.";
+ return nullptr;
+ }
+
+ auto key_backend = std::make_shared<Keystore2KeyBackend>(
+ Keystore2KeyBackend{response.metadata.key, response.iSecurityLevel});
+
+ bssl::UniquePtr<EVP_PKEY> result;
+ switch (EVP_PKEY_type(pkey->type)) {
+ case EVP_PKEY_RSA: {
+ bssl::UniquePtr<RSA> public_rsa(EVP_PKEY_get1_RSA(pkey.get()));
+ result = wrap_rsa(key_backend, public_rsa.get());
+ break;
+ }
+ case EVP_PKEY_EC: {
+ bssl::UniquePtr<EC_KEY> public_ecdsa(EVP_PKEY_get1_EC_KEY(pkey.get()));
+ result = wrap_ecdsa(key_backend, public_ecdsa.get());
+ break;
+ }
+ default:
+ LOG(ERROR) << AT << "Unsupported key type " << EVP_PKEY_type(pkey->type);
+ return nullptr;
+ }
+
+ return result.release();
+}
diff --git a/keystore-engine/keystore2_engine.h b/keystore-engine/keystore2_engine.h
new file mode 100644
index 0000000..a8381d9
--- /dev/null
+++ b/keystore-engine/keystore2_engine.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <openssl/evp.h>
+
+extern "C" EVP_PKEY* EVP_PKEY_from_keystore2(const char* key_id);
diff --git a/keystore/Android.bp b/keystore/Android.bp
index 45b721b..7278cee 100644
--- a/keystore/Android.bp
+++ b/keystore/Android.bp
@@ -1,3 +1,13 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ // SPDX-license-identifier-BSD
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_defaults {
name: "keystore_defaults",
diff --git a/keystore/auth_token_table.cpp b/keystore/auth_token_table.cpp
index 5e6d572..971f9ef 100644
--- a/keystore/auth_token_table.cpp
+++ b/keystore/auth_token_table.cpp
@@ -178,33 +178,39 @@
int64_t authTokenMaxAgeMillis) {
std::vector<uint64_t> sids = {secureUserId};
HardwareAuthenticatorType auth_type = HardwareAuthenticatorType::ANY;
-
time_t now = clock_function_();
+ int64_t nowMillis = now * 1000;
- // challenge-based - the authToken has to contain the given challenge.
- if (challenge != 0) {
- auto matching_op = find_if(
- entries_, [&](Entry& e) { return e.token().challenge == challenge && !e.completed(); });
- if (matching_op == entries_.end()) {
- return {AUTH_TOKEN_NOT_FOUND, {}};
- }
-
- if (!matching_op->SatisfiesAuth(sids, auth_type)) {
- return {AUTH_TOKEN_WRONG_SID, {}};
- }
-
- if (authTokenMaxAgeMillis > 0) {
- if (static_cast<int64_t>(matching_op->time_received()) + authTokenMaxAgeMillis <
- static_cast<int64_t>(now)) {
- return {AUTH_TOKEN_EXPIRED, {}};
- }
- }
-
- return {OK, matching_op->token()};
+    // It's an error to call this with a zero challenge.
+ if (challenge == 0) {
+ return {OP_HANDLE_REQUIRED, {}};
}
- // Otherwise, no challenge - any authToken younger than the specified maximum
- // age will do.
+ // First see if we can find a token which matches the given challenge. If we
+ // can, return the newest one. We specifically don't care about its age.
+ //
+ Entry* newest_match_for_challenge = nullptr;
+ for (auto& entry : entries_) {
+ if (entry.token().challenge == challenge && !entry.completed() &&
+ entry.SatisfiesAuth(sids, auth_type)) {
+ if (newest_match_for_challenge == nullptr ||
+ entry.is_newer_than(newest_match_for_challenge)) {
+ newest_match_for_challenge = &entry;
+ }
+ }
+ }
+ if (newest_match_for_challenge != nullptr) {
+ newest_match_for_challenge->UpdateLastUse(now);
+ return {OK, newest_match_for_challenge->token()};
+ }
+
+ // If that didn't work, we'll take the most recent token within the specified
+    // deadline, if any. Of course, if the deadline is zero there is no point
+    // in looking at all.
+ if (authTokenMaxAgeMillis == 0) {
+ return {AUTH_TOKEN_NOT_FOUND, {}};
+ }
+
Entry* newest_match = nullptr;
for (auto& entry : entries_) {
if (entry.SatisfiesAuth(sids, auth_type) && entry.is_newer_than(newest_match)) {
@@ -216,11 +222,9 @@
return {AUTH_TOKEN_NOT_FOUND, {}};
}
- if (authTokenMaxAgeMillis > 0) {
- if (static_cast<int64_t>(newest_match->time_received()) + authTokenMaxAgeMillis <
- static_cast<int64_t>(now)) {
- return {AUTH_TOKEN_EXPIRED, {}};
- }
+ int64_t tokenAgeMillis = nowMillis - newest_match->time_received() * 1000;
+ if (tokenAgeMillis >= authTokenMaxAgeMillis) {
+ return {AUTH_TOKEN_EXPIRED, {}};
}
newest_match->UpdateLastUse(now);
diff --git a/keystore/binder/android/security/keystore/IKeystoreService.aidl b/keystore/binder/android/security/keystore/IKeystoreService.aidl
index e0879dd..3b9a1b4 100644
--- a/keystore/binder/android/security/keystore/IKeystoreService.aidl
+++ b/keystore/binder/android/security/keystore/IKeystoreService.aidl
@@ -87,7 +87,20 @@
int onKeyguardVisibilityChanged(in boolean isShowing, in int userId);
int listUidsOfAuthBoundKeys(out @utf8InCpp List<String> uids);
- // Called by credstore (and only credstore).
+ // This method looks through auth-tokens cached by keystore which match
+ // the passed-in |secureUserId|.
+ //
+ // If one or more of these tokens has a |challenge| field which matches
+ // the passed-in |challenge| parameter, the most recent is returned. In
+ // this case the |authTokenMaxAgeMillis| parameter is not used.
+ //
+ // Otherwise, the most recent auth-token of these tokens which is younger
+ // than |authTokenMaxAgeMillis| is returned.
+ //
+     * The passed-in |challenge| parameter must always be non-zero.
+ //
+ // This method is called by credstore (and only credstore).
+ //
void getTokensForCredstore(in long challenge, in long secureUserId, in int authTokenMaxAgeMillis,
in ICredstoreTokenCallback cb);
}
diff --git a/keystore/tests/Android.bp b/keystore/tests/Android.bp
index 883e020..327eb93 100644
--- a/keystore/tests/Android.bp
+++ b/keystore/tests/Android.bp
@@ -1,5 +1,14 @@
// Unit test for AuthTokenTable
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_test {
cflags: [
"-Wall",
diff --git a/keystore2/Android.bp b/keystore2/Android.bp
index f9295ca..aff824b 100644
--- a/keystore2/Android.bp
+++ b/keystore2/Android.bp
@@ -12,33 +12,58 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-rust_library {
- name: "libkeystore2",
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
+rust_defaults {
+ name: "libkeystore2_defaults",
crate_name: "keystore2",
srcs: ["src/lib.rs"],
rustlibs: [
"android.hardware.security.keymint-V1-rust",
"android.hardware.security.secureclock-V1-rust",
+ "android.hardware.security.sharedsecret-V1-rust",
+ "android.os.permissions_aidl-rust",
"android.security.apc-rust",
"android.security.authorization-rust",
"android.security.compat-rust",
+ "android.security.maintenance-rust",
+ "android.security.remoteprovisioning-rust",
"android.system.keystore2-V1-rust",
"libanyhow",
"libbinder_rs",
+ "libcutils_bindgen",
"libkeystore2_aaid-rust",
"libkeystore2_apc_compat-rust",
"libkeystore2_crypto_rust",
"libkeystore2_km_compat",
"libkeystore2_selinux",
+ "libkeystore2_system_property-rust",
+ "libkeystore2_vintf_rust",
"liblazy_static",
"liblibc",
"liblibsqlite3_sys",
"liblog_rust",
"librand",
"librusqlite",
+ "libstatslog_rust",
"libthiserror",
],
+ shared_libs: [
+ "libcutils",
+ ],
+}
+
+rust_library {
+ name: "libkeystore2",
+ defaults: ["libkeystore2_defaults"],
}
rust_library {
@@ -54,32 +79,13 @@
rust_test {
name: "keystore2_test",
crate_name: "keystore2",
- srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
+ compile_multilib: "first",
+ defaults: ["libkeystore2_defaults"],
rustlibs: [
- "android.hardware.security.keymint-V1-rust",
- "android.hardware.security.secureclock-V1-rust",
- "android.security.apc-rust",
- "android.security.authorization-rust",
- "android.security.compat-rust",
- "android.system.keystore2-V1-rust",
"libandroid_logger",
- "libanyhow",
- "libbinder_rs",
- "libkeystore2_aaid-rust",
- "libkeystore2_apc_compat-rust",
- "libkeystore2_crypto_rust",
- "libkeystore2_km_compat",
- "libkeystore2_selinux",
"libkeystore2_test_utils",
- "liblazy_static",
- "liblibc",
- "liblibsqlite3_sys",
- "liblog_rust",
- "librand",
- "librusqlite",
- "libthiserror",
],
}
@@ -91,6 +97,9 @@
"libbinder_rs",
"libkeystore2",
"liblog_rust",
+ "libvpnprofilestore-rust",
],
init_rc: ["keystore2.rc"],
+
+ vintf_fragments: ["android.system.keystore2-service.xml"],
}
diff --git a/keystore2/TEST_MAPPING b/keystore2/TEST_MAPPING
index 33d157e..16b6f85 100644
--- a/keystore2/TEST_MAPPING
+++ b/keystore2/TEST_MAPPING
@@ -1,13 +1,16 @@
{
"presubmit": [
{
- "name": "keystore2_certificate_test"
+ "name": "keystore2_crypto_test"
},
{
- "name": "keystore2_km_compat_test"
+ "name": "keystore2_crypto_test_rust"
},
{
"name": "keystore2_test"
+ },
+ {
+ "name": "CtsIdentityTestCases"
}
]
}
diff --git a/keystore2/aaid/Android.bp b/keystore2/aaid/Android.bp
index 2329400..d27fdf6 100644
--- a/keystore2/aaid/Android.bp
+++ b/keystore2/aaid/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_library {
name: "libkeystore2_aaid",
srcs: [
diff --git a/keystore2/aidl/Android.bp b/keystore2/aidl/Android.bp
index fac36e5..183096c 100644
--- a/keystore2/aidl/Android.bp
+++ b/keystore2/aidl/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
aidl_interface {
name: "android.security.attestationmanager",
srcs: [ "android/security/attestationmanager/*.aidl", ],
@@ -19,7 +28,8 @@
unstable: true,
backend: {
java: {
- sdk_version: "module_current",
+ platform_apis: true,
+ srcs_available: true,
},
rust: {
enabled: true,
@@ -37,7 +47,8 @@
unstable: true,
backend: {
java: {
- sdk_version: "module_current",
+ platform_apis: true,
+ srcs_available: true,
},
rust: {
enabled: true,
@@ -55,6 +66,7 @@
backend: {
java: {
enabled: true,
+ srcs_available: true,
},
rust: {
enabled: true,
@@ -73,7 +85,8 @@
unstable: true,
backend: {
java: {
- sdk_version: "module_current",
+ platform_apis: true,
+ srcs_available: true,
},
rust: {
enabled: true,
@@ -83,3 +96,61 @@
}
},
}
+
+aidl_interface {
+ name: "android.security.remoteprovisioning",
+ srcs: [ "android/security/remoteprovisioning/*.aidl" ],
+ imports: [
+ "android.hardware.security.keymint",
+ ],
+ unstable: true,
+ backend: {
+ java: {
+ platform_apis: true,
+ srcs_available: true,
+ },
+ ndk: {
+ enabled: true,
+ },
+ rust: {
+ enabled: true,
+ },
+ },
+}
+
+aidl_interface {
+ name: "android.security.maintenance",
+ srcs: [ "android/security/maintenance/*.aidl" ],
+ imports: [
+ "android.system.keystore2",
+ ],
+ unstable: true,
+ backend: {
+ java: {
+ platform_apis: true,
+ srcs_available: true,
+ },
+ rust: {
+ enabled: true,
+ },
+ ndk: {
+ enabled: true,
+ }
+ },
+}
+
+aidl_interface {
+ name: "android.security.vpnprofilestore",
+ srcs: [ "android/security/vpnprofilestore/*.aidl" ],
+ unstable: true,
+ backend: {
+ java: {
+ platform_apis: true,
+ srcs_available: true,
+ },
+ rust: {
+ enabled: true,
+ },
+ },
+}
+
diff --git a/keystore2/aidl/android/security/apc/IConfirmationCallback.aidl b/keystore2/aidl/android/security/apc/IConfirmationCallback.aidl
index f47d7f5..277b9dd 100644
--- a/keystore2/aidl/android/security/apc/IConfirmationCallback.aidl
+++ b/keystore2/aidl/android/security/apc/IConfirmationCallback.aidl
@@ -21,6 +21,7 @@
/**
* This callback interface must be implemented by the client to receive the result of the user
* confirmation.
+ * @hide
*/
interface IConfirmationCallback {
/**
diff --git a/keystore2/aidl/android/security/apc/IProtectedConfirmation.aidl b/keystore2/aidl/android/security/apc/IProtectedConfirmation.aidl
index 26ccf0f..3162224 100644
--- a/keystore2/aidl/android/security/apc/IProtectedConfirmation.aidl
+++ b/keystore2/aidl/android/security/apc/IProtectedConfirmation.aidl
@@ -18,6 +18,7 @@
import android.security.apc.IConfirmationCallback;
+/** @hide */
interface IProtectedConfirmation {
/**
diff --git a/keystore2/aidl/android/security/apc/ResponseCode.aidl b/keystore2/aidl/android/security/apc/ResponseCode.aidl
index 7ae3e1c..9a3619f 100644
--- a/keystore2/aidl/android/security/apc/ResponseCode.aidl
+++ b/keystore2/aidl/android/security/apc/ResponseCode.aidl
@@ -19,6 +19,7 @@
/**
* Used as service specific exception code by IProtectedConfirmation and as result
* code by IConfirmationCallback
+ * @hide
*/
@Backing(type="int")
enum ResponseCode {
diff --git a/keystore2/aidl/android/security/attestationmanager/ByteArray.aidl b/keystore2/aidl/android/security/attestationmanager/ByteArray.aidl
index a1592ec..dc37b1b 100644
--- a/keystore2/aidl/android/security/attestationmanager/ByteArray.aidl
+++ b/keystore2/aidl/android/security/attestationmanager/ByteArray.aidl
@@ -18,7 +18,6 @@
/**
* Simple data holder for a byte array, allowing for multidimensional arrays in AIDL.
- *
* @hide
*/
parcelable ByteArray {
diff --git a/keystore2/aidl/android/security/attestationmanager/IAttestationManager.aidl b/keystore2/aidl/android/security/attestationmanager/IAttestationManager.aidl
index 85eee57..e77a21e 100644
--- a/keystore2/aidl/android/security/attestationmanager/IAttestationManager.aidl
+++ b/keystore2/aidl/android/security/attestationmanager/IAttestationManager.aidl
@@ -21,7 +21,6 @@
/**
* Internal interface for performing device attestation.
- *
* @hide
*/
interface IAttestationManager {
diff --git a/keystore2/aidl/android/security/authorization/AuthorizationTokens.aidl b/keystore2/aidl/android/security/authorization/AuthorizationTokens.aidl
new file mode 100644
index 0000000..9061998
--- /dev/null
+++ b/keystore2/aidl/android/security/authorization/AuthorizationTokens.aidl
@@ -0,0 +1,33 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package android.security.authorization;
+
+import android.hardware.security.keymint.HardwareAuthToken;
+import android.hardware.security.secureclock.TimeStampToken;
+
+/**
+ * This parcelable is returned by `IKeystoreAuthorization::getAuthTokensForCredStore`.
+ * @hide
+ */
+parcelable AuthorizationTokens {
+ /**
+ * HardwareAuthToken provided by an authenticator.
+ */
+ HardwareAuthToken authToken;
+ /**
+ * TimeStampToken provided by a SecureClock.
+ */
+ TimeStampToken timestampToken;
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl b/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl
index df64401..86472eb 100644
--- a/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl
+++ b/keystore2/aidl/android/security/authorization/IKeystoreAuthorization.aidl
@@ -16,12 +16,14 @@
import android.hardware.security.keymint.HardwareAuthToken;
import android.security.authorization.LockScreenEvent;
+import android.security.authorization.AuthorizationTokens;
// TODO: mark the interface with @SensitiveData when the annotation is ready (b/176110256).
/**
* IKeystoreAuthorization interface exposes the methods for other system components to
* provide keystore with the information required to enforce authorizations on key usage.
+ * @hide
*/
interface IKeystoreAuthorization {
@@ -45,6 +47,8 @@
* ## Error conditions:
* `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'Unlock' permission.
* `ResponseCode::SYSTEM_ERROR` - if failed to perform lock/unlock operations due to various
+ * `ResponseCode::VALUE_CORRUPTED` - if the super key can not be decrypted.
+ * `ResponseCode::KEY_NOT_FOUND` - if the super key is not found.
*
* @lockScreenEvent - Indicates what happened.
* * LockScreenEvent.UNLOCK if the screen was unlocked.
@@ -56,4 +60,37 @@
*/
void onLockScreenEvent(in LockScreenEvent lockScreenEvent, in int userId,
in @nullable byte[] password);
+
+ /**
+ * Allows Credstore to retrieve a HardwareAuthToken and a TimestampToken.
+ * The Identity Credential Trusted App can run either in the TEE or in other secure hardware.
+ * So credstore always needs to retrieve a TimestampToken along with a HardwareAuthToken.
+ *
+ * The passed in |challenge| parameter must always be non-zero.
+ *
+ * The returned TimestampToken will always have its |challenge| field set to
+ * the |challenge| parameter.
+ *
+ * This method looks through the auth tokens cached by keystore that match the
+ * passed-in |secureUserId|.
+ * The most recent matching auth token whose |challenge| field matches the
+ * passed-in |challenge| parameter is returned.
+ * In this case the |authTokenMaxAgeMillis| parameter is not used.
+ *
+ * Otherwise, the most recent matching auth token which is younger
+ * than |authTokenMaxAgeMillis| is returned.
+ *
+ * This method is called by credstore (and only credstore).
+ *
+ * The caller requires 'get_auth_token' permission.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the caller does not have the 'get_auth_token'
+ * permission.
+ * `ResponseCode::SYSTEM_ERROR` - if it fails to obtain an auth token from the database.
+ * `ResponseCode::NO_AUTH_TOKEN_FOUND` - if no matching auth token is found.
+ * `ResponseCode::INVALID_ARGUMENT` - if the passed-in |challenge| parameter is zero.
+ */
+ AuthorizationTokens getAuthTokensForCredStore(in long challenge, in long secureUserId,
+ in long authTokenMaxAgeMillis);
}
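
As a hedged illustration (not part of this change), the snippet below sketches how a Rust caller holding the 'get_auth_token' permission might invoke the new method through the generated Rust backend. The crate path android_security_authorization matches the use statements later in this change, but the service instance name "android.security.authorization" and the 60-second maximum token age are assumptions for exposition only.

    use android_security_authorization::aidl::android::security::authorization::{
        AuthorizationTokens::AuthorizationTokens,
        IKeystoreAuthorization::IKeystoreAuthorization,
    };
    use anyhow::{anyhow, Result};
    use binder::Strong;

    fn fetch_tokens_for_credstore(challenge: i64, secure_user_id: i64) -> Result<AuthorizationTokens> {
        // Assumed service instance name; a zero challenge would be rejected with INVALID_ARGUMENT.
        let service: Strong<dyn IKeystoreAuthorization> =
            binder::get_interface("android.security.authorization")
                .map_err(|e| anyhow!("Failed to connect to authorization service: {:?}", e))?;
        service
            .getAuthTokensForCredStore(challenge, secure_user_id, 60_000)
            .map_err(|e| anyhow!("getAuthTokensForCredStore failed: {:?}", e))
    }
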
diff --git a/keystore2/aidl/android/security/authorization/LockScreenEvent.aidl b/keystore2/aidl/android/security/authorization/LockScreenEvent.aidl
index 877a916..c7553a2 100644
--- a/keystore2/aidl/android/security/authorization/LockScreenEvent.aidl
+++ b/keystore2/aidl/android/security/authorization/LockScreenEvent.aidl
@@ -14,6 +14,7 @@
package android.security.authorization;
+/** @hide */
@Backing(type="int")
enum LockScreenEvent {
UNLOCK = 0,
diff --git a/keystore2/aidl/android/security/authorization/ResponseCode.aidl b/keystore2/aidl/android/security/authorization/ResponseCode.aidl
new file mode 100644
index 0000000..169dc7b
--- /dev/null
+++ b/keystore2/aidl/android/security/authorization/ResponseCode.aidl
@@ -0,0 +1,57 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package android.security.authorization;
+
+/**
+ * Used as exception codes by IKeystoreAuthorization.
+ * @hide
+ */
+@Backing(type="int")
+enum ResponseCode {
+ /**
+ * A matching auth token is not found.
+ */
+ NO_AUTH_TOKEN_FOUND = 1,
+ /**
+ * The matching auth token is expired.
+ */
+ AUTH_TOKEN_EXPIRED = 2,
+ /**
+ * Same as in keystore2/ResponseCode.aidl.
+ * Any unexpected Error such as IO or communication errors.
+ */
+ SYSTEM_ERROR = 4,
+ /**
+ * Same as in keystore2/ResponseCode.aidl.
+ * Indicates that the caller does not have the permissions for the attempted request.
+ */
+ PERMISSION_DENIED = 6,
+ /**
+ * Same as in keystore2/ResponseCode.aidl.
+ * Indicates that the requested key does not exist.
+ */
+ KEY_NOT_FOUND = 7,
+ /**
+ * Same as in keystore2/ResponseCode.aidl.
+ * Indicates that a value being processed is corrupted.
+ */
+ VALUE_CORRUPTED = 8,
+ /**
+ * Same as in keystore2/ResponseCode.aidl.
+ * Indicates that an invalid argument was passed to an API call.
+ */
+ INVALID_ARGUMENT = 20,
+
+ }
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl b/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl
index 4b6a93b..50bfa19 100644
--- a/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl
+++ b/keystore2/aidl/android/security/compat/IKeystoreCompatService.aidl
@@ -25,6 +25,7 @@
* The compatibility service allows Keystore 2.0 to connect to legacy wrapper implementations that
* it hosts itself without registering them as a service. Keystore 2.0 would not be allowed to
* register a HAL service, so instead it registers this service which it can then connect to.
+ * @hide
*/
interface IKeystoreCompatService {
/**
diff --git a/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
new file mode 100644
index 0000000..280500c
--- /dev/null
+++ b/keystore2/aidl/android/security/maintenance/IKeystoreMaintenance.aidl
@@ -0,0 +1,111 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package android.security.maintenance;
+
+import android.system.keystore2.Domain;
+import android.security.maintenance.UserState;
+
+// TODO: mark the interface with @SensitiveData when the annotation is ready (b/176110256).
+
+/**
+ * IKeystoreMaintenance interface exposes the methods for adding/removing users and changing the
+ * user's password.
+ * @hide
+ */
+interface IKeystoreMaintenance {
+
+ /**
+ * Allows LockSettingsService to inform keystore about adding a new user.
+ * Callers require 'AddUser' permission.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'AddUser' permission.
+ * `ResponseCode::SYSTEM_ERROR` - if it fails to delete the keys of an existing user with the same
+ * user id.
+ *
+ * @param userId - Android user id
+ */
+ void onUserAdded(in int userId);
+
+ /**
+ * Allows LockSettingsService to inform keystore about removing a user.
+ * Callers require 'RemoveUser' permission.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'RemoveUser' permission.
+ * `ResponseCode::SYSTEM_ERROR` - if it fails to delete the keys of the user being deleted.
+ *
+ * @param userId - Android user id
+ */
+ void onUserRemoved(in int userId);
+
+ /**
+ * Allows LockSettingsService to inform keystore about password change of a user.
+ * Callers require 'ChangePassword' permission.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the caller does not have the 'ChangePassword'
+ * permission.
+ * `ResponseCode::SYSTEM_ERROR` - if it fails to delete the super encrypted keys of the user.
+ * `ResponseCode::LOCKED` - if the keystore is locked for the given user.
+ *
+ * @param userId - Android user id
+ * @param password - a secret derived from the synthetic password of the user
+ */
+ void onUserPasswordChanged(in int userId, in @nullable byte[] password);
+
+ /**
+ * This function deletes all keys within a namespace. It mainly gets called when an app gets
+ * removed and all resources of this app need to be cleaned up.
+ *
+ * @param domain - One of Domain.APP or Domain.SELINUX.
+ * @param nspace - The UID of the app that is to be cleared if domain is Domain.APP or
+ * the SEPolicy namespace if domain is Domain.SELINUX.
+ */
+ void clearNamespace(Domain domain, long nspace);
+
+ /**
+ * Allows querying user state, given user id.
+ * Callers require 'GetState' permission.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the callers do not have the 'GetState'
+ * permission.
+ * `ResponseCode::SYSTEM_ERROR` - if an error occurred when querying the user state.
+ *
+ * @param userId - Android user id
+ */
+ UserState getState(in int userId);
+
+ /**
+ * This function notifies the KeyMint devices that early boot has ended, so that they no
+ * longer allow early boot keys to be used.
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the caller does not have the 'EarlyBootEnded'
+ * permission.
+ * A KeyMint ErrorCode may be returned indicating a backend diagnosed error.
+ */
+ void earlyBootEnded();
+
+ /**
+ * Informs Keystore 2.0 that an off-body event was detected.
+ *
+ * ## Error conditions:
+ * `ResponseCode::PERMISSION_DENIED` - if the caller does not have the `ReportOffBody`
+ * permission.
+ * `ResponseCode::SYSTEM_ERROR` - if an unexpected error occurred.
+ */
+ void onDeviceOffBody();
+}
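
A hedged sketch of how a privileged system component (in the spirit of LockSettingsService) might drive this interface via the Rust backend. The crate name android_security_maintenance and the service instance name "android.security.maintenance" are assumptions used for illustration only, not part of this change.

    use android_security_maintenance::aidl::android::security::maintenance::{
        IKeystoreMaintenance::IKeystoreMaintenance, UserState::UserState,
    };
    use anyhow::{anyhow, Result};
    use binder::Strong;

    fn add_user_and_set_password(user_id: i32, password: &[u8]) -> Result<UserState> {
        let service: Strong<dyn IKeystoreMaintenance> =
            binder::get_interface("android.security.maintenance")
                .map_err(|e| anyhow!("Failed to connect to maintenance service: {:?}", e))?;
        // Requires the 'AddUser' and 'ChangePassword' permissions respectively.
        service.onUserAdded(user_id).map_err(|e| anyhow!("onUserAdded: {:?}", e))?;
        service
            .onUserPasswordChanged(user_id, Some(password))
            .map_err(|e| anyhow!("onUserPasswordChanged: {:?}", e))?;
        // Requires the 'GetState' permission.
        service.getState(user_id).map_err(|e| anyhow!("getState: {:?}", e))
    }
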
diff --git a/keystore2/aidl/android/security/maintenance/UserState.aidl b/keystore2/aidl/android/security/maintenance/UserState.aidl
new file mode 100644
index 0000000..376f4fb
--- /dev/null
+++ b/keystore2/aidl/android/security/maintenance/UserState.aidl
@@ -0,0 +1,23 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package android.security.maintenance;
+
+/** @hide */
+@Backing(type="int")
+enum UserState {
+ UNINITIALIZED = 0,
+ LSKF_UNLOCKED = 1,
+ LSKF_LOCKED = 2,
+}
\ No newline at end of file
diff --git a/keystore2/aidl/android/security/remoteprovisioning/AttestationPoolStatus.aidl b/keystore2/aidl/android/security/remoteprovisioning/AttestationPoolStatus.aidl
new file mode 100644
index 0000000..3528b42
--- /dev/null
+++ b/keystore2/aidl/android/security/remoteprovisioning/AttestationPoolStatus.aidl
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.remoteprovisioning;
+
+/**
+ * This parcelable provides information about the state of the attestation key pool.
+ * @hide
+ */
+parcelable AttestationPoolStatus {
+ /**
+ * The number of signed attestation certificate chains which will expire when the date provided
+ * to keystore to check against is reached.
+ */
+ int expiring;
+ /**
+ * The number of signed attestation certificate chains which have not yet been assigned to an
+ * app. This should be less than or equal to `attested`. The remainder of `attested` -
+ * `unassigned` gives the number of signed keys that have already been assigned to an app.
+ */
+ int unassigned;
+ /**
+ * The number of signed attestation keys. This should be less than or equal to `total`. The
+ * remainder of `total` - `attested` gives the number of keypairs available to be sent off to
+ * the server for signing.
+ */
+ int attested;
+ /**
+ * The total number of attestation keys.
+ */
+ int total;
+}
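
The relationships spelled out in the field comments can be made concrete with a small helper. This is an illustrative sketch only; it assumes the generated Rust parcelable is provided by a crate named android_security_remoteprovisioning (an assumption).

    use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::AttestationPoolStatus::AttestationPoolStatus;

    /// Derives the counts described in the field comments above.
    fn summarize(pool: &AttestationPoolStatus) -> (i32, i32) {
        // Signed keys already handed out to apps.
        let assigned = pool.attested - pool.unassigned;
        // Keypairs still waiting to be sent to the server for signing.
        let awaiting_signing = pool.total - pool.attested;
        (assigned, awaiting_signing)
    }
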
diff --git a/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
new file mode 100644
index 0000000..4a092af
--- /dev/null
+++ b/keystore2/aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.remoteprovisioning;
+
+import android.hardware.security.keymint.DeviceInfo;
+import android.hardware.security.keymint.ProtectedData;
+import android.hardware.security.keymint.SecurityLevel;
+import android.security.remoteprovisioning.AttestationPoolStatus;
+
+/**
+ * `IRemoteProvisioning` is the interface provided to use the remote provisioning functionality
+ * provided through KeyStore. The intent is for a higher level system component to use these
+ * functions in order to drive the process through which the device can receive functioning
+ * attestation certificates.
+ *
+ * ## Error conditions
+ * Error conditions are reported as service specific errors.
+ * Positive codes correspond to `android.security.remoteprovisioning.ResponseCode`
+ * and indicate error conditions diagnosed by the Keystore 2.0 service.
+ * TODO: Remote Provisioning HAL error code info
+ *
+ * `ResponseCode::PERMISSION_DENIED` if the caller does not have the permissions
+ * to use the RemoteProvisioning API. This permission is defined under access_vectors in SEPolicy
+ * in the keystore2 class: remotely_provision
+ *
+ * `ResponseCode::SYSTEM_ERROR` for any unexpected errors like IO or IPC failures.
+ *
+ * @hide
+ */
+interface IRemoteProvisioning {
+
+ /**
+ * Returns the status of the attestation key pool in the database.
+ *
+ * @param expiredBy The date as seconds since epoch by which to judge expiration status of
+ * certificates.
+ *
+ * @param secLevel The security level to specify which KM instance to get the pool for.
+ *
+ * @return The `AttestationPoolStatus` parcelable contains fields communicating information
+ * relevant to making decisions about when to generate and provision
+ * more attestation keys.
+ */
+ AttestationPoolStatus getPoolStatus(in long expiredBy, in SecurityLevel secLevel);
+
+ /**
+ * This is the primary entry point for beginning a remote provisioning flow. The caller
+ * specifies how many CSRs should be generated and provides an X25519 ECDH public key along
+ * with a challenge to encrypt privacy sensitive portions of the returned CBOR blob and
+ * guarantee freshness of the request to the certifying third party.
+ *
+ * ## Error conditions
+ * `ResponseCode::NO_UNSIGNED_KEYS` if there are no unsigned keypairs in the database that can
+ * be used for the CSRs.
+ *
+ * A RemoteProvisioning HAL response code may indicate backend errors such as failed EEK
+ * verification.
+ *
+ * @param testMode Whether or not the TA implementing the Remote Provisioning HAL should accept
+ * any EEK (Endpoint Encryption Key), or only one signed by a chain
+ * that verifies back to the Root of Trust baked into the TA. True
+ * means that any key is accepted.
+ *
+ * @param numCsr How many certificate signing requests should be generated.
+ *
+ * @param eek A chain of certificates terminating in an X25519 public key, the Endpoint
+ * Encryption Key.
+ *
+ * @param challenge A challenge to be included and MACed in the returned CBOR blob.
+ *
+ * @param secLevel The security level to specify which KM instance from which to generate a
+ * CSR.
+ *
+ * @param protectedData The encrypted CBOR blob generated by the remote provisioner
+ *
+ * @return A CBOR blob composed of various elements required by the server to verify the
+ * request.
+ */
+ byte[] generateCsr(in boolean testMode, in int numCsr, in byte[] eek, in byte[] challenge,
+ in SecurityLevel secLevel, out ProtectedData protectedData, out DeviceInfo deviceInfo);
+
+ /**
+ * This method provides a way for the returned attestation certificate chains to be provisioned
+ * to the attestation key database. When an app requests an attestation key, it will be assigned
+ * one of these certificate chains along with the corresponding private key.
+ *
+ * @param publicKey The raw public key encoded in the leaf certificate.
+ *
+ * @param batchCert The batch certificate corresponding to the attestation key. Separated for
+ * the purpose of making Subject lookup for KM attestation easier.
+ *
+ * @param certs An X.509, DER encoded certificate chain for the attestation key.
+ *
+ * @param expirationDate The expiration date on the certificate chain, provided by the caller
+ * for convenience.
+ *
+ * @param secLevel The security level representing the KM instance containing the key that this
+ * chain corresponds to.
+ */
+ void provisionCertChain(in byte[] publicKey, in byte[] batchCert, in byte[] certs,
+ in long expirationDate, in SecurityLevel secLevel);
+
+ /**
+ * This method allows the caller to instruct KeyStore to generate and store a key pair to be
+ * used for attestation in the `generateCsr` method. The caller should handle spacing out these
+ * requests so as not to jam up the KeyStore work queue.
+ *
+ * @param is_test_mode Instructs the underlying HAL interface to mark the generated key with a
+ * tag to indicate that it's for testing.
+ *
+ * @param secLevel The security level to specify which KM instance should generate a key pair.
+ */
+ void generateKeyPair(in boolean is_test_mode, in SecurityLevel secLevel);
+
+ /**
+ * This method returns the SecurityLevels of whichever instances of
+ * IRemotelyProvisionedComponent are running on the device. The RemoteProvisioner app needs to
+ * know which KM instances it should be generating and managing attestation keys for.
+ *
+ * @return The array of security levels.
+ */
+ SecurityLevel[] getSecurityLevels();
+
+ /**
+ * This method deletes all remotely provisioned attestation keys in the database, regardless
+ * of what state in their life cycle they are in. This is primarily useful to facilitate
+ * testing.
+ *
+ * @return Number of keys deleted
+ */
+ long deleteAllKeys();
+}
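
A hedged sketch of the calling sequence this interface is designed around (generate a key pair, check the pool, produce a CSR). The crate names, the choice of TRUSTED_ENVIRONMENT, and the zero `expiredBy` value are assumptions; the intended caller is the RemoteProvisioner app rather than this snippet.

    use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
        DeviceInfo::DeviceInfo, ProtectedData::ProtectedData, SecurityLevel::SecurityLevel,
    };
    use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::IRemoteProvisioning::IRemoteProvisioning;
    use binder::Strong;

    fn produce_csr(
        rp: &Strong<dyn IRemoteProvisioning>,
        eek_chain: &[u8],
        challenge: &[u8],
    ) -> binder::Result<Vec<u8>> {
        let sec_level = SecurityLevel::TRUSTED_ENVIRONMENT;
        // Make sure there is at least one unsigned key pair to put into the CSR.
        rp.generateKeyPair(false, sec_level)?;
        let pool = rp.getPoolStatus(0, sec_level)?;
        log::info!("pool: unassigned={} attested={} total={}", pool.unassigned, pool.attested, pool.total);
        // Out-parameters are returned through &mut arguments in the Rust backend.
        let mut protected_data = ProtectedData::default();
        let mut device_info = DeviceInfo::default();
        rp.generateCsr(false, 1, eek_chain, challenge, sec_level, &mut protected_data, &mut device_info)
    }
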
diff --git a/keystore2/aidl/android/security/remoteprovisioning/ResponseCode.aidl b/keystore2/aidl/android/security/remoteprovisioning/ResponseCode.aidl
new file mode 100644
index 0000000..c9877db
--- /dev/null
+++ b/keystore2/aidl/android/security/remoteprovisioning/ResponseCode.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.remoteprovisioning;
+
+@Backing(type="int")
+/** @hide */
+enum ResponseCode {
+ /**
+ * Returned if there are no keys available in the database to be used in a CSR.
+ */
+ NO_UNSIGNED_KEYS = 1,
+ /**
+ * The caller has improper SELinux permissions to access the Remote Provisioning API.
+ */
+ PERMISSION_DENIED = 2,
+ /**
+ * An unexpected error occurred, likely with IO or IPC.
+ */
+ SYSTEM_ERROR = 3,
+}
diff --git a/keystore2/aidl/android/security/vpnprofilestore/IVpnProfileStore.aidl b/keystore2/aidl/android/security/vpnprofilestore/IVpnProfileStore.aidl
new file mode 100644
index 0000000..8375b7b
--- /dev/null
+++ b/keystore2/aidl/android/security/vpnprofilestore/IVpnProfileStore.aidl
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.security.vpnprofilestore;
+
+/**
+ * Internal interface for accessing and storing VPN profiles.
+ * @hide
+ */
+interface IVpnProfileStore {
+ /**
+ * Service specific error code indicating that the profile was not found.
+ */
+ const int ERROR_PROFILE_NOT_FOUND = 1;
+
+ /**
+ * Service specific error code indicating that an unexpected system error occurred.
+ */
+ const int ERROR_SYSTEM_ERROR = 2;
+
+ /**
+ * Returns the profile stored under the given alias.
+ *
+ * @param alias name of the profile.
+ * @return The unstructured blob that was passed as profile parameter into put()
+ */
+ byte[] get(in String alias);
+
+ /**
+ * Stores one profile as unstructured blob under the given alias.
+ */
+ void put(in String alias, in byte[] profile);
+
+ /**
+ * Deletes the profile under the given alias.
+ */
+ void remove(in String alias);
+
+ /**
+ * Returns a list of aliases of profiles stored. The list is filtered by prefix.
+ * The resulting strings are the full aliases including the prefix.
+ */
+ String[] list(in String prefix);
+}
\ No newline at end of file
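
A hedged round-trip sketch of the new store. The crate name android_security_vpnprofilestore and the alias naming scheme are illustrative assumptions.

    use android_security_vpnprofilestore::aidl::android::security::vpnprofilestore::IVpnProfileStore::IVpnProfileStore;
    use binder::Strong;

    fn round_trip(store: &Strong<dyn IVpnProfileStore>) -> binder::Result<()> {
        let alias = "VPN_example.com";
        store.put(alias, b"opaque profile blob")?;
        // get() returns exactly the bytes that were passed to put().
        assert_eq!(store.get(alias)?, b"opaque profile blob".to_vec());
        // list() filters by prefix and returns the full aliases, prefix included.
        for found in store.list("VPN_")? {
            log::info!("stored profile: {}", found);
        }
        store.remove(alias)
    }
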
diff --git a/keystore2/android.system.keystore2-service.xml b/keystore2/android.system.keystore2-service.xml
new file mode 100644
index 0000000..6b8d0cb
--- /dev/null
+++ b/keystore2/android.system.keystore2-service.xml
@@ -0,0 +1,9 @@
+<manifest version="1.0" type="framework">
+ <hal format="aidl">
+ <name>android.system.keystore2</name>
+ <interface>
+ <name>IKeystoreService</name>
+ <instance>default</instance>
+ </interface>
+ </hal>
+</manifest>
diff --git a/keystore2/apc_compat/Android.bp b/keystore2/apc_compat/Android.bp
index 405e9b8..9519c8e 100644
--- a/keystore2/apc_compat/Android.bp
+++ b/keystore2/apc_compat/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
cc_library {
name: "libkeystore2_apc_compat",
srcs: [
diff --git a/keystore2/apc_compat/apc_compat.rs b/keystore2/apc_compat/apc_compat.rs
index 4391f5b..57f8710 100644
--- a/keystore2/apc_compat/apc_compat.rs
+++ b/keystore2/apc_compat/apc_compat.rs
@@ -141,6 +141,10 @@
/// data_confirmed: Option<&[u8]> and
/// confirmation_token: Option<&[u8]> hold the confirmed message and the confirmation token
/// respectively. They must be `Some()` if `rc == APC_COMPAT_ERROR_OK` and `None` otherwise.
+ ///
+ /// `cb` does not get called if this function returns an error.
+ /// (Thus the allow(unused_must_use))
+ #[allow(unused_must_use)]
pub fn prompt_user_confirmation<F>(
&self,
prompt_text: &str,
@@ -150,9 +154,9 @@
cb: F,
) -> Result<(), u32>
where
- F: FnOnce(u32, Option<&[u8]>, Option<&[u8]>),
+ F: FnOnce(u32, Option<&[u8]>, Option<&[u8]>) + 'static,
{
- let cb_data_ptr = Box::into_raw(Box::new(Box::new(cb)));
+ let cb_data_ptr = Box::into_raw(Box::new(Box::new(cb) as Box<Callback>));
let cb = ApcCompatCallback {
data: cb_data_ptr as *mut std::ffi::c_void,
result: Some(confirmation_result_callback),
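
The new `'static` bound means the callback can no longer borrow from the caller's stack; any state it needs after `prompt_user_confirmation` returns has to be moved into it. A hedged sketch of that pattern follows; the mpsc channel and the assumption that `APC_COMPAT_ERROR_OK` is exported by the keystore2_apc_compat crate are illustrative only.

    use keystore2_apc_compat::APC_COMPAT_ERROR_OK;
    use std::sync::mpsc::channel;

    fn example_confirmation_flow() {
        let (sender, receiver) = channel::<Option<Vec<u8>>>();
        // Satisfies FnOnce(u32, Option<&[u8]>, Option<&[u8]>) + 'static because it owns `sender`.
        let cb = move |rc: u32, _confirmed: Option<&[u8]>, token: Option<&[u8]>| {
            let token = if rc == APC_COMPAT_ERROR_OK { token.map(|t| t.to_vec()) } else { None };
            // The receiver may already be gone; ignore the send error in this sketch.
            sender.send(token).ok();
        };
        // `cb` can now be handed to ApcHal::prompt_user_confirmation() together with the
        // prompt text and UI options; the result arrives later on `receiver`.
        let _ = (cb, receiver);
    }
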
diff --git a/keystore2/keystore2.rc b/keystore2/keystore2.rc
index bc040e5..82bf3b8 100644
--- a/keystore2/keystore2.rc
+++ b/keystore2/keystore2.rc
@@ -6,15 +6,8 @@
#
# See system/core/init/README.md for information on the init.rc language.
-# Start Keystore 2 conditionally
-# TODO b/171563717 Remove when Keystore 2 migration is complete.
-on nonencrypted && property:ro.android.security.keystore2.enable=true
- enable keystore2
-
service keystore2 /system/bin/keystore2 /data/misc/keystore
- class main
+ class early_hal
user keystore
group keystore readproc log
writepid /dev/cpuset/foreground/tasks
- # TODO b/171563717 Remove when Keystore 2 migration is complete.
- disabled
diff --git a/keystore2/selinux/Android.bp b/keystore2/selinux/Android.bp
index acbf5ef..18063d3 100644
--- a/keystore2/selinux/Android.bp
+++ b/keystore2/selinux/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2_selinux",
crate_name: "keystore2_selinux",
diff --git a/keystore2/selinux/src/lib.rs b/keystore2/selinux/src/lib.rs
index 2b5091d..cc707e7 100644
--- a/keystore2/selinux/src/lib.rs
+++ b/keystore2/selinux/src/lib.rs
@@ -455,7 +455,6 @@
check_keystore_perm!(add_auth);
check_keystore_perm!(clear_ns);
- check_keystore_perm!(get_state);
check_keystore_perm!(lock);
check_keystore_perm!(reset);
check_keystore_perm!(unlock);
diff --git a/keystore2/src/apc.rs b/keystore2/src/apc.rs
index 105e071..f8259ea 100644
--- a/keystore2/src/apc.rs
+++ b/keystore2/src/apc.rs
@@ -12,16 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// TODO The confirmation token is yet unused.
-#![allow(unused_variables)]
-
//! This module implements the Android Protected Confirmation (APC) service as defined
//! in the android.security.apc AIDL spec.
use std::{
cmp::PartialEq,
collections::HashMap,
- sync::{Arc, Mutex},
+ sync::{mpsc::Sender, Arc, Mutex},
};
use crate::utils::{compat_2_response_code, ui_opts_2_compat};
@@ -31,10 +28,10 @@
ResponseCode::ResponseCode,
};
use android_security_apc::binder::{
- ExceptionCode, Interface, Result as BinderResult, SpIBinder, Status as BinderStatus,
+ ExceptionCode, Interface, Result as BinderResult, SpIBinder, Status as BinderStatus, Strong,
};
use anyhow::{Context, Result};
-use binder::{IBinder, ThreadState};
+use binder::{IBinderInternal, ThreadState};
use keystore2_apc_compat::ApcHal;
use keystore2_selinux as selinux;
use std::time::{Duration, Instant};
@@ -182,10 +179,16 @@
client_aborted: bool,
}
-#[derive(Default)]
struct ApcState {
session: Option<ApcSessionState>,
rate_limiting: HashMap<u32, RateInfo>,
+ confirmation_token_sender: Sender<Vec<u8>>,
+}
+
+impl ApcState {
+ fn new(confirmation_token_sender: Sender<Vec<u8>>) -> Self {
+ Self { session: None, rate_limiting: Default::default(), confirmation_token_sender }
+ }
}
/// Implementation of the APC service.
@@ -197,9 +200,11 @@
impl ApcManager {
/// Create a new instance of the Android Protected Confirmation service.
- pub fn new_native_binder() -> Result<impl IProtectedConfirmation> {
+ pub fn new_native_binder(
+ confirmation_token_sender: Sender<Vec<u8>>,
+ ) -> Result<Strong<dyn IProtectedConfirmation>> {
let result = BnProtectedConfirmation::new_binder(Self {
- state: Arc::new(Mutex::new(Default::default())),
+ state: Arc::new(Mutex::new(ApcState::new(confirmation_token_sender))),
});
result.as_binder().set_requesting_sid(true);
Ok(result)
@@ -222,21 +227,28 @@
let rc = compat_2_response_code(rc);
// Update rate limiting information.
- match (rc, client_aborted) {
+ match (rc, client_aborted, confirmation_token) {
// If the user confirmed the dialog.
- (ResponseCode::OK, _) => {
+ (ResponseCode::OK, _, Some(confirmation_token)) => {
// Reset counter.
state.rate_limiting.remove(&uid);
- // TODO at this point we need to send the confirmation token to where keystore can
- // use it.
+ // Send confirmation token to the enforcement module.
+ if let Err(e) = state.confirmation_token_sender.send(confirmation_token.to_vec()) {
+ log::error!("Got confirmation token, but receiver would not have it. {:?}", e);
+ }
}
// If cancelled by the user or if aborted by the client.
- (ResponseCode::CANCELLED, _) | (ResponseCode::ABORTED, true) => {
+ (ResponseCode::CANCELLED, _, _) | (ResponseCode::ABORTED, true, _) => {
// Penalize.
let mut rate_info = state.rate_limiting.entry(uid).or_default();
rate_info.counter += 1;
rate_info.timestamp = start;
}
+ (ResponseCode::OK, _, None) => {
+ log::error!(
+ "Confirmation prompt was successful but no confirmation token was returned."
+ );
+ }
// In any other case this try does not count at all.
_ => {}
}
@@ -299,7 +311,7 @@
extra_data,
locale,
ui_opts,
- |rc, data_confirmed, confirmation_token| {
+ move |rc, data_confirmed, confirmation_token| {
Self::result(state_clone, rc, data_confirmed, confirmation_token)
},
)
diff --git a/keystore2/src/async_task.rs b/keystore2/src/async_task.rs
index 6edd760..20a7458 100644
--- a/keystore2/src/async_task.rs
+++ b/keystore2/src/async_task.rs
@@ -19,9 +19,9 @@
//! processed all tasks before it terminates.
//! Note that low priority tasks are processed only when the high priority queue is empty.
-use std::time::Duration;
+use std::{any::Any, any::TypeId, time::Duration};
use std::{
- collections::VecDeque,
+ collections::{HashMap, VecDeque},
sync::Arc,
sync::{Condvar, Mutex, MutexGuard},
thread,
@@ -33,60 +33,140 @@
Running,
}
+/// The Shelf allows async tasks to store state across invocations.
+/// Note: Store elves at your own peril ;-).
+#[derive(Debug, Default)]
+pub struct Shelf(HashMap<TypeId, Box<dyn Any + Send>>);
+
+impl Shelf {
+ /// Get a reference to the shelved data of type T. Returns Some if the data exists.
+ pub fn get_downcast_ref<T: Any + Send>(&self) -> Option<&T> {
+ self.0.get(&TypeId::of::<T>()).and_then(|v| v.downcast_ref::<T>())
+ }
+
+ /// Get a mutable reference to the shelved data of type T. Returns Some if a T was
+ /// inserted using put, get_mut, or get_or_put_with.
+ pub fn get_downcast_mut<T: Any + Send>(&mut self) -> Option<&mut T> {
+ self.0.get_mut(&TypeId::of::<T>()).and_then(|v| v.downcast_mut::<T>())
+ }
+
+ /// Remove the entry of the given type and return the stored data if it existed.
+ pub fn remove_downcast_ref<T: Any + Send>(&mut self) -> Option<T> {
+ self.0.remove(&TypeId::of::<T>()).and_then(|v| v.downcast::<T>().ok().map(|b| *b))
+ }
+
+ /// Puts data `v` on the shelf. If there already was an entry of type T it is returned.
+ pub fn put<T: Any + Send>(&mut self, v: T) -> Option<T> {
+ self.0
+ .insert(TypeId::of::<T>(), Box::new(v) as Box<dyn Any + Send>)
+ .and_then(|v| v.downcast::<T>().ok().map(|b| *b))
+ }
+
+ /// Gets a mutable reference to the entry of the given type, creating it with its default value if necessary.
+ /// The type must implement Default.
+ pub fn get_mut<T: Any + Send + Default>(&mut self) -> &mut T {
+ self.0
+ .entry(TypeId::of::<T>())
+ .or_insert_with(|| Box::new(T::default()) as Box<dyn Any + Send>)
+ .downcast_mut::<T>()
+ .unwrap()
+ }
+
+ /// Gets a mutable reference to the entry of the given type or creates it using the init
+ /// function. Init is not executed if the entry already existed.
+ pub fn get_or_put_with<T: Any + Send, F>(&mut self, init: F) -> &mut T
+ where
+ F: FnOnce() -> T,
+ {
+ self.0
+ .entry(TypeId::of::<T>())
+ .or_insert_with(|| Box::new(init()) as Box<dyn Any + Send>)
+ .downcast_mut::<T>()
+ .unwrap()
+ }
+}
+
struct AsyncTaskState {
state: State,
thread: Option<thread::JoinHandle<()>>,
- hi_prio_req: VecDeque<Box<dyn FnOnce() + Send>>,
- lo_prio_req: VecDeque<Box<dyn FnOnce() + Send>>,
+ timeout: Duration,
+ hi_prio_req: VecDeque<Box<dyn FnOnce(&mut Shelf) + Send>>,
+ lo_prio_req: VecDeque<Box<dyn FnOnce(&mut Shelf) + Send>>,
+ idle_fns: Vec<Arc<dyn Fn(&mut Shelf) + Send + Sync>>,
+ /// The shelf allows tasks to store state across invocations. It is passed to each invocation
+ /// of each task. Tasks need to cooperate on the ids they use for storing state.
+ shelf: Option<Shelf>,
}
/// AsyncTask spawns one worker thread on demand to process jobs inserted into
-/// a low and a high priority work queue.
+/// a low and a high priority work queue. The queues are processed FIFO, and the low
+/// priority queue is processed only if the high priority queue is empty.
+/// Note: Because there is only one worker thread at a time for a given AsyncTask instance,
+/// all scheduled requests are guaranteed to be serialized with respect to one another.
pub struct AsyncTask {
state: Arc<(Condvar, Mutex<AsyncTaskState>)>,
}
impl Default for AsyncTask {
fn default() -> Self {
+ Self::new(Duration::from_secs(30))
+ }
+}
+
+impl AsyncTask {
+ /// Construct an [`AsyncTask`] with a specific timeout value.
+ pub fn new(timeout: Duration) -> Self {
Self {
state: Arc::new((
Condvar::new(),
Mutex::new(AsyncTaskState {
state: State::Exiting,
thread: None,
+ timeout,
hi_prio_req: VecDeque::new(),
lo_prio_req: VecDeque::new(),
+ idle_fns: Vec::new(),
+ shelf: None,
}),
)),
}
}
-}
-impl AsyncTask {
- /// Adds a job to the high priority queue. High priority jobs are completed before
- /// low priority jobs and can also overtake low priority jobs. But they cannot
- /// preempt them.
+ /// Adds a one-off job to the high priority queue. High priority jobs are
+ /// completed before low priority jobs and can also overtake low priority
+ /// jobs. But they cannot preempt them.
pub fn queue_hi<F>(&self, f: F)
where
- F: FnOnce() + Send + 'static,
+ F: for<'r> FnOnce(&'r mut Shelf) + Send + 'static,
{
self.queue(f, true)
}
- /// Adds a job to the low priority queue. Low priority jobs are completed after
- /// high priority. And they are not executed as long as high priority jobs are
- /// present. Jobs always run to completion and are never preempted by high
- /// priority jobs.
+ /// Adds a one-off job to the low priority queue. Low priority jobs are
+ /// completed after high priority. And they are not executed as long as high
+ /// priority jobs are present. Jobs always run to completion and are never
+ /// preempted by high priority jobs.
pub fn queue_lo<F>(&self, f: F)
where
- F: FnOnce() + Send + 'static,
+ F: FnOnce(&mut Shelf) + Send + 'static,
{
self.queue(f, false)
}
+ /// Adds an idle callback. This will be invoked whenever the worker becomes
+ /// idle (all high and low priority jobs have been performed).
+ pub fn add_idle<F>(&self, f: F)
+ where
+ F: Fn(&mut Shelf) + Send + Sync + 'static,
+ {
+ let (ref _condvar, ref state) = *self.state;
+ let mut state = state.lock().unwrap();
+ state.idle_fns.push(Arc::new(f));
+ }
+
fn queue<F>(&self, f: F, hi_prio: bool)
where
- F: FnOnce() + Send + 'static,
+ F: for<'r> FnOnce(&'r mut Shelf) + Send + 'static,
{
let (ref condvar, ref state) = *self.state;
let mut state = state.lock().unwrap();
@@ -109,36 +189,343 @@
}
let cloned_state = self.state.clone();
+ let timeout_period = state.timeout;
state.thread = Some(thread::spawn(move || {
let (ref condvar, ref state) = *cloned_state;
+
+ enum Action {
+ QueuedFn(Box<dyn FnOnce(&mut Shelf) + Send>),
+ IdleFns(Vec<Arc<dyn Fn(&mut Shelf) + Send + Sync>>),
+ };
+ let mut done_idle = false;
+
+ // When the worker starts, it takes the shelf and puts it on the stack.
+ let mut shelf = state.lock().unwrap().shelf.take().unwrap_or_default();
loop {
- if let Some(f) = {
- let (mut state, timeout) = condvar
- .wait_timeout_while(
- state.lock().unwrap(),
- Duration::from_secs(30),
- |state| state.hi_prio_req.is_empty() && state.lo_prio_req.is_empty(),
- )
- .unwrap();
- match (
- state.hi_prio_req.pop_front(),
- state.lo_prio_req.is_empty(),
- timeout.timed_out(),
- ) {
- (Some(f), _, _) => Some(f),
- (None, false, _) => state.lo_prio_req.pop_front(),
- (None, true, true) => {
- state.state = State::Exiting;
- break;
+ if let Some(action) = {
+ let state = state.lock().unwrap();
+ if !done_idle && state.hi_prio_req.is_empty() && state.lo_prio_req.is_empty() {
+ // No jobs queued so invoke the idle callbacks.
+ Some(Action::IdleFns(state.idle_fns.clone()))
+ } else {
+ // Wait for either a queued job to arrive or a timeout.
+ let (mut state, timeout) = condvar
+ .wait_timeout_while(state, timeout_period, |state| {
+ state.hi_prio_req.is_empty() && state.lo_prio_req.is_empty()
+ })
+ .unwrap();
+ match (
+ state.hi_prio_req.pop_front(),
+ state.lo_prio_req.is_empty(),
+ timeout.timed_out(),
+ ) {
+ (Some(f), _, _) => Some(Action::QueuedFn(f)),
+ (None, false, _) => {
+ state.lo_prio_req.pop_front().map(|f| Action::QueuedFn(f))
+ }
+ (None, true, true) => {
+ // When the worker exits it puts the shelf back into the shared
+ // state for the next worker to use. So state is preserved not
+ // only across invocations but also across worker thread shut down.
+ state.shelf = Some(shelf);
+ state.state = State::Exiting;
+ break;
+ }
+ (None, true, false) => None,
}
- (None, true, false) => None,
}
} {
- f()
+ // Now that the lock has been dropped, perform the action.
+ match action {
+ Action::QueuedFn(f) => {
+ f(&mut shelf);
+ done_idle = false;
+ }
+ Action::IdleFns(idle_fns) => {
+ for idle_fn in idle_fns {
+ idle_fn(&mut shelf);
+ }
+ done_idle = true;
+ }
+ }
}
}
}));
state.state = State::Running;
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::{AsyncTask, Shelf};
+ use std::sync::{
+ mpsc::{channel, sync_channel, RecvTimeoutError},
+ Arc,
+ };
+ use std::time::Duration;
+
+ #[test]
+ fn test_shelf() {
+ let mut shelf = Shelf::default();
+
+ let s = "A string".to_string();
+ assert_eq!(shelf.put(s), None);
+
+ let s2 = "Another string".to_string();
+ assert_eq!(shelf.put(s2), Some("A string".to_string()));
+
+ // Put something of a different type on the shelf.
+ #[derive(Debug, PartialEq, Eq)]
+ struct Elf {
+ pub name: String,
+ }
+ let e1 = Elf { name: "Glorfindel".to_string() };
+ assert_eq!(shelf.put(e1), None);
+
+ // The String value is still on the shelf.
+ let s3 = shelf.get_downcast_ref::<String>().unwrap();
+ assert_eq!(s3, "Another string");
+
+ // As is the Elf.
+ {
+ let e2 = shelf.get_downcast_mut::<Elf>().unwrap();
+ assert_eq!(e2.name, "Glorfindel");
+ e2.name = "Celeborn".to_string();
+ }
+
+ // Take the Elf off the shelf.
+ let e3 = shelf.remove_downcast_ref::<Elf>().unwrap();
+ assert_eq!(e3.name, "Celeborn");
+
+ assert_eq!(shelf.remove_downcast_ref::<Elf>(), None);
+
+ // No u64 value has been put on the shelf, so getting one gives the default value.
+ {
+ let i = shelf.get_mut::<u64>();
+ assert_eq!(*i, 0);
+ *i = 42;
+ }
+ let i2 = shelf.get_downcast_ref::<u64>().unwrap();
+ assert_eq!(*i2, 42);
+
+ // No i32 value has ever been seen near the shelf.
+ assert_eq!(shelf.get_downcast_ref::<i32>(), None);
+ assert_eq!(shelf.get_downcast_mut::<i32>(), None);
+ assert_eq!(shelf.remove_downcast_ref::<i32>(), None);
+ }
+
+ #[test]
+ fn test_async_task() {
+ let at = AsyncTask::default();
+
+ // First queue up a job that blocks until we release it, to avoid
+ // unpredictable synchronization.
+ let (start_sender, start_receiver) = channel();
+ at.queue_hi(move |shelf| {
+ start_receiver.recv().unwrap();
+ // Put a trace vector on the shelf
+ shelf.put(Vec::<String>::new());
+ });
+
+ // Queue up some high-priority and low-priority jobs.
+ for i in 0..3 {
+ let j = i;
+ at.queue_lo(move |shelf| {
+ let trace = shelf.get_mut::<Vec<String>>();
+ trace.push(format!("L{}", j));
+ });
+ let j = i;
+ at.queue_hi(move |shelf| {
+ let trace = shelf.get_mut::<Vec<String>>();
+ trace.push(format!("H{}", j));
+ });
+ }
+
+ // Finally queue up a low priority job that emits the trace.
+ let (trace_sender, trace_receiver) = channel();
+ at.queue_lo(move |shelf| {
+ let trace = shelf.get_downcast_ref::<Vec<String>>().unwrap();
+ trace_sender.send(trace.clone()).unwrap();
+ });
+
+ // Ready, set, go.
+ start_sender.send(()).unwrap();
+ let trace = trace_receiver.recv().unwrap();
+
+ assert_eq!(trace, vec!["H0", "H1", "H2", "L0", "L1", "L2"]);
+ }
+
+ #[test]
+ fn test_async_task_chain() {
+ let at = Arc::new(AsyncTask::default());
+ let (sender, receiver) = channel();
+ // Queue up a job that will queue up another job. This confirms
+ // that the job is not invoked with any internal AsyncTask locks held.
+ let at_clone = at.clone();
+ at.queue_hi(move |_shelf| {
+ at_clone.queue_lo(move |_shelf| {
+ sender.send(()).unwrap();
+ });
+ });
+ receiver.recv().unwrap();
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_async_task_panic() {
+ let at = AsyncTask::default();
+ at.queue_hi(|_shelf| {
+ panic!("Panic from queued job");
+ });
+ // Queue another job afterwards to ensure that the async thread gets joined.
+ let (done_sender, done_receiver) = channel();
+ at.queue_hi(move |_shelf| {
+ done_sender.send(()).unwrap();
+ });
+ done_receiver.recv().unwrap();
+ }
+
+ #[test]
+ fn test_async_task_idle() {
+ let at = AsyncTask::new(Duration::from_secs(3));
+ // Need a SyncSender as it is Send+Sync.
+ let (idle_done_sender, idle_done_receiver) = sync_channel::<()>(3);
+ at.add_idle(move |_shelf| {
+ idle_done_sender.send(()).unwrap();
+ });
+
+ // Queue up some high-priority and low-priority jobs that take time.
+ for _i in 0..3 {
+ at.queue_lo(|_shelf| {
+ std::thread::sleep(Duration::from_millis(500));
+ });
+ at.queue_hi(|_shelf| {
+ std::thread::sleep(Duration::from_millis(500));
+ });
+ }
+ // Final low-priority job.
+ let (done_sender, done_receiver) = channel();
+ at.queue_lo(move |_shelf| {
+ done_sender.send(()).unwrap();
+ });
+
+ // Nothing happens until the last job completes.
+ assert_eq!(
+ idle_done_receiver.recv_timeout(Duration::from_secs(1)),
+ Err(RecvTimeoutError::Timeout)
+ );
+ done_receiver.recv().unwrap();
+ idle_done_receiver.recv_timeout(Duration::from_millis(1)).unwrap();
+
+ // Idle callback not executed again even if we wait for a while.
+ assert_eq!(
+ idle_done_receiver.recv_timeout(Duration::from_secs(3)),
+ Err(RecvTimeoutError::Timeout)
+ );
+
+ // However, if more work is done then there's another chance to go idle.
+ let (done_sender, done_receiver) = channel();
+ at.queue_hi(move |_shelf| {
+ std::thread::sleep(Duration::from_millis(500));
+ done_sender.send(()).unwrap();
+ });
+ // Idle callback not immediately executed, because the high priority
+ // job is taking a while.
+ assert_eq!(
+ idle_done_receiver.recv_timeout(Duration::from_millis(1)),
+ Err(RecvTimeoutError::Timeout)
+ );
+ done_receiver.recv().unwrap();
+ idle_done_receiver.recv_timeout(Duration::from_millis(1)).unwrap();
+ }
+
+ #[test]
+ fn test_async_task_multiple_idle() {
+ let at = AsyncTask::new(Duration::from_secs(3));
+ let (idle_sender, idle_receiver) = sync_channel::<i32>(5);
+ // Queue a high priority job to start things off
+ at.queue_hi(|_shelf| {
+ std::thread::sleep(Duration::from_millis(500));
+ });
+
+ // Multiple idle callbacks.
+ for i in 0..3 {
+ let idle_sender = idle_sender.clone();
+ at.add_idle(move |_shelf| {
+ idle_sender.send(i).unwrap();
+ });
+ }
+
+ // Nothing happens immediately.
+ assert_eq!(
+ idle_receiver.recv_timeout(Duration::from_millis(1)),
+ Err(RecvTimeoutError::Timeout)
+ );
+ // Wait for a moment and the idle jobs should have run.
+ std::thread::sleep(Duration::from_secs(1));
+
+ let mut results = Vec::new();
+ while let Ok(i) = idle_receiver.recv_timeout(Duration::from_millis(1)) {
+ results.push(i);
+ }
+ assert_eq!(results, [0, 1, 2]);
+ }
+
+ #[test]
+ fn test_async_task_idle_queues_job() {
+ let at = Arc::new(AsyncTask::new(Duration::from_secs(1)));
+ let at_clone = at.clone();
+ let (idle_sender, idle_receiver) = sync_channel::<i32>(100);
+ // Add an idle callback that queues a low-priority job.
+ at.add_idle(move |shelf| {
+ at_clone.queue_lo(|_shelf| {
+ // Slow things down so the channel doesn't fill up.
+ std::thread::sleep(Duration::from_millis(50));
+ });
+ let i = shelf.get_mut::<i32>();
+ idle_sender.send(*i).unwrap();
+ *i += 1;
+ });
+
+ // Nothing happens immediately.
+ assert_eq!(
+ idle_receiver.recv_timeout(Duration::from_millis(1500)),
+ Err(RecvTimeoutError::Timeout)
+ );
+
+ // Once we queue a normal job, things start.
+ at.queue_hi(|_shelf| {});
+ assert_eq!(0, idle_receiver.recv_timeout(Duration::from_millis(200)).unwrap());
+
+ // The idle callback queues a job, and completion of that job
+ // means the task is going idle again...so the idle callback will
+ // be called repeatedly.
+ assert_eq!(1, idle_receiver.recv_timeout(Duration::from_millis(100)).unwrap());
+ assert_eq!(2, idle_receiver.recv_timeout(Duration::from_millis(100)).unwrap());
+ assert_eq!(3, idle_receiver.recv_timeout(Duration::from_millis(100)).unwrap());
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_async_task_idle_panic() {
+ let at = AsyncTask::new(Duration::from_secs(1));
+ let (idle_sender, idle_receiver) = sync_channel::<()>(3);
+ // Add an idle callback that panics.
+ at.add_idle(move |_shelf| {
+ idle_sender.send(()).unwrap();
+ panic!("Panic from idle callback");
+ });
+ // Queue a job to trigger idleness and ensuing panic.
+ at.queue_hi(|_shelf| {});
+ idle_receiver.recv().unwrap();
+
+ // Queue another job afterwards to ensure that the async thread gets joined
+ // and the panic detected.
+ let (done_sender, done_receiver) = channel();
+ at.queue_hi(move |_shelf| {
+ done_sender.send(()).unwrap();
+ });
+ done_receiver.recv().unwrap();
+ }
+}
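
For reference, a condensed usage sketch of the extended API, covering the same patterns the tests above exercise; it assumes `AsyncTask` and `Shelf` are in scope from this module.

    use std::time::Duration;

    fn example_usage() {
        let task = AsyncTask::new(Duration::from_secs(5));
        // State placed on the shelf survives across job invocations (and worker restarts).
        task.queue_hi(|shelf| {
            *shelf.get_mut::<u64>() += 1;
        });
        task.queue_lo(|shelf| {
            let seen = shelf.get_downcast_ref::<u64>().copied().unwrap_or(0);
            log::info!("high priority jobs seen so far: {}", seen);
        });
        // Runs each time both queues drain.
        task.add_idle(|_shelf| log::info!("async task worker is idle"));
    }
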
diff --git a/keystore2/src/attestation_key_utils.rs b/keystore2/src/attestation_key_utils.rs
new file mode 100644
index 0000000..425eec6
--- /dev/null
+++ b/keystore2/src/attestation_key_utils.rs
@@ -0,0 +1,126 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implements get_attestation_key_info which loads remote provisioned or user
+//! generated attestation keys.
+
+use crate::database::{BlobMetaData, KeyEntryLoadBits, KeyType};
+use crate::database::{KeyIdGuard, KeystoreDB};
+use crate::error::{Error, ErrorCode};
+use crate::permission::KeyPerm;
+use crate::remote_provisioning::RemProvState;
+use crate::utils::check_key_permission;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ AttestationKey::AttestationKey, Certificate::Certificate, KeyParameter::KeyParameter,
+};
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor,
+};
+use anyhow::{Context, Result};
+use keystore2_crypto::parse_subject_from_certificate;
+
+/// KeyMint takes two different kinds of attestation keys. Remote provisioned keys
+/// and those that have been generated by the user. Unfortunately, they need to be
+/// handled quite differently, thus the different representations.
+pub enum AttestationKeyInfo {
+ RemoteProvisioned {
+ attestation_key: AttestationKey,
+ attestation_certs: Certificate,
+ },
+ UserGenerated {
+ key_id_guard: KeyIdGuard,
+ blob: Vec<u8>,
+ blob_metadata: BlobMetaData,
+ issuer_subject: Vec<u8>,
+ },
+}
+
+/// This function loads and, optionally, assigns the caller's remote provisioned
+/// attestation key or, if `attest_key_descriptor` is given, it loads the user
+/// generated attestation key from the database.
+pub fn get_attest_key_info(
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ attest_key_descriptor: Option<&KeyDescriptor>,
+ params: &[KeyParameter],
+ rem_prov_state: &RemProvState,
+ db: &mut KeystoreDB,
+) -> Result<Option<AttestationKeyInfo>> {
+ match attest_key_descriptor {
+ None => rem_prov_state
+ .get_remotely_provisioned_attestation_key_and_certs(&key, caller_uid, params, db)
+ .context(concat!(
+ "In get_attest_key_and_cert_chain: ",
+ "Trying to get remotely provisioned attestation key."
+ ))
+ .map(|result| {
+ result.map(|(attestation_key, attestation_certs)| {
+ AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }
+ })
+ }),
+ Some(attest_key) => get_user_generated_attestation_key(&attest_key, caller_uid, db)
+ .context("In get_attest_key_and_cert_chain: Trying to load attest key")
+ .map(Some),
+ }
+}
+
+fn get_user_generated_attestation_key(
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ db: &mut KeystoreDB,
+) -> Result<AttestationKeyInfo> {
+ let (key_id_guard, blob, cert, blob_metadata) =
+ load_attest_key_blob_and_cert(&key, caller_uid, db)
+ .context("In get_user_generated_attestation_key: Failed to load blob and cert")?;
+
+ let issuer_subject: Vec<u8> = parse_subject_from_certificate(&cert).context(
+ "In get_user_generated_attestation_key: Failed to parse subject from certificate.",
+ )?;
+
+ Ok(AttestationKeyInfo::UserGenerated { key_id_guard, blob, issuer_subject, blob_metadata })
+}
+
+fn load_attest_key_blob_and_cert(
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ db: &mut KeystoreDB,
+) -> Result<(KeyIdGuard, Vec<u8>, Vec<u8>, BlobMetaData)> {
+ match key.domain {
+ Domain::BLOB => Err(Error::Km(ErrorCode::INVALID_ARGUMENT)).context(
+ "In load_attest_key_blob_and_cert: Domain::BLOB attestation keys not supported",
+ ),
+ _ => {
+ let (key_id_guard, mut key_entry) = db
+ .load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::BOTH,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+ )
+ .context("In load_attest_key_blob_and_cert: Failed to load key.")?;
+
+ let (blob, blob_metadata) =
+ key_entry.take_key_blob_info().ok_or_else(Error::sys).context(concat!(
+ "In load_attest_key_blob_and_cert: Successfully loaded key entry,",
+ " but KM blob was missing."
+ ))?;
+ let cert = key_entry.take_cert().ok_or_else(Error::sys).context(concat!(
+ "In load_attest_key_blob_and_cert: Successfully loaded key entry,",
+ " but cert was missing."
+ ))?;
+ Ok((key_id_guard, blob, cert, blob_metadata))
+ }
+ }
+}
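
A hedged sketch of how a caller might distinguish the returned variants; the messages are illustrative only, and the helper assumes `AttestationKeyInfo` is in scope from this module.

    fn describe(info: &Option<AttestationKeyInfo>) -> &'static str {
        match info {
            Some(AttestationKeyInfo::RemoteProvisioned { .. }) => {
                "attesting with a remotely provisioned key"
            }
            Some(AttestationKeyInfo::UserGenerated { .. }) => {
                "attesting with a caller-supplied attestation key"
            }
            None => "no attestation key resolved by this helper",
        }
    }
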
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index ba27df8..06b5598 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -12,27 +12,98 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! This module implements IKeyAuthorization AIDL interface.
+//! This module implements IKeystoreAuthorization AIDL interface.
use crate::error::Error as KeystoreError;
-use crate::error::map_or_log_err;
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_BLOB_LOADER, SUPER_KEY};
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
use crate::permission::KeystorePerm;
+use crate::super_key::UserState;
use crate::utils::check_keystore_permission;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- HardwareAuthToken::HardwareAuthToken, HardwareAuthenticatorType::HardwareAuthenticatorType,
+ HardwareAuthToken::HardwareAuthToken,
};
-use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
- Timestamp::Timestamp,
+use android_security_authorization::binder::{ExceptionCode, Interface, Result as BinderResult,
+ Strong, Status as BinderStatus};
+use android_security_authorization::aidl::android::security::authorization::{
+ IKeystoreAuthorization::BnKeystoreAuthorization, IKeystoreAuthorization::IKeystoreAuthorization,
+ LockScreenEvent::LockScreenEvent, AuthorizationTokens::AuthorizationTokens,
+ ResponseCode::ResponseCode,
};
-use android_security_authorization::binder::{Interface, Result as BinderResult};
-use android_security_authorization:: aidl::android::security::authorization::IKeystoreAuthorization::{
- BnKeystoreAuthorization, IKeystoreAuthorization,
-};
-use android_security_authorization:: aidl::android::security::authorization::LockScreenEvent::LockScreenEvent;
-use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ ResponseCode::ResponseCode as KsResponseCode };
use anyhow::{Context, Result};
-use binder::IBinder;
+use binder::IBinderInternal;
+use keystore2_crypto::Password;
+use keystore2_selinux as selinux;
+
+/// This is the Authorization error type, it wraps binder exceptions and the
+/// Authorization ResponseCode
+#[derive(Debug, thiserror::Error, PartialEq)]
+pub enum Error {
+ /// Wraps an IKeystoreAuthorization response code as defined by
+ /// android.security.authorization AIDL interface specification.
+ #[error("Error::Rc({0:?})")]
+ Rc(ResponseCode),
+ /// Wraps a Binder exception code other than a service specific exception.
+ #[error("Binder exception code {0:?}, {1:?}")]
+ Binder(ExceptionCode, i32),
+}
+
+/// This function should be used by authorization service calls to translate error conditions
+/// into service specific exceptions.
+///
+/// All error conditions get logged by this function.
+///
+/// `Error::Rc(x)` variants get mapped onto a service specific error code of `x`.
+/// Certain response codes may be returned by the keystore2 modules from keystore2's
+/// ResponseCode.aidl; these are then converted to the corresponding response codes of the
+/// android.security.authorization AIDL interface.
+///
+/// `selinux::Error::perm()` is mapped on `ResponseCode::PERMISSION_DENIED`.
+///
+/// All non `Error` error conditions get mapped onto ResponseCode::SYSTEM_ERROR`.
+///
+/// `handle_ok` is called if `result` is `Ok(value)`, with `value` passed as its argument.
+/// `handle_ok` must produce a `BinderResult<T>`, but it typically returns `Ok(value)`.
+pub fn map_or_log_err<T, U, F>(result: Result<U>, handle_ok: F) -> BinderResult<T>
+where
+ F: FnOnce(U) -> BinderResult<T>,
+{
+ result.map_or_else(
+ |e| {
+ log::error!("{:#?}", e);
+ let root_cause = e.root_cause();
+ if let Some(KeystoreError::Rc(ks_rcode)) = root_cause.downcast_ref::<KeystoreError>() {
+ let rc = match *ks_rcode {
+ // Although currently keystore2/ResponseCode.aidl and
+ // authorization/ResponseCode.aidl share the same integer values for the
+ // common response codes, this may deviate in the future, hence the
+ // conversion here.
+ KsResponseCode::SYSTEM_ERROR => ResponseCode::SYSTEM_ERROR.0,
+ KsResponseCode::KEY_NOT_FOUND => ResponseCode::KEY_NOT_FOUND.0,
+ KsResponseCode::VALUE_CORRUPTED => ResponseCode::VALUE_CORRUPTED.0,
+ KsResponseCode::INVALID_ARGUMENT => ResponseCode::INVALID_ARGUMENT.0,
+ // If the code paths of IKeystoreAuthorization aidl's methods happen to return
+ // other error codes from KsResponseCode in the future, they should be converted
+ // as well.
+ _ => ResponseCode::SYSTEM_ERROR.0,
+ };
+ return Err(BinderStatus::new_service_specific_error(rc, None));
+ }
+ let rc = match root_cause.downcast_ref::<Error>() {
+ Some(Error::Rc(rcode)) => rcode.0,
+ Some(Error::Binder(_, _)) => ResponseCode::SYSTEM_ERROR.0,
+ None => match root_cause.downcast_ref::<selinux::Error>() {
+ Some(selinux::Error::PermissionDenied) => ResponseCode::PERMISSION_DENIED.0,
+ _ => ResponseCode::SYSTEM_ERROR.0,
+ },
+ };
+ Err(BinderStatus::new_service_specific_error(rc, None))
+ },
+ handle_ok,
+ )
+}
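As an illustration of the mapping described above (this sketch is not part of the patch; `check_challenge` is a hypothetical helper, and the module's existing imports of `anyhow::Context`, `Error`, and `ResponseCode` are assumed), an implementation function attaches `Error::Rc(...)` as the root cause, and the binder entry point passes its result through `map_or_log_err`:

    // Hypothetical helper whose root cause is an authorization ResponseCode.
    fn check_challenge(challenge: i64) -> anyhow::Result<()> {
        if challenge == 0 {
            return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT))
                .context("In check_challenge: challenge must not be zero.");
        }
        Ok(())
    }

    // At the binder boundary the error chain is logged and the root cause is
    // turned into a service specific exception:
    //     map_or_log_err(check_challenge(challenge), Ok)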
/// This struct is defined to implement the aforementioned AIDL interface.
/// As of now, it is an empty struct.
@@ -40,7 +111,7 @@
impl AuthorizationManager {
/// Create a new instance of Keystore Authorization service.
- pub fn new_native_binder() -> Result<impl IKeystoreAuthorization> {
+ pub fn new_native_binder() -> Result<Strong<dyn IKeystoreAuthorization>> {
let result = BnKeystoreAuthorization::new_binder(Self);
result.as_binder().set_requesting_sid(true);
Ok(result)
@@ -50,16 +121,7 @@
//check keystore permission
check_keystore_permission(KeystorePerm::add_auth()).context("In add_auth_token.")?;
- //TODO: Keymint's HardwareAuthToken aidl needs to implement Copy/Clone
- let auth_token_copy = HardwareAuthToken {
- challenge: auth_token.challenge,
- userId: auth_token.userId,
- authenticatorId: auth_token.authenticatorId,
- authenticatorType: HardwareAuthenticatorType(auth_token.authenticatorType.0),
- timestamp: Timestamp { milliSeconds: auth_token.timestamp.milliSeconds },
- mac: auth_token.mac.clone(),
- };
- ENFORCEMENTS.add_auth_token(auth_token_copy)?;
+ ENFORCEMENTS.add_auth_token(auth_token.clone())?;
Ok(())
}
@@ -67,32 +129,42 @@
&self,
lock_screen_event: LockScreenEvent,
user_id: i32,
- password: Option<&[u8]>,
+ password: Option<Password>,
) -> Result<()> {
match (lock_screen_event, password) {
- (LockScreenEvent::UNLOCK, Some(user_password)) => {
+ (LockScreenEvent::UNLOCK, Some(password)) => {
//This corresponds to the unlock() method in legacy keystore API.
//check permission
check_keystore_permission(KeystorePerm::unlock())
.context("In on_lock_screen_event: Unlock with password.")?;
ENFORCEMENTS.set_device_locked(user_id, false);
- // Unlock super key.
- DB.with::<_, Result<()>>(|db| {
- let mut db = db.borrow_mut();
- //TODO - b/176123105 - Once the user management API is implemented, unlock is
- //allowed only if the user is added. Then the two tasks handled by the
- //unlock_user_key will be split into two methods. For now, unlock_user_key
- //method is used as it is, which created a super key for the user if one does
- //not exists, in addition to unlocking the existing super key of the user/
- SUPER_KEY.unlock_user_key(
+
+ DB.with(|db| {
+ SUPER_KEY.unlock_screen_lock_bound_key(
+ &mut db.borrow_mut(),
user_id as u32,
- user_password,
- &mut db,
- &LEGACY_BLOB_LOADER,
- )?;
- Ok(())
+ &password,
+ )
})
- .context("In on_lock_screen_event.")?;
+ .context("In on_lock_screen_event: unlock_screen_lock_bound_key failed")?;
+
+ // Unlock super key.
+ if let UserState::Uninitialized = DB
+ .with(|db| {
+ UserState::get_with_password_unlock(
+ &mut db.borrow_mut(),
+ &LEGACY_MIGRATOR,
+ &SUPER_KEY,
+ user_id as u32,
+ &password,
+ )
+ })
+ .context("In on_lock_screen_event: Unlock with password.")?
+ {
+ log::info!(
+ "In on_lock_screen_event. Trying to unlock when LSKF is uninitialized."
+ );
+ }
Ok(())
}
@@ -106,15 +178,39 @@
check_keystore_permission(KeystorePerm::lock())
.context("In on_lock_screen_event: Lock")?;
ENFORCEMENTS.set_device_locked(user_id, true);
+ SUPER_KEY.lock_screen_lock_bound_key(user_id as u32);
+
Ok(())
}
_ => {
// Any other combination is not supported.
- Err(KeystoreError::Rc(ResponseCode::INVALID_ARGUMENT))
+ Err(Error::Rc(ResponseCode::INVALID_ARGUMENT))
.context("In on_lock_screen_event: Unknown event.")
}
}
}
+
+ fn get_auth_tokens_for_credstore(
+ &self,
+ challenge: i64,
+ secure_user_id: i64,
+ auth_token_max_age_millis: i64,
+ ) -> Result<AuthorizationTokens> {
+        // Check permission. The function must return early if this fails, so the '?' at the
+        // end is essential.
+ check_keystore_permission(KeystorePerm::get_auth_token())
+ .context("In get_auth_tokens_for_credstore.")?;
+
+        // If the challenge is zero, return an error.
+ if challenge == 0 {
+ return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context("In get_auth_tokens_for_credstore. Challenge can not be zero.");
+ }
+ // Obtain the auth token and the timestamp token from the enforcement module.
+ let (auth_token, ts_token) =
+ ENFORCEMENTS.get_auth_tokens(challenge, secure_user_id, auth_token_max_age_millis)?;
+ Ok(AuthorizationTokens { authToken: auth_token, timestampToken: ts_token })
+ }
}
impl Interface for AuthorizationManager {}
@@ -130,6 +226,25 @@
user_id: i32,
password: Option<&[u8]>,
) -> BinderResult<()> {
- map_or_log_err(self.on_lock_screen_event(lock_screen_event, user_id, password), Ok)
+ map_or_log_err(
+ self.on_lock_screen_event(lock_screen_event, user_id, password.map(|pw| pw.into())),
+ Ok,
+ )
+ }
+
+ fn getAuthTokensForCredStore(
+ &self,
+ challenge: i64,
+ secure_user_id: i64,
+ auth_token_max_age_millis: i64,
+ ) -> binder::public_api::Result<AuthorizationTokens> {
+ map_or_log_err(
+ self.get_auth_tokens_for_credstore(
+ challenge,
+ secure_user_id,
+ auth_token_max_age_millis,
+ ),
+ Ok,
+ )
}
}
diff --git a/keystore2/src/crypto/Android.bp b/keystore2/src/crypto/Android.bp
index 9ecd823..21c9b74 100644
--- a/keystore2/src/crypto/Android.bp
+++ b/keystore2/src/crypto/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2_crypto_rust",
crate_name: "keystore2_crypto",
@@ -59,12 +68,14 @@
"--whitelist-function", "HKDFExpand",
"--whitelist-function", "ECDHComputeKey",
"--whitelist-function", "ECKEYGenerateKey",
- "--whitelist-function", "ECKEYDeriveFromSecret",
+ "--whitelist-function", "ECKEYMarshalPrivateKey",
+ "--whitelist-function", "ECKEYParsePrivateKey",
"--whitelist-function", "EC_KEY_get0_public_key",
"--whitelist-function", "ECPOINTPoint2Oct",
"--whitelist-function", "ECPOINTOct2Point",
"--whitelist-function", "EC_KEY_free",
"--whitelist-function", "EC_POINT_free",
+ "--whitelist-function", "extractSubjectFromCertificate",
"--whitelist-type", "EC_KEY",
"--whitelist-type", "EC_POINT",
"--whitelist-var", "EC_MAX_BYTES",
diff --git a/keystore2/src/crypto/certificate_utils.cpp b/keystore2/src/crypto/certificate_utils.cpp
index 4b0dca4..31c7fb4 100644
--- a/keystore2/src/crypto/certificate_utils.cpp
+++ b/keystore2/src/crypto/certificate_utils.cpp
@@ -42,17 +42,30 @@
DEFINE_OPENSSL_OBJECT_POINTER(AUTHORITY_KEYID);
DEFINE_OPENSSL_OBJECT_POINTER(BASIC_CONSTRAINTS);
DEFINE_OPENSSL_OBJECT_POINTER(X509_ALGOR);
+DEFINE_OPENSSL_OBJECT_POINTER(BIGNUM);
} // namespace
-std::variant<CertUtilsError, X509_NAME_Ptr> makeCommonName(const std::string& name) {
+constexpr const char kDefaultCommonName[] = "Default Common Name";
+
+std::variant<CertUtilsError, X509_NAME_Ptr>
+makeCommonName(std::optional<std::reference_wrapper<const std::vector<uint8_t>>> name) {
+ if (name) {
+ const uint8_t* p = name->get().data();
+ X509_NAME_Ptr x509_name(d2i_X509_NAME(nullptr, &p, name->get().size()));
+ if (!x509_name) {
+ return CertUtilsError::MemoryAllocation;
+ }
+ return x509_name;
+ }
+
X509_NAME_Ptr x509_name(X509_NAME_new());
if (!x509_name) {
- return CertUtilsError::BoringSsl;
+ return CertUtilsError::MemoryAllocation;
}
if (!X509_NAME_add_entry_by_txt(x509_name.get(), "CN", MBSTRING_ASC,
- reinterpret_cast<const uint8_t*>(name.c_str()), name.length(),
- -1 /* loc */, 0 /* set */)) {
+ reinterpret_cast<const uint8_t*>(kDefaultCommonName),
+ sizeof(kDefaultCommonName) - 1, -1 /* loc */, 0 /* set */)) {
return CertUtilsError::BoringSsl;
}
return x509_name;
@@ -154,18 +167,56 @@
return key_usage;
}
+template <typename Out, typename In> static Out saturate(In in) {
+ if constexpr (std::is_signed_v<Out> == std::is_signed_v<In>) {
+ if constexpr (sizeof(Out) >= sizeof(In)) {
+ // Same sign, and In fits into Out. Cast is lossless.
+ return static_cast<Out>(in);
+ } else {
+            // Out is smaller than In, so we may need to truncate.
+            // We clamp `in` to the range [Out::min(), Out::max()].
+ return static_cast<Out>(
+ std::min(static_cast<In>(std::numeric_limits<Out>::max()),
+ std::max(static_cast<In>(std::numeric_limits<Out>::min()), in)));
+ }
+ } else {
+        // The signs differ. This puts the lower bound at 0, because either the input or the
+        // output is unsigned. The upper bound is the max of the smaller type or, if both have
+        // the same size, the max of the signed type.
+ if constexpr (std::is_signed_v<Out>) {
+ if constexpr (sizeof(Out) > sizeof(In)) {
+ return static_cast<Out>(in);
+ } else {
+                // Because `Out` is the signed one, the lower bound of `in` is 0 and fits into
+                // `Out`. We only need to compare against the maximum, and we do so in type In,
+                // whose maximum is at least as large as Out's, so Out::max() is guaranteed to fit.
+ return static_cast<Out>(
+ std::min(static_cast<In>(std::numeric_limits<Out>::max()), in));
+ }
+ } else {
+ // Out is unsigned. So we can return 0 if in is negative.
+ if (in < 0) return 0;
+ if constexpr (sizeof(Out) >= sizeof(In)) {
+                // If Out is at least as wide as In, the cast is lossless.
+ return static_cast<Out>(in);
+ } else {
+ // Otherwise we have to take the minimum of Out::max() and `in`.
+ return static_cast<Out>(
+ std::min(static_cast<In>(std::numeric_limits<Out>::max()), in));
+ }
+ }
+ }
+}
+
// Creates a rump certificate structure with serial, subject and issuer names, as well as
// activation and expiry date.
// Callers should pass an empty X509_Ptr and check the return value for CertUtilsError::Ok (0)
// before accessing the result.
std::variant<CertUtilsError, X509_Ptr>
-makeCertRump(const uint32_t serial, const char subject[], const uint64_t activeDateTimeMilliSeconds,
- const uint64_t usageExpireDateTimeMilliSeconds) {
-
- // Sanitize pointer arguments.
- if (!subject || strlen(subject) == 0) {
- return CertUtilsError::InvalidArgument;
- }
+makeCertRump(std::optional<std::reference_wrapper<const std::vector<uint8_t>>> serial,
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> subject,
+ const int64_t activeDateTimeMilliSeconds,
+ const int64_t usageExpireDateTimeMilliSeconds) {
// Create certificate structure.
X509_Ptr certificate(X509_new());
@@ -178,9 +229,23 @@
return CertUtilsError::BoringSsl;
}
+ BIGNUM_Ptr bn_serial;
+ if (serial) {
+ bn_serial = BIGNUM_Ptr(BN_bin2bn(serial->get().data(), serial->get().size(), nullptr));
+ if (!bn_serial) {
+ return CertUtilsError::MemoryAllocation;
+ }
+ } else {
+ bn_serial = BIGNUM_Ptr(BN_new());
+ if (!bn_serial) {
+ return CertUtilsError::MemoryAllocation;
+ }
+ BN_zero(bn_serial.get());
+ }
+
// Set the certificate serialNumber
ASN1_INTEGER_Ptr serialNumber(ASN1_INTEGER_new());
- if (!serialNumber || !ASN1_INTEGER_set(serialNumber.get(), serial) ||
+ if (!serialNumber || !BN_to_ASN1_INTEGER(bn_serial.get(), serialNumber.get()) ||
!X509_set_serialNumber(certificate.get(), serialNumber.get() /* Don't release; copied */))
return CertUtilsError::BoringSsl;
@@ -194,16 +259,16 @@
return std::get<CertUtilsError>(subjectName);
}
+ time_t notBeforeTime = saturate<time_t>(activeDateTimeMilliSeconds / 1000);
// Set activation date.
ASN1_TIME_Ptr notBefore(ASN1_TIME_new());
- if (!notBefore || !ASN1_TIME_set(notBefore.get(), activeDateTimeMilliSeconds / 1000) ||
+ if (!notBefore || !ASN1_TIME_set(notBefore.get(), notBeforeTime) ||
!X509_set_notBefore(certificate.get(), notBefore.get() /* Don't release; copied */))
return CertUtilsError::BoringSsl;
// Set expiration date.
time_t notAfterTime;
- notAfterTime = (time_t)std::min((uint64_t)std::numeric_limits<time_t>::max(),
- usageExpireDateTimeMilliSeconds / 1000);
+ notAfterTime = saturate<time_t>(usageExpireDateTimeMilliSeconds / 1000);
ASN1_TIME_Ptr notAfter(ASN1_TIME_new());
if (!notAfter || !ASN1_TIME_set(notAfter.get(), notAfterTime) ||
@@ -215,8 +280,10 @@
}
std::variant<CertUtilsError, X509_Ptr>
-makeCert(const EVP_PKEY* evp_pkey, const uint32_t serial, const char subject[],
- const uint64_t activeDateTimeMilliSeconds, const uint64_t usageExpireDateTimeMilliSeconds,
+makeCert(const EVP_PKEY* evp_pkey,
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> serial,
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> subject,
+ const int64_t activeDateTimeMilliSeconds, const int64_t usageExpireDateTimeMilliSeconds,
bool addSubjectKeyIdEx, std::optional<KeyUsageExtension> keyUsageEx,
std::optional<BasicConstraintsExtension> basicConstraints) {
diff --git a/keystore2/src/crypto/crypto.cpp b/keystore2/src/crypto/crypto.cpp
index 3cc19c5..e4a1ac3 100644
--- a/keystore2/src/crypto/crypto.cpp
+++ b/keystore2/src/crypto/crypto.cpp
@@ -26,6 +26,7 @@
#include <openssl/evp.h>
#include <openssl/hkdf.h>
#include <openssl/rand.h>
+#include <openssl/x509.h>
#include <vector>
@@ -235,10 +236,28 @@
return key;
}
-EC_KEY* ECKEYDeriveFromSecret(const uint8_t* secret, size_t secret_len) {
+size_t ECKEYMarshalPrivateKey(const EC_KEY* priv_key, uint8_t* buf, size_t len) {
+ CBB cbb;
+ size_t out_len;
+ if (!CBB_init_fixed(&cbb, buf, len) ||
+ !EC_KEY_marshal_private_key(&cbb, priv_key, EC_PKEY_NO_PARAMETERS | EC_PKEY_NO_PUBKEY) ||
+ !CBB_finish(&cbb, nullptr, &out_len)) {
+ return 0;
+ } else {
+ return out_len;
+ }
+}
+
+EC_KEY* ECKEYParsePrivateKey(const uint8_t* buf, size_t len) {
+ CBS cbs;
+ CBS_init(&cbs, buf, len);
EC_GROUP* group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
- auto result = EC_KEY_derive_from_secret(group, secret, secret_len);
+ auto result = EC_KEY_parse_private_key(&cbs, group);
EC_GROUP_free(group);
+ if (result != nullptr && CBS_len(&cbs) != 0) {
+ EC_KEY_free(result);
+ return nullptr;
+ }
return result;
}
@@ -261,3 +280,42 @@
}
return point;
}
+
+int extractSubjectFromCertificate(const uint8_t* cert_buf, size_t cert_len, uint8_t* subject_buf,
+ size_t subject_buf_len) {
+ if (!cert_buf || !subject_buf) {
+ ALOGE("extractSubjectFromCertificate: received null pointer");
+ return 0;
+ }
+
+ const uint8_t* p = cert_buf;
+ bssl::UniquePtr<X509> cert(d2i_X509(nullptr /* Allocate X509 struct */, &p, cert_len));
+ if (!cert) {
+ ALOGE("extractSubjectFromCertificate: failed to parse certificate");
+ return 0;
+ }
+
+ X509_NAME* subject = X509_get_subject_name(cert.get());
+ if (!subject) {
+ ALOGE("extractSubjectFromCertificate: failed to retrieve subject name");
+ return 0;
+ }
+
+ int subject_len = i2d_X509_NAME(subject, nullptr /* Don't copy the data */);
+ if (subject_len < 0) {
+ ALOGE("extractSubjectFromCertificate: error obtaining encoded subject name length");
+ return 0;
+ }
+
+ if (subject_len > subject_buf_len) {
+ // Return the subject length, negated, so the caller knows how much
+ // buffer space is required.
+ ALOGI("extractSubjectFromCertificate: needed %d bytes for subject, caller provided %zu",
+ subject_len, subject_buf_len);
+ return -subject_len;
+ }
+
+ // subject_buf has enough space.
+ uint8_t* tmp = subject_buf;
+ return i2d_X509_NAME(subject, &tmp);
+}
diff --git a/keystore2/src/crypto/crypto.hpp b/keystore2/src/crypto/crypto.hpp
index 9bd7758..f841eb3 100644
--- a/keystore2/src/crypto/crypto.hpp
+++ b/keystore2/src/crypto/crypto.hpp
@@ -55,11 +55,36 @@
EC_KEY* ECKEYGenerateKey();
- EC_KEY* ECKEYDeriveFromSecret(const uint8_t *secret, size_t secret_len);
+ size_t ECKEYMarshalPrivateKey(const EC_KEY *priv_key, uint8_t *buf, size_t len);
+
+ EC_KEY* ECKEYParsePrivateKey(const uint8_t *buf, size_t len);
size_t ECPOINTPoint2Oct(const EC_POINT *point, uint8_t *buf, size_t len);
EC_POINT* ECPOINTOct2Point(const uint8_t *buf, size_t len);
+
}
+// Parse a DER-encoded X.509 certificate contained in cert_buf, with length
+// cert_len, extract the subject, DER-encode it and write the result to
+// subject_buf, which has subject_buf_len capacity.
+//
+// Because the length of the subject is unknown, and because we'd like to (a) be
+// able to handle subjects of any size and (b) avoid parsing the certificate
+// twice most of the time, once to discover the length and once to parse it, the
+// return value is overloaded.
+//
+// If the return value > 0 it specifies the number of bytes written into
+// subject_buf; the operation was successful.
+//
+// If the return value == 0, certificate parsing failed unrecoverably. The
+// reason will be logged.
+//
+// If the return value < 0, the operation failed because the subject size >
+// subject_buf_len. The return value is -(subject_size), where subject_size is
+// the size of the extracted DER-encoded subject field. Call
+// extractSubjectFromCertificate again with a sufficiently-large buffer.
+int extractSubjectFromCertificate(const uint8_t* cert_buf, size_t cert_len,
+ uint8_t* subject_buf, size_t subject_buf_len);
+
#endif // __CRYPTO_H__
diff --git a/keystore2/src/crypto/error.rs b/keystore2/src/crypto/error.rs
index 1e84fc6..a369012 100644
--- a/keystore2/src/crypto/error.rs
+++ b/keystore2/src/crypto/error.rs
@@ -74,9 +74,13 @@
#[error("Failed to generate key.")]
ECKEYGenerateKeyFailed,
- /// This is returned if the C implementation of ECKEYDeriveFromSecret returned null.
- #[error("Failed to derive key.")]
- ECKEYDeriveFailed,
+ /// This is returned if the C implementation of ECKEYMarshalPrivateKey returned 0.
+ #[error("Failed to marshal private key.")]
+ ECKEYMarshalPrivateKeyFailed,
+
+ /// This is returned if the C implementation of ECKEYParsePrivateKey returned null.
+ #[error("Failed to parse private key.")]
+ ECKEYParsePrivateKeyFailed,
/// This is returned if the C implementation of ECPOINTPoint2Oct returned 0.
#[error("Failed to convert point to oct.")]
@@ -85,4 +89,8 @@
/// This is returned if the C implementation of ECPOINTOct2Point returned null.
#[error("Failed to convert oct to point.")]
ECOct2PointFailed,
+
+ /// This is returned if the C implementation of extractSubjectFromCertificate failed.
+ #[error("Failed to extract certificate subject.")]
+ ExtractSubjectFailed,
}
diff --git a/keystore2/src/crypto/include/certificate_utils.h b/keystore2/src/crypto/include/certificate_utils.h
index 1e80d80..6c25b9a 100644
--- a/keystore2/src/crypto/include/certificate_utils.h
+++ b/keystore2/src/crypto/include/certificate_utils.h
@@ -80,7 +80,7 @@
* `signCert` or `signCertWith`.
* @param evp_pkey The public key that the certificate is issued for.
* @param serial The certificate serial number.
- * @param subject The subject common name.
+ * @param subject The X509 name encoded subject common name.
* @param activeDateTimeMilliSeconds The not before date in epoch milliseconds.
* @param usageExpireDateTimeMilliSeconds The not after date in epoch milliseconds.
* @param addSubjectKeyIdEx If true, adds the subject key id extension.
@@ -89,14 +89,14 @@
* @return CertUtilsError::Ok on success.
*/
std::variant<CertUtilsError, X509_Ptr>
-makeCert(const EVP_PKEY* evp_pkey, //
- const uint32_t serial, //
- const char subject[], //
- const uint64_t activeDateTimeMilliSeconds, //
- const uint64_t usageExpireDateTimeMilliSeconds, //
- bool addSubjectKeyIdEx, //
- std::optional<KeyUsageExtension> keyUsageEx, //
- std::optional<BasicConstraintsExtension> basicConstraints); //
+makeCert(const EVP_PKEY* evp_pkey, //
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> serial, //
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> subject, //
+ const int64_t activeDateTimeMilliSeconds, //
+ const int64_t usageExpireDateTimeMilliSeconds, //
+ bool addSubjectKeyIdEx, //
+ std::optional<KeyUsageExtension> keyUsageEx, //
+ std::optional<BasicConstraintsExtension> basicConstraints); //
/**
* Takes the subject name from `signingCert` and sets it as issuer name in `cert`.
diff --git a/keystore2/src/crypto/lib.rs b/keystore2/src/crypto/lib.rs
index 92b257c..3523a9d 100644
--- a/keystore2/src/crypto/lib.rs
+++ b/keystore2/src/crypto/lib.rs
@@ -19,11 +19,12 @@
mod zvec;
pub use error::Error;
use keystore2_crypto_bindgen::{
- generateKeyFromPassword, randomBytes, AES_gcm_decrypt, AES_gcm_encrypt, ECDHComputeKey,
- ECKEYDeriveFromSecret, ECKEYGenerateKey, ECPOINTOct2Point, ECPOINTPoint2Oct, EC_KEY_free,
- EC_KEY_get0_public_key, EC_POINT_free, HKDFExpand, HKDFExtract, EC_KEY, EC_MAX_BYTES, EC_POINT,
- EVP_MAX_MD_SIZE,
+ extractSubjectFromCertificate, generateKeyFromPassword, randomBytes, AES_gcm_decrypt,
+ AES_gcm_encrypt, ECDHComputeKey, ECKEYGenerateKey, ECKEYMarshalPrivateKey,
+ ECKEYParsePrivateKey, ECPOINTOct2Point, ECPOINTPoint2Oct, EC_KEY_free, EC_KEY_get0_public_key,
+ EC_POINT_free, HKDFExpand, HKDFExtract, EC_KEY, EC_MAX_BYTES, EC_POINT, EVP_MAX_MD_SIZE,
};
+use std::convert::TryFrom;
use std::convert::TryInto;
use std::marker::PhantomData;
pub use zvec::ZVec;
@@ -57,10 +58,15 @@
/// Generate a salt.
pub fn generate_salt() -> Result<Vec<u8>, Error> {
- // Safety: salt has the same length as the requested number of random bytes.
- let mut salt = vec![0; SALT_LENGTH];
- if unsafe { randomBytes(salt.as_mut_ptr(), SALT_LENGTH) } {
- Ok(salt)
+ generate_random_data(SALT_LENGTH)
+}
+
+/// Generate random data of the given size.
+pub fn generate_random_data(size: usize) -> Result<Vec<u8>, Error> {
+ // Safety: data has the same length as the requested number of random bytes.
+ let mut data = vec![0; size];
+ if unsafe { randomBytes(data.as_mut_ptr(), size) } {
+ Ok(data)
} else {
Err(Error::RandomNumberGenerationFailed)
}
@@ -143,42 +149,68 @@
}
}
-/// Generates a key from the given password and salt.
-/// The salt must be exactly 16 bytes long.
-/// Two key sizes are accepted: 16 and 32 bytes.
-pub fn derive_key_from_password(
- pw: &[u8],
- salt: Option<&[u8]>,
- key_length: usize,
-) -> Result<ZVec, Error> {
- let salt: *const u8 = match salt {
- Some(s) => {
- if s.len() != SALT_LENGTH {
- return Err(Error::InvalidSaltLength);
- }
- s.as_ptr()
- }
- None => std::ptr::null(),
- };
+/// Represents a "password" that can be used to key the PBKDF2 algorithm.
+pub enum Password<'a> {
+ /// Borrow an existing byte array
+ Ref(&'a [u8]),
+ /// Use an owned ZVec to store the key
+ Owned(ZVec),
+}
- match key_length {
- AES_128_KEY_LENGTH | AES_256_KEY_LENGTH => {}
- _ => return Err(Error::InvalidKeyLength),
+impl<'a> From<&'a [u8]> for Password<'a> {
+ fn from(pw: &'a [u8]) -> Self {
+ Self::Ref(pw)
+ }
+}
+
+impl<'a> Password<'a> {
+ fn get_key(&'a self) -> &'a [u8] {
+ match self {
+ Self::Ref(b) => b,
+ Self::Owned(z) => &*z,
+ }
}
- let mut result = ZVec::new(key_length)?;
+ /// Generate a key from the given password and salt.
+ /// The salt must be exactly 16 bytes long.
+ /// Two key sizes are accepted: 16 and 32 bytes.
+ pub fn derive_key(&self, salt: Option<&[u8]>, key_length: usize) -> Result<ZVec, Error> {
+ let pw = self.get_key();
- unsafe {
- generateKeyFromPassword(
- result.as_mut_ptr(),
- result.len(),
- pw.as_ptr() as *const std::os::raw::c_char,
- pw.len(),
- salt,
- )
- };
+ let salt: *const u8 = match salt {
+ Some(s) => {
+ if s.len() != SALT_LENGTH {
+ return Err(Error::InvalidSaltLength);
+ }
+ s.as_ptr()
+ }
+ None => std::ptr::null(),
+ };
- Ok(result)
+ match key_length {
+ AES_128_KEY_LENGTH | AES_256_KEY_LENGTH => {}
+ _ => return Err(Error::InvalidKeyLength),
+ }
+
+ let mut result = ZVec::new(key_length)?;
+
+ unsafe {
+ generateKeyFromPassword(
+ result.as_mut_ptr(),
+ result.len(),
+ pw.as_ptr() as *const std::os::raw::c_char,
+ pw.len(),
+ salt,
+ )
+ };
+
+ Ok(result)
+ }
+
+ /// Try to make another Password object with the same data.
+ pub fn try_clone(&self) -> Result<Password<'static>, Error> {
+ Ok(Password::Owned(ZVec::try_from(self.get_key())?))
+ }
}
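A short usage sketch (not part of the patch; the function name and the choice of AES-256 are illustrative, and the surrounding module items `Password`, `generate_salt`, `AES_256_KEY_LENGTH`, and `ZVec` are assumed):

    // Derive an AES-256 key from caller supplied password bytes.
    fn derive_example_key(raw_pw: &[u8]) -> Result<ZVec, Error> {
        let pw: Password = raw_pw.into(); // borrows the caller's bytes
        let salt = generate_salt()?;      // 16 bytes, as derive_key requires
        // The derived key lives in a ZVec, i.e. in mlocked memory.
        pw.derive_key(Some(&salt), AES_256_KEY_LENGTH)
    }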
/// Calls the boringssl HKDF_extract function.
@@ -306,14 +338,32 @@
Ok(ECKey(key))
}
-/// Calls the boringssl EC_KEY_derive_from_secret function.
-pub fn ec_key_derive_from_secret(secret: &[u8]) -> Result<ECKey, Error> {
- // Safety: secret is a valid buffer.
- let result = unsafe { ECKEYDeriveFromSecret(secret.as_ptr(), secret.len()) };
- if result.is_null() {
- return Err(Error::ECKEYDeriveFailed);
+/// Calls the boringssl EC_KEY_marshal_private_key function.
+pub fn ec_key_marshal_private_key(key: &ECKey) -> Result<ZVec, Error> {
+ let len = 39; // Empirically observed length of private key
+ let mut buf = ZVec::new(len)?;
+ // Safety: the key is valid.
+ // This will not write past the specified length of the buffer; if the
+ // len above is too short, it returns 0.
+ let written_len =
+ unsafe { ECKEYMarshalPrivateKey(key.0, buf.as_mut_ptr(), buf.len()) } as usize;
+ if written_len == len {
+ Ok(buf)
+ } else {
+ Err(Error::ECKEYMarshalPrivateKeyFailed)
}
- Ok(ECKey(result))
+}
+
+/// Calls the boringssl EC_KEY_parse_private_key function.
+pub fn ec_key_parse_private_key(buf: &[u8]) -> Result<ECKey, Error> {
+ // Safety: this will not read past the specified length of the buffer.
+ // It fails if less than the whole buffer is consumed.
+ let key = unsafe { ECKEYParsePrivateKey(buf.as_ptr(), buf.len()) };
+ if key.is_null() {
+ Err(Error::ECKEYParsePrivateKeyFailed)
+ } else {
+ Ok(ECKey(key))
+ }
}
/// Calls the boringssl EC_KEY_get0_public_key function.
@@ -353,6 +403,54 @@
Ok(OwnedECPoint(result))
}
+/// Uses BoringSSL to extract the DER-encoded subject from a DER-encoded X.509 certificate.
+pub fn parse_subject_from_certificate(cert_buf: &[u8]) -> Result<Vec<u8>, Error> {
+ // Try with a 200-byte output buffer, should be enough in all but bizarre cases.
+ let mut retval = vec![0; 200];
+
+ // Safety: extractSubjectFromCertificate reads at most cert_buf.len() bytes from cert_buf and
+ // writes at most retval.len() bytes to retval.
+ let mut size = unsafe {
+ extractSubjectFromCertificate(
+ cert_buf.as_ptr(),
+ cert_buf.len(),
+ retval.as_mut_ptr(),
+ retval.len(),
+ )
+ };
+
+ if size == 0 {
+ return Err(Error::ExtractSubjectFailed);
+ }
+
+ if size < 0 {
+ // Our buffer wasn't big enough. Make one that is just the right size and try again.
+ let negated_size = usize::try_from(-size).map_err(|_e| Error::ExtractSubjectFailed)?;
+ retval = vec![0; negated_size];
+
+ // Safety: extractSubjectFromCertificate reads at most cert_buf.len() bytes from cert_buf
+ // and writes at most retval.len() bytes to retval.
+ size = unsafe {
+ extractSubjectFromCertificate(
+ cert_buf.as_ptr(),
+ cert_buf.len(),
+ retval.as_mut_ptr(),
+ retval.len(),
+ )
+ };
+
+ if size <= 0 {
+ return Err(Error::ExtractSubjectFailed);
+ }
+ }
+
+ // Reduce buffer size to the amount written.
+ let safe_size = usize::try_from(size).map_err(|_e| Error::ExtractSubjectFailed)?;
+ retval.truncate(safe_size);
+
+ Ok(retval)
+}
+
#[cfg(test)]
mod tests {
@@ -439,26 +537,26 @@
}
#[test]
- fn test_ec() {
- let key = ec_key_generate_key();
- assert!(key.is_ok());
- assert!(!key.unwrap().0.is_null());
+ fn test_ec() -> Result<(), Error> {
+ let priv0 = ec_key_generate_key()?;
+ assert!(!priv0.0.is_null());
+ let pub0 = ec_key_get0_public_key(&priv0);
- let key = ec_key_derive_from_secret(&[42; 16]);
- assert!(key.is_ok());
- let key = key.unwrap();
- assert!(!key.0.is_null());
+ let priv1 = ec_key_generate_key()?;
+ let pub1 = ec_key_get0_public_key(&priv1);
- let point = ec_key_get0_public_key(&key);
+ let priv0s = ec_key_marshal_private_key(&priv0)?;
+ let pub0s = ec_point_point_to_oct(pub0.get_point())?;
+ let pub1s = ec_point_point_to_oct(pub1.get_point())?;
- let result = ecdh_compute_key(point.get_point(), &key);
- assert!(result.is_ok());
+ let priv0 = ec_key_parse_private_key(&priv0s)?;
+ let pub0 = ec_point_oct_to_point(&pub0s)?;
+ let pub1 = ec_point_oct_to_point(&pub1s)?;
- let oct = ec_point_point_to_oct(point.get_point());
- assert!(oct.is_ok());
- let oct = oct.unwrap();
+ let left_key = ecdh_compute_key(pub0.get_point(), &priv1)?;
+ let right_key = ecdh_compute_key(pub1.get_point(), &priv0)?;
- let point2 = ec_point_oct_to_point(oct.as_slice());
- assert!(point2.is_ok());
+ assert_eq!(left_key, right_key);
+ Ok(())
}
}
diff --git a/keystore2/src/crypto/tests/certificate_utils_test.cpp b/keystore2/src/crypto/tests/certificate_utils_test.cpp
index 2df9ce5..119c3fa 100644
--- a/keystore2/src/crypto/tests/certificate_utils_test.cpp
+++ b/keystore2/src/crypto/tests/certificate_utils_test.cpp
@@ -173,8 +173,8 @@
.isCertificationKey = true,
};
- auto certV = makeCert(pkey.get(), 1, "Me", now_ms - kValidity, now_ms + kValidity,
- true /* subject key id extension */, keyUsage, bcons);
+ auto certV = makeCert(pkey.get(), std::nullopt, std::nullopt, now_ms - kValidity,
+ now_ms + kValidity, true /* subject key id extension */, keyUsage, bcons);
ASSERT_TRUE(std::holds_alternative<X509_Ptr>(certV));
auto& cert = std::get<X509_Ptr>(certV);
ASSERT_TRUE(!setIssuer(cert.get(), cert.get(), true));
@@ -272,8 +272,8 @@
.isCertificationKey = true,
};
- auto certV = makeCert(pkey.get(), 1, "Me", now_ms - kValidity, now_ms + kValidity,
- true /* subject key id extension */, keyUsage, bcons);
+ auto certV = makeCert(pkey.get(), std::nullopt, std::nullopt, now_ms - kValidity,
+ now_ms + kValidity, true /* subject key id extension */, keyUsage, bcons);
ASSERT_TRUE(std::holds_alternative<X509_Ptr>(certV));
auto& cert = std::get<X509_Ptr>(certV);
ASSERT_TRUE(!setIssuer(cert.get(), cert.get(), true));
diff --git a/keystore2/src/crypto/zvec.rs b/keystore2/src/crypto/zvec.rs
index e75e1dc..4af7b5a 100644
--- a/keystore2/src/crypto/zvec.rs
+++ b/keystore2/src/crypto/zvec.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![allow(dead_code)]
-
use crate::error::Error;
use nix::sys::mman::{mlock, munlock};
use std::convert::TryFrom;
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index 785847d..174a928 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -41,14 +41,21 @@
//! from the database module these functions take permission check
//! callbacks.
-use crate::db_utils::{self, SqlField};
-use crate::error::{Error as KsError, ErrorCode, ResponseCode};
use crate::impl_metadata; // This is in db_utils.rs
use crate::key_parameter::{KeyParameter, Tag};
use crate::permission::KeyPermSet;
-use crate::utils::get_current_time_in_seconds;
+use crate::utils::{get_current_time_in_seconds, AID_USER_OFFSET};
+use crate::{
+ db_utils::{self, SqlField},
+ gc::Gc,
+ super_key::USER_SUPER_KEY,
+};
+use crate::{
+ error::{Error as KsError, ErrorCode, ResponseCode},
+ super_key::SuperKeyType,
+};
use anyhow::{anyhow, Context, Result};
-use std::{convert::TryFrom, convert::TryInto, time::SystemTimeError};
+use std::{convert::TryFrom, convert::TryInto, ops::Deref, time::SystemTimeError};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
HardwareAuthToken::HardwareAuthToken,
@@ -60,6 +67,11 @@
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor,
};
+use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
+ AttestationPoolStatus::AttestationPoolStatus,
+};
+
+use keystore2_crypto::ZVec;
use lazy_static::lazy_static;
use log::error;
#[cfg(not(test))]
@@ -72,12 +84,14 @@
types::{FromSqlError, Value, ValueRef},
Connection, OptionalExtension, ToSql, Transaction, TransactionBehavior, NO_PARAMS,
};
+
use std::{
collections::{HashMap, HashSet},
path::Path,
sync::{Condvar, Mutex},
time::{Duration, SystemTime},
};
+
#[cfg(test)]
use tests::random;
@@ -88,20 +102,18 @@
/// A metadata entry for key entries.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum KeyMetaEntry {
- /// If present, indicates that the sensitive part of key
- /// is encrypted with another key or a key derived from a password.
- EncryptedBy(EncryptedBy) with accessor encrypted_by,
- /// If the blob is password encrypted this field is set to the
- /// salt used for the key derivation.
- Salt(Vec<u8>) with accessor salt,
- /// If the blob is encrypted, this field is set to the initialization vector.
- Iv(Vec<u8>) with accessor iv,
- /// If the blob is encrypted, this field holds the AEAD TAG.
- AeadTag(Vec<u8>) with accessor aead_tag,
- /// Creation date of a the key entry.
+ /// Date of the creation of the key entry.
CreationDate(DateTime) with accessor creation_date,
/// Expiration date for attestation keys.
AttestationExpirationDate(DateTime) with accessor attestation_expiration_date,
+ /// CBOR Blob that represents a COSE_Key and associated metadata needed for remote
+ /// provisioning
+ AttestationMacedPublicKey(Vec<u8>) with accessor attestation_maced_public_key,
+ /// Vector representing the raw public key so results from the server can be matched
+ /// to the right entry
+ AttestationRawPubKey(Vec<u8>) with accessor attestation_raw_pub_key,
+ /// SEC1 public key for ECDH encryption
+ Sec1PublicKey(Vec<u8>) with accessor sec1_public_key,
// --- ADD NEW META DATA FIELDS HERE ---
// For backwards compatibility add new entries only to
// end of this list and above this comment.
@@ -138,7 +150,7 @@
fn store_in_db(&self, key_id: i64, tx: &Transaction) -> Result<()> {
let mut stmt = tx
.prepare(
- "INSERT into persistent.keymetadata (keyentryid, tag, data)
+ "INSERT or REPLACE INTO persistent.keymetadata (keyentryid, tag, data)
VALUES (?, ?, ?);",
)
.context("In KeyMetaData::store_in_db: Failed to prepare statement.")?;
@@ -153,6 +165,78 @@
}
}
+impl_metadata!(
+ /// A set of metadata for key blobs.
+ #[derive(Debug, Default, Eq, PartialEq)]
+ pub struct BlobMetaData;
+ /// A metadata entry for key blobs.
+ #[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
+ pub enum BlobMetaEntry {
+ /// If present, indicates that the blob is encrypted with another key or a key derived
+ /// from a password.
+ EncryptedBy(EncryptedBy) with accessor encrypted_by,
+ /// If the blob is password encrypted this field is set to the
+ /// salt used for the key derivation.
+ Salt(Vec<u8>) with accessor salt,
+ /// If the blob is encrypted, this field is set to the initialization vector.
+ Iv(Vec<u8>) with accessor iv,
+ /// If the blob is encrypted, this field holds the AEAD TAG.
+ AeadTag(Vec<u8>) with accessor aead_tag,
+ /// The uuid of the owning KeyMint instance.
+ KmUuid(Uuid) with accessor km_uuid,
+ /// If the key is ECDH encrypted, this is the ephemeral public key
+ PublicKey(Vec<u8>) with accessor public_key,
+ // --- ADD NEW META DATA FIELDS HERE ---
+ // For backwards compatibility add new entries only to
+ // end of this list and above this comment.
+ };
+);
+
+impl BlobMetaData {
+ fn load_from_db(blob_id: i64, tx: &Transaction) -> Result<Self> {
+ let mut stmt = tx
+ .prepare(
+ "SELECT tag, data from persistent.blobmetadata
+ WHERE blobentryid = ?;",
+ )
+ .context("In BlobMetaData::load_from_db: prepare statement failed.")?;
+
+ let mut metadata: HashMap<i64, BlobMetaEntry> = Default::default();
+
+ let mut rows =
+ stmt.query(params![blob_id]).context("In BlobMetaData::load_from_db: query failed.")?;
+ db_utils::with_rows_extract_all(&mut rows, |row| {
+ let db_tag: i64 = row.get(0).context("Failed to read tag.")?;
+ metadata.insert(
+ db_tag,
+ BlobMetaEntry::new_from_sql(db_tag, &SqlField::new(1, &row))
+ .context("Failed to read BlobMetaEntry.")?,
+ );
+ Ok(())
+ })
+ .context("In BlobMetaData::load_from_db.")?;
+
+ Ok(Self { data: metadata })
+ }
+
+ fn store_in_db(&self, blob_id: i64, tx: &Transaction) -> Result<()> {
+ let mut stmt = tx
+ .prepare(
+ "INSERT or REPLACE INTO persistent.blobmetadata (blobentryid, tag, data)
+ VALUES (?, ?, ?);",
+ )
+ .context("In BlobMetaData::store_in_db: Failed to prepare statement.")?;
+
+ let iter = self.data.iter();
+ for (tag, entry) in iter {
+ stmt.insert(params![blob_id, tag, entry,]).with_context(|| {
+ format!("In BlobMetaData::store_in_db: Failed to insert {:?}", entry)
+ })?;
+ }
+ Ok(())
+ }
+}
+
/// Indicates the type of the keyentry.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum KeyType {
@@ -187,6 +271,50 @@
}
}
+/// Uuid representation that can be stored in the database.
+/// Right now it can only be initialized from SecurityLevel.
+/// Once KeyMint provides a UUID type, a corresponding From impl shall be added.
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Uuid([u8; 16]);
+
+impl Deref for Uuid {
+ type Target = [u8; 16];
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl From<SecurityLevel> for Uuid {
+ fn from(sec_level: SecurityLevel) -> Self {
+ Self((sec_level.0 as u128).to_be_bytes())
+ }
+}
+
+impl ToSql for Uuid {
+ fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
+ self.0.to_sql()
+ }
+}
+
+impl FromSql for Uuid {
+ fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
+ let blob = Vec::<u8>::column_result(value)?;
+ if blob.len() != 16 {
+ return Err(FromSqlError::OutOfRange(blob.len() as i64));
+ }
+ let mut arr = [0u8; 16];
+ arr.copy_from_slice(&blob);
+ Ok(Self(arr))
+ }
+}
+
+/// Key entries that are not associated with any KeyMint instance, such as pure certificate
+/// entries, are associated with this UUID.
+pub static KEYSTORE_UUID: Uuid = Uuid([
+ 0x41, 0xe3, 0xb9, 0xce, 0x27, 0x58, 0x4e, 0x91, 0xbc, 0xfd, 0xa5, 0x5d, 0x91, 0x85, 0xab, 0x11,
+]);
+
/// Indicates how the sensitive part of this key blob is encrypted.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum EncryptedBy {
@@ -349,7 +477,7 @@
/// certificate chain components.
/// KeyEntryLoadBits is a bitmap that indicates to `KeystoreDB::load_key_entry`
/// which components shall be loaded from the database if present.
-#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
pub struct KeyEntryLoadBits(u32);
impl KeyEntryLoadBits {
@@ -435,6 +563,42 @@
}
}
+/// This type represents a certificate and certificate chain entry for a key.
+#[derive(Debug, Default)]
+pub struct CertificateInfo {
+ cert: Option<Vec<u8>>,
+ cert_chain: Option<Vec<u8>>,
+}
+
+impl CertificateInfo {
+ /// Constructs a new CertificateInfo object from `cert` and `cert_chain`
+ pub fn new(cert: Option<Vec<u8>>, cert_chain: Option<Vec<u8>>) -> Self {
+ Self { cert, cert_chain }
+ }
+
+ /// Take the cert
+ pub fn take_cert(&mut self) -> Option<Vec<u8>> {
+ self.cert.take()
+ }
+
+ /// Take the cert chain
+ pub fn take_cert_chain(&mut self) -> Option<Vec<u8>> {
+ self.cert_chain.take()
+ }
+}
+
+/// This type represents a certificate chain with a private key corresponding to the leaf
+/// certificate. TODO(jbires): This will be used in a follow-on CL; for now it's used in the tests.
+pub struct CertificateChain {
+ /// A KM key blob
+ pub private_key: ZVec,
+ /// A batch cert for private_key
+ pub batch_cert: Vec<u8>,
+ /// A full certificate chain from root signing authority to private_key, including batch_cert
+ /// for convenience.
+ pub cert_chain: Vec<u8>,
+}
+
/// This type represents a Keystore 2.0 key entry.
/// An entry has a unique `id` by which it can be found in the database.
/// It has a security level field, key parameters, and three optional fields
@@ -442,12 +606,13 @@
#[derive(Debug, Default, Eq, PartialEq)]
pub struct KeyEntry {
id: i64,
- km_blob: Option<Vec<u8>>,
+ key_blob_info: Option<(Vec<u8>, BlobMetaData)>,
cert: Option<Vec<u8>>,
cert_chain: Option<Vec<u8>>,
- sec_level: SecurityLevel,
+ km_uuid: Uuid,
parameters: Vec<KeyParameter>,
metadata: KeyMetaData,
+ pure_cert: bool,
}
impl KeyEntry {
@@ -456,12 +621,12 @@
self.id
}
/// Exposes the optional KeyMint blob.
- pub fn km_blob(&self) -> &Option<Vec<u8>> {
- &self.km_blob
+ pub fn key_blob_info(&self) -> &Option<(Vec<u8>, BlobMetaData)> {
+ &self.key_blob_info
}
- /// Extracts the Optional KeyMint blob.
- pub fn take_km_blob(&mut self) -> Option<Vec<u8>> {
- self.km_blob.take()
+ /// Extracts the Optional KeyMint blob including its metadata.
+ pub fn take_key_blob_info(&mut self) -> Option<(Vec<u8>, BlobMetaData)> {
+ self.key_blob_info.take()
}
/// Exposes the optional public certificate.
pub fn cert(&self) -> &Option<Vec<u8>> {
@@ -479,9 +644,9 @@
pub fn take_cert_chain(&mut self) -> Option<Vec<u8>> {
self.cert_chain.take()
}
- /// Returns the security level of the key entry.
- pub fn sec_level(&self) -> SecurityLevel {
- self.sec_level
+ /// Returns the uuid of the owning KeyMint instance.
+ pub fn km_uuid(&self) -> &Uuid {
+ &self.km_uuid
}
/// Exposes the key parameters of this key entry.
pub fn key_parameters(&self) -> &Vec<KeyParameter> {
@@ -495,10 +660,19 @@
pub fn metadata(&self) -> &KeyMetaData {
&self.metadata
}
+ /// This returns true if the entry is a pure certificate entry with no
+ /// private key component.
+ pub fn pure_cert(&self) -> bool {
+ self.pure_cert
+ }
+    /// Consumes this key entry and extracts the key parameters and metadata from it.
+ pub fn into_key_parameters_and_metadata(self) -> (Vec<KeyParameter>, KeyMetaData) {
+ (self.parameters, self.metadata)
+ }
}
/// Indicates the sub component of a key entry for persistent storage.
-#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
pub struct SubComponentType(u32);
impl SubComponentType {
/// Persistent identifier for a key blob.
@@ -521,10 +695,39 @@
}
}
+/// This trait is private to the database module. It is used to convey whether or not the garbage
+/// collector shall be invoked after a database access. All closures passed to
+/// `KeystoreDB::with_transaction` return a tuple (bool, T) where the bool indicates if the
+/// gc needs to be triggered. This convenience trait allows turning any anyhow::Result<T>
+/// into anyhow::Result<(bool, T)> by simply appending one of `.do_gc(bool)`, `.no_gc()`, or
+/// `.need_gc()`.
+trait DoGc<T> {
+ fn do_gc(self, need_gc: bool) -> Result<(bool, T)>;
+
+ fn no_gc(self) -> Result<(bool, T)>;
+
+ fn need_gc(self) -> Result<(bool, T)>;
+}
+
+impl<T> DoGc<T> for Result<T> {
+ fn do_gc(self, need_gc: bool) -> Result<(bool, T)> {
+ self.map(|r| (need_gc, r))
+ }
+
+ fn no_gc(self) -> Result<(bool, T)> {
+ self.do_gc(false)
+ }
+
+ fn need_gc(self) -> Result<(bool, T)> {
+ self.do_gc(true)
+ }
+}
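A minimal sketch of this convention (not part of the patch; `db`, the SQL statement, and `key_id` are placeholders): the closure handed to `with_transaction` ends with one of the DoGc helpers so that `KeystoreDB` knows whether to trigger the garbage collector afterwards:

    db.with_transaction(TransactionBehavior::Immediate, |tx| {
        tx.execute("DELETE FROM persistent.grant WHERE keyentryid = ?;", params![key_id])
            .context("Trying to delete grants.")
            // Key blobs may have become unreferenced, so request a GC pass.
            .need_gc()
    })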
+
/// KeystoreDB wraps a connection to an SQLite database and tracks its
/// ownership. It also implements all of Keystore 2.0's database functionality.
pub struct KeystoreDB {
conn: Connection,
+ gc: Option<Gc>,
}
/// Database representation of the monotonic time retrieved from the system call clock_gettime with
@@ -538,11 +741,21 @@
Self(get_current_time_in_seconds())
}
+ /// Constructs a new MonotonicRawTime from a given number of seconds.
+ pub fn from_secs(val: i64) -> Self {
+ Self(val)
+ }
+
/// Returns the integer value of MonotonicRawTime as i64
pub fn seconds(&self) -> i64 {
self.0
}
+    /// Returns the value of MonotonicRawTime in milliseconds as i64.
+ pub fn milli_seconds(&self) -> i64 {
+ self.0 * 1000
+ }
+
/// Like i64::checked_sub.
pub fn checked_sub(&self, other: &Self) -> Option<Self> {
self.0.checked_sub(other.0).map(Self)
@@ -595,47 +808,87 @@
pub fn time_received(&self) -> MonotonicRawTime {
self.time_received
}
+
+ /// Returns the challenge value of the auth token.
+ pub fn challenge(&self) -> i64 {
+ self.auth_token.challenge
+ }
}
+/// Shared in-memory databases get destroyed as soon as the last connection to them gets closed.
+/// This object does not allow access to the database connection, but it keeps a database
+/// connection alive in order to keep the in-memory per-boot database alive.
+pub struct PerBootDbKeepAlive(Connection);
+
impl KeystoreDB {
+ const UNASSIGNED_KEY_ID: i64 = -1i64;
+ const PERBOOT_DB_FILE_NAME: &'static str = &"file:perboot.sqlite?mode=memory&cache=shared";
+
+ /// This creates a PerBootDbKeepAlive object to keep the per boot database alive.
+ pub fn keep_perboot_db_alive() -> Result<PerBootDbKeepAlive> {
+ let conn = Connection::open_in_memory()
+ .context("In keep_perboot_db_alive: Failed to initialize SQLite connection.")?;
+
+ conn.execute("ATTACH DATABASE ? as perboot;", params![Self::PERBOOT_DB_FILE_NAME])
+ .context("In keep_perboot_db_alive: Failed to attach database perboot.")?;
+ Ok(PerBootDbKeepAlive(conn))
+ }
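A usage sketch (hypothetical startup code, not part of the patch): the caller holds the returned object for as long as the per-boot state must survive, because the shared in-memory database is destroyed once its last connection closes:

    // Keep the perboot database alive for the lifetime of the daemon.
    let _perboot_keepalive = KeystoreDB::keep_perboot_db_alive()
        .expect("Failed to open keep-alive connection for the perboot database");
    // KeystoreDB instances opened later attach the same shared in-memory
    // database and therefore observe the same per-boot state.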
+
/// This will create a new database connection connecting the two
/// files persistent.sqlite and perboot.sqlite in the given directory.
/// It also attempts to initialize all of the tables.
/// KeystoreDB cannot be used by multiple threads.
/// Each thread should open their own connection using `thread_local!`.
- pub fn new(db_root: &Path) -> Result<Self> {
- // Build the path to the sqlite files.
+ pub fn new(db_root: &Path, gc: Option<Gc>) -> Result<Self> {
+ // Build the path to the sqlite file.
let mut persistent_path = db_root.to_path_buf();
persistent_path.push("persistent.sqlite");
- let mut perboot_path = db_root.to_path_buf();
- perboot_path.push("perboot.sqlite");
// Now convert them to strings prefixed with "file:"
let mut persistent_path_str = "file:".to_owned();
persistent_path_str.push_str(&persistent_path.to_string_lossy());
- let mut perboot_path_str = "file:".to_owned();
- perboot_path_str.push_str(&perboot_path.to_string_lossy());
- let conn = Self::make_connection(&persistent_path_str, &perboot_path_str)?;
+ let conn = Self::make_connection(&persistent_path_str, &Self::PERBOOT_DB_FILE_NAME)?;
- Self::init_tables(&conn)?;
- Ok(Self { conn })
+        // On busy, fail immediately. Retrying is unlikely to succeed, given a bug in SQLite.
+ conn.busy_handler(None).context("In KeystoreDB::new: Failed to set busy handler.")?;
+
+ let mut db = Self { conn, gc };
+ db.with_transaction(TransactionBehavior::Immediate, |tx| {
+ Self::init_tables(tx).context("Trying to initialize tables.").no_gc()
+ })?;
+ Ok(db)
}
- fn init_tables(conn: &Connection) -> Result<()> {
- conn.execute(
+ fn init_tables(tx: &Transaction) -> Result<()> {
+ tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.keyentry (
id INTEGER UNIQUE,
key_type INTEGER,
domain INTEGER,
namespace INTEGER,
alias BLOB,
- state INTEGER);",
+ state INTEGER,
+ km_uuid BLOB);",
NO_PARAMS,
)
.context("Failed to initialize \"keyentry\" table.")?;
- conn.execute(
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.keyentry_id_index
+ ON keyentry(id);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index keyentry_id_index.")?;
+
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.keyentry_domain_namespace_index
+ ON keyentry(domain, namespace, alias);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index keyentry_domain_namespace_index.")?;
+
+ tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.blobentry (
id INTEGER PRIMARY KEY,
subcomponent_type INTEGER,
@@ -645,7 +898,32 @@
)
.context("Failed to initialize \"blobentry\" table.")?;
- conn.execute(
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.blobentry_keyentryid_index
+ ON blobentry(keyentryid);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index blobentry_keyentryid_index.")?;
+
+ tx.execute(
+ "CREATE TABLE IF NOT EXISTS persistent.blobmetadata (
+ id INTEGER PRIMARY KEY,
+ blobentryid INTEGER,
+ tag INTEGER,
+ data ANY,
+ UNIQUE (blobentryid, tag));",
+ NO_PARAMS,
+ )
+ .context("Failed to initialize \"blobmetadata\" table.")?;
+
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.blobmetadata_blobentryid_index
+ ON blobmetadata(blobentryid);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index blobmetadata_blobentryid_index.")?;
+
+ tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.keyparameter (
keyentryid INTEGER,
tag INTEGER,
@@ -655,16 +933,31 @@
)
.context("Failed to initialize \"keyparameter\" table.")?;
- conn.execute(
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.keyparameter_keyentryid_index
+ ON keyparameter(keyentryid);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index keyparameter_keyentryid_index.")?;
+
+ tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.keymetadata (
keyentryid INTEGER,
tag INTEGER,
- data ANY);",
+ data ANY,
+ UNIQUE (keyentryid, tag));",
NO_PARAMS,
)
.context("Failed to initialize \"keymetadata\" table.")?;
- conn.execute(
+ tx.execute(
+ "CREATE INDEX IF NOT EXISTS persistent.keymetadata_keyentryid_index
+ ON keymetadata(keyentryid);",
+ NO_PARAMS,
+ )
+ .context("Failed to create index keymetadata_keyentryid_index.")?;
+
+ tx.execute(
"CREATE TABLE IF NOT EXISTS persistent.grant (
id INTEGER UNIQUE,
grantee INTEGER,
@@ -676,9 +969,9 @@
//TODO: only drop the following two perboot tables if this is the first start up
//during the boot (b/175716626).
- // conn.execute("DROP TABLE IF EXISTS perboot.authtoken;", NO_PARAMS)
+ // tx.execute("DROP TABLE IF EXISTS perboot.authtoken;", NO_PARAMS)
// .context("Failed to drop perboot.authtoken table")?;
- conn.execute(
+ tx.execute(
"CREATE TABLE IF NOT EXISTS perboot.authtoken (
id INTEGER PRIMARY KEY,
challenge INTEGER,
@@ -693,11 +986,11 @@
)
.context("Failed to initialize \"authtoken\" table.")?;
- // conn.execute("DROP TABLE IF EXISTS perboot.metadata;", NO_PARAMS)
+ // tx.execute("DROP TABLE IF EXISTS perboot.metadata;", NO_PARAMS)
// .context("Failed to drop perboot.metadata table")?;
// metadata table stores certain miscellaneous information required for keystore functioning
// during a boot cycle, as key-value pairs.
- conn.execute(
+ tx.execute(
"CREATE TABLE IF NOT EXISTS perboot.metadata (
key TEXT,
value BLOB,
@@ -712,85 +1005,105 @@
let conn =
Connection::open_in_memory().context("Failed to initialize SQLite connection.")?;
- conn.execute("ATTACH DATABASE ? as persistent;", params![persistent_file])
- .context("Failed to attach database persistent.")?;
- conn.execute("ATTACH DATABASE ? as perboot;", params![perboot_file])
- .context("Failed to attach database perboot.")?;
+ loop {
+ if let Err(e) = conn
+ .execute("ATTACH DATABASE ? as persistent;", params![persistent_file])
+ .context("Failed to attach database persistent.")
+ {
+ if Self::is_locked_error(&e) {
+ std::thread::sleep(std::time::Duration::from_micros(500));
+ continue;
+ } else {
+ return Err(e);
+ }
+ }
+ break;
+ }
+ loop {
+ if let Err(e) = conn
+ .execute("ATTACH DATABASE ? as perboot;", params![perboot_file])
+ .context("Failed to attach database perboot.")
+ {
+ if Self::is_locked_error(&e) {
+ std::thread::sleep(std::time::Duration::from_micros(500));
+ continue;
+ } else {
+ return Err(e);
+ }
+ }
+ break;
+ }
Ok(conn)
}
- /// Get one unreferenced key. There is no particular order in which the keys are returned.
- fn get_unreferenced_key_id(tx: &Transaction) -> Result<Option<i64>> {
- tx.query_row(
- "SELECT id FROM persistent.keyentry WHERE state = ?",
- params![KeyLifeCycle::Unreferenced],
- |row| row.get(0),
- )
- .optional()
- .context("In get_unreferenced_key_id: Trying to get unreferenced key id.")
- }
-
- /// Returns a key id guard and key entry for one unreferenced key entry. Of the optional
- /// fields of the key entry only the km_blob field will be populated. This is required
- /// to subject the blob to its KeyMint instance for deletion.
- pub fn get_unreferenced_key(&mut self) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let key_id = match Self::get_unreferenced_key_id(tx)
- .context("Trying to get unreferenced key id")?
- {
- None => return Ok(None),
- Some(id) => KEY_ID_LOCK.try_get(id).ok_or_else(KsError::sys).context(concat!(
- "A key id lock was held for an unreferenced key. ",
- "This should never happen."
- ))?,
- };
- let key_entry = Self::load_key_components(tx, KeyEntryLoadBits::KM, key_id.id())
- .context("Trying to get key components.")?;
- Ok(Some((key_id, key_entry)))
- })
- .context("In get_unreferenced_key.")
- }
-
- /// This function purges all remnants of a key entry from the database.
- /// Important: This does not check if the key was unreferenced, nor does it
- /// subject the key to its KeyMint instance for permanent invalidation.
- /// This function should only be called by the garbage collector.
- /// To delete a key call `mark_unreferenced`, which transitions the key to the unreferenced
- /// state, deletes all grants to the key, and notifies the garbage collector.
- /// The garbage collector will:
- /// 1. Call get_unreferenced_key.
- /// 2. Determine the proper way to dispose of sensitive key material, e.g., call
- /// `KeyMintDevice::delete()`.
- /// 3. Call `purge_key_entry`.
- pub fn purge_key_entry(&mut self, key_id: KeyIdGuard) -> Result<()> {
+ /// This function is intended to be used by the garbage collector.
+ /// It deletes the blob given by `blob_id_to_delete`. It then tries to find a superseded
+ /// key blob that might need special handling by the garbage collector.
+    /// If no further superseded key blobs can be found, it deletes all other superseded blobs
+    /// that don't need special handling and returns None.
+ pub fn handle_next_superseded_blob(
+ &mut self,
+ blob_id_to_delete: Option<i64>,
+ ) -> Result<Option<(i64, Vec<u8>, BlobMetaData)>> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- tx.execute("DELETE FROM persistent.keyentry WHERE id = ?;", params![key_id.id()])
- .context("Trying to delete keyentry.")?;
- tx.execute(
- "DELETE FROM persistent.blobentry WHERE keyentryid = ?;",
- params![key_id.id()],
- )
- .context("Trying to delete blobentries.")?;
- tx.execute(
- "DELETE FROM persistent.keymetadata WHERE keyentryid = ?;",
- params![key_id.id()],
- )
- .context("Trying to delete keymetadata.")?;
- tx.execute(
- "DELETE FROM persistent.keyparameter WHERE keyentryid = ?;",
- params![key_id.id()],
- )
- .context("Trying to delete keyparameters.")?;
- let grants_deleted = tx
- .execute("DELETE FROM persistent.grant WHERE keyentryid = ?;", params![key_id.id()])
- .context("Trying to delete grants.")?;
- if grants_deleted != 0 {
- log::error!("Purged key that still had grants. This should not happen.");
+ // Delete the given blob if one was given.
+ if let Some(blob_id_to_delete) = blob_id_to_delete {
+ tx.execute(
+ "DELETE FROM persistent.blobmetadata WHERE blobentryid = ?;",
+ params![blob_id_to_delete],
+ )
+ .context("Trying to delete blob metadata.")?;
+ tx.execute(
+ "DELETE FROM persistent.blobentry WHERE id = ?;",
+ params![blob_id_to_delete],
+ )
+ .context("Trying to blob.")?;
}
- Ok(())
+
+            // Find another superseded key blob, load its metadata, and return it.
+ if let Some((blob_id, blob)) = tx
+ .query_row(
+ "SELECT id, blob FROM persistent.blobentry
+ WHERE subcomponent_type = ?
+ AND (
+ id NOT IN (
+ SELECT MAX(id) FROM persistent.blobentry
+ WHERE subcomponent_type = ?
+ GROUP BY keyentryid, subcomponent_type
+ )
+ OR keyentryid NOT IN (SELECT id FROM persistent.keyentry)
+ );",
+ params![SubComponentType::KEY_BLOB, SubComponentType::KEY_BLOB],
+ |row| Ok((row.get(0)?, row.get(1)?)),
+ )
+ .optional()
+ .context("Trying to query superseded blob.")?
+ {
+ let blob_metadata = BlobMetaData::load_from_db(blob_id, tx)
+ .context("Trying to load blob metadata.")?;
+ return Ok(Some((blob_id, blob, blob_metadata))).no_gc();
+ }
+
+            // We did not find any superseded key blob, so let's remove all other superseded
+            // blobs in one transaction.
+ tx.execute(
+ "DELETE FROM persistent.blobentry
+ WHERE NOT subcomponent_type = ?
+ AND (
+ id NOT IN (
+ SELECT MAX(id) FROM persistent.blobentry
+ WHERE NOT subcomponent_type = ?
+ GROUP BY keyentryid, subcomponent_type
+ ) OR keyentryid NOT IN (SELECT id FROM persistent.keyentry)
+ );",
+ params![SubComponentType::KEY_BLOB, SubComponentType::KEY_BLOB],
+ )
+ .context("Trying to purge superseded blobs.")?;
+
+ Ok(None).no_gc()
})
- .context("In purge_key_entry.")
+ .context("In handle_next_superseded_blob.")
}
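
For context, a minimal sketch of how the garbage collector is expected to drive this API. The `dispose_of_key_material` helper is an assumption standing in for whatever KeyMint invalidation the collector performs; the loop structure itself follows the contract described above.

fn collect_superseded_blobs(db: &mut KeystoreDB) -> Result<()> {
    let mut blob_id_to_delete: Option<i64> = None;
    loop {
        match db.handle_next_superseded_blob(blob_id_to_delete)? {
            // A superseded key blob was found; dispose of the sensitive key
            // material first, then feed the blob id back in so its row and
            // metadata get deleted on the next call.
            Some((blob_id, blob, blob_metadata)) => {
                dispose_of_key_material(&blob, &blob_metadata)?;
                blob_id_to_delete = Some(blob_id);
            }
            // No superseded key blobs remain; all other superseded blobs were
            // already purged inside the last call.
            None => return Ok(()),
        }
    }
}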
/// This maintenance function should be called only once before the database is used for the
@@ -802,12 +1115,115 @@
/// Unlike with `mark_unreferenced`, we don't need to purge grants, because only keys that made
/// it to `KeyLifeCycle::Live` may have grants.
pub fn cleanup_leftovers(&mut self) -> Result<usize> {
- self.conn
- .execute(
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
"UPDATE persistent.keyentry SET state = ? WHERE state = ?;",
params![KeyLifeCycle::Unreferenced, KeyLifeCycle::Existing],
)
- .context("In cleanup_leftovers.")
+ .context("Failed to execute query.")
+ .need_gc()
+ })
+ .context("In cleanup_leftovers.")
+ }
+
+ /// Checks if a key exists with given key type and key descriptor properties.
+ pub fn key_exists(
+ &mut self,
+ domain: Domain,
+ nspace: i64,
+ alias: &str,
+ key_type: KeyType,
+ ) -> Result<bool> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_descriptor =
+ KeyDescriptor { domain, nspace, alias: Some(alias.to_string()), blob: None };
+ let result = Self::load_key_entry_id(&tx, &key_descriptor, key_type);
+ match result {
+ Ok(_) => Ok(true),
+ Err(error) => match error.root_cause().downcast_ref::<KsError>() {
+ Some(KsError::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(false),
+ _ => Err(error).context("In key_exists: Failed to find if the key exists."),
+ },
+ }
+ .no_gc()
+ })
+ .context("In key_exists.")
+ }
+
+ /// Stores a super key in the database.
+ pub fn store_super_key(
+ &mut self,
+ user_id: u32,
+ key_type: &SuperKeyType,
+ blob: &[u8],
+ blob_metadata: &BlobMetaData,
+ key_metadata: &KeyMetaData,
+ ) -> Result<KeyEntry> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_id = Self::insert_with_retry(|id| {
+ tx.execute(
+ "INSERT into persistent.keyentry
+ (id, key_type, domain, namespace, alias, state, km_uuid)
+ VALUES(?, ?, ?, ?, ?, ?, ?);",
+ params![
+ id,
+ KeyType::Super,
+ Domain::APP.0,
+ user_id as i64,
+ key_type.alias,
+ KeyLifeCycle::Live,
+ &KEYSTORE_UUID,
+ ],
+ )
+ })
+ .context("Failed to insert into keyentry table.")?;
+
+ key_metadata.store_in_db(key_id, tx).context("KeyMetaData::store_in_db failed")?;
+
+ Self::set_blob_internal(
+ &tx,
+ key_id,
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(blob_metadata),
+ )
+ .context("Failed to store key blob.")?;
+
+ Self::load_key_components(tx, KeyEntryLoadBits::KM, key_id)
+ .context("Trying to load key components.")
+ .no_gc()
+ })
+ .context("In store_super_key.")
+ }
+
+ /// Loads the super key of a given user, if one exists.
+ pub fn load_super_key(
+ &mut self,
+ key_type: &SuperKeyType,
+ user_id: u32,
+ ) -> Result<Option<(KeyIdGuard, KeyEntry)>> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_descriptor = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: user_id as i64,
+ alias: Some(key_type.alias.into()),
+ blob: None,
+ };
+ let id = Self::load_key_entry_id(&tx, &key_descriptor, KeyType::Super);
+ match id {
+ Ok(id) => {
+ let key_entry = Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ .context("In load_super_key. Failed to load key entry.")?;
+ Ok(Some((KEY_ID_LOCK.get(id), key_entry)))
+ }
+ Err(error) => match error.root_cause().downcast_ref::<KsError>() {
+ Some(KsError::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
+ _ => Err(error).context("In load_super_key."),
+ },
+ }
+ .no_gc()
+ })
+ .context("In load_super_key.")
}
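
A short sketch of the store/load round trip for a per-user super key. The wrapped key bytes and the metadata entries are placeholders, and `BlobMetaData::new()` is assumed to exist analogously to `KeyMetaData::new()`.

// Persist a wrapped super key blob for user 42 ...
let blob_metadata = BlobMetaData::new(); // entries describing the wrapping would be added here
db.store_super_key(42, &USER_SUPER_KEY, &wrapped_super_key, &blob_metadata, &KeyMetaData::new())?;

// ... and read it back later, e.g., when the user unlocks the device.
if let Some((_key_guard, key_entry)) = db.load_super_key(&USER_SUPER_KEY, 42)? {
    // key_entry now carries the blob and its BlobMetaData in key_blob_info.
}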
/// Atomically loads a key entry and associated metadata or creates it using the
@@ -819,105 +1235,173 @@
domain: Domain,
namespace: i64,
alias: &str,
+ km_uuid: Uuid,
create_new_key: F,
) -> Result<(KeyIdGuard, KeyEntry)>
where
- F: FnOnce() -> Result<(Vec<u8>, KeyMetaData)>,
+ F: Fn() -> Result<(Vec<u8>, BlobMetaData)>,
{
- let tx = self
- .conn
- .transaction_with_behavior(TransactionBehavior::Immediate)
- .context("In get_or_create_key_with: Failed to initialize transaction.")?;
-
- let id = {
- let mut stmt = tx
- .prepare(
- "SELECT id FROM persistent.keyentry
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let id = {
+ let mut stmt = tx
+ .prepare(
+ "SELECT id FROM persistent.keyentry
WHERE
key_type = ?
AND domain = ?
AND namespace = ?
AND alias = ?
AND state = ?;",
- )
- .context("In get_or_create_key_with: Failed to select from keyentry table.")?;
- let mut rows = stmt
- .query(params![KeyType::Super, domain.0, namespace, alias, KeyLifeCycle::Live])
- .context("In get_or_create_key_with: Failed to query from keyentry table.")?;
-
- db_utils::with_rows_extract_one(&mut rows, |row| {
- Ok(match row {
- Some(r) => r.get(0).context("Failed to unpack id.")?,
- None => None,
- })
- })
- .context("In get_or_create_key_with.")?
- };
-
- let (id, entry) = match id {
- Some(id) => (
- id,
- Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
- .context("In get_or_create_key_with.")?,
- ),
-
- None => {
- let id = Self::insert_with_retry(|id| {
- tx.execute(
- "INSERT into persistent.keyentry
- (id, key_type, domain, namespace, alias, state)
- VALUES(?, ?, ?, ?, ?, ?);",
- params![id, KeyType::Super, domain.0, namespace, alias, KeyLifeCycle::Live],
)
- })
- .context("In get_or_create_key_with.")?;
+ .context("In get_or_create_key_with: Failed to select from keyentry table.")?;
+ let mut rows = stmt
+ .query(params![KeyType::Super, domain.0, namespace, alias, KeyLifeCycle::Live])
+ .context("In get_or_create_key_with: Failed to query from keyentry table.")?;
- let (blob, metadata) = create_new_key().context("In get_or_create_key_with.")?;
- Self::insert_blob_internal(&tx, id, SubComponentType::KEY_BLOB, &blob)
- .context("In get_of_create_key_with.")?;
- metadata.store_in_db(id, &tx).context("In get_or_create_key_with.")?;
- (id, KeyEntry { id, km_blob: Some(blob), metadata, ..Default::default() })
- }
- };
- tx.commit().context("In get_or_create_key_with: Failed to commit transaction.")?;
- Ok((KEY_ID_LOCK.get(id), entry))
+ db_utils::with_rows_extract_one(&mut rows, |row| {
+ Ok(match row {
+ Some(r) => r.get(0).context("Failed to unpack id.")?,
+ None => None,
+ })
+ })
+ .context("In get_or_create_key_with.")?
+ };
+
+ let (id, entry) = match id {
+ Some(id) => (
+ id,
+ Self::load_key_components(&tx, KeyEntryLoadBits::KM, id)
+ .context("In get_or_create_key_with.")?,
+ ),
+
+ None => {
+ let id = Self::insert_with_retry(|id| {
+ tx.execute(
+ "INSERT into persistent.keyentry
+ (id, key_type, domain, namespace, alias, state, km_uuid)
+ VALUES(?, ?, ?, ?, ?, ?, ?);",
+ params![
+ id,
+ KeyType::Super,
+ domain.0,
+ namespace,
+ alias,
+ KeyLifeCycle::Live,
+ km_uuid,
+ ],
+ )
+ })
+ .context("In get_or_create_key_with.")?;
+
+ let (blob, metadata) =
+ create_new_key().context("In get_or_create_key_with.")?;
+ Self::set_blob_internal(
+ &tx,
+ id,
+ SubComponentType::KEY_BLOB,
+ Some(&blob),
+ Some(&metadata),
+ )
+ .context("In get_or_create_key_with.")?;
+ (
+ id,
+ KeyEntry {
+ id,
+ key_blob_info: Some((blob, metadata)),
+ pure_cert: false,
+ ..Default::default()
+ },
+ )
+ }
+ };
+ Ok((KEY_ID_LOCK.get(id), entry)).no_gc()
+ })
+ .context("In get_or_create_key_with.")
}
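
A sketch of how a caller (for example the super key manager) might use this: the closure only runs when no live entry exists yet. `generate_and_wrap_super_key` is a hypothetical helper returning the new key blob and its metadata.

let (key_guard, key_entry) = db.get_or_create_key_with(
    Domain::APP,
    user_id as i64,
    USER_SUPER_KEY.alias,
    KEYSTORE_UUID,
    || {
        // Runs only if no matching live key entry exists: create a new key
        // blob and describe how it was wrapped in the returned BlobMetaData.
        generate_and_wrap_super_key()
    },
)?;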
+ /// SQLite3 seems to hold a shared mutex while running the busy handler when
+ /// waiting for the database file to become available. This makes it
+ /// impossible to successfully recover from a locked database when the
+ /// transaction holding the database busy is in the same process on a
+ /// different connection. As a result the busy handler has to time out and
+ /// fail in order to make progress.
+ ///
+ /// Instead, we set the busy handler to None (return immediately), catch
+ /// Busy and Locked errors (the latter occur on in-memory databases with
+ /// shared cache, e.g., the per-boot database), and restart the transaction
+ /// after a grace period of half a millisecond.
+ ///
/// Creates a transaction with the given behavior and executes f with the new transaction.
- /// The transaction is committed only if f returns Ok.
+ /// The transaction is committed only if f returns Ok and retried if DatabaseBusy
+ /// or DatabaseLocked is encountered.
fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
where
- F: FnOnce(&Transaction) -> Result<T>,
+ F: Fn(&Transaction) -> Result<(bool, T)>,
{
- let tx = self
- .conn
- .transaction_with_behavior(behavior)
- .context("In with_transaction: Failed to initialize transaction.")?;
- f(&tx).and_then(|result| {
- tx.commit().context("In with_transaction: Failed to commit transaction.")?;
- Ok(result)
+ loop {
+ match self
+ .conn
+ .transaction_with_behavior(behavior)
+ .context("In with_transaction.")
+ .and_then(|tx| f(&tx).map(|result| (result, tx)))
+ .and_then(|(result, tx)| {
+ tx.commit().context("In with_transaction: Failed to commit transaction.")?;
+ Ok(result)
+ }) {
+ Ok(result) => break Ok(result),
+ Err(e) => {
+ if Self::is_locked_error(&e) {
+ std::thread::sleep(std::time::Duration::from_micros(500));
+ continue;
+ } else {
+ return Err(e).context("In with_transaction.");
+ }
+ }
+ }
+ }
+ .map(|(need_gc, result)| {
+ if need_gc {
+ if let Some(ref gc) = self.gc {
+ gc.notify_gc();
+ }
+ }
+ result
})
}
+ fn is_locked_error(e: &anyhow::Error) -> bool {
+ matches!(
+ e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
+ Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. })
+ | Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseLocked, .. })
+ )
+ }
+
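
A minimal sketch of the calling convention the closure now follows: the payload is tagged with a garbage-collector hint via the `no_gc`/`need_gc`/`do_gc` adapters, and `with_transaction` strips the hint and notifies the collector if required. The method and query below are illustrative only.

fn count_live_keys(&mut self) -> Result<i64> {
    self.with_transaction(TransactionBehavior::Deferred, |tx| {
        tx.query_row(
            "SELECT COUNT(*) FROM persistent.keyentry WHERE state = ?;",
            params![KeyLifeCycle::Live],
            |row| row.get(0),
        )
        .context("Failed to count live keys.")
        // Read-only work never orphans blobs, so no GC notification is needed.
        .no_gc()
    })
    .context("In count_live_keys.")
}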
/// Creates a new key entry and allocates a new randomized id for the new key.
/// The key id gets associated with a domain and namespace but not with an alias.
/// To complete key generation `rebind_alias` should be called after all of the
/// key artifacts, i.e., blobs and parameters have been associated with the new
/// key id. Finalizing with `rebind_alias` makes the creation of a new key entry
/// atomic even if key generation is not.
- pub fn create_key_entry(&mut self, domain: Domain, namespace: i64) -> Result<KeyIdGuard> {
+ pub fn create_key_entry(
+ &mut self,
+ domain: &Domain,
+ namespace: &i64,
+ km_uuid: &Uuid,
+ ) -> Result<KeyIdGuard> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::create_key_entry_internal(tx, domain, namespace)
+ Self::create_key_entry_internal(tx, domain, namespace, km_uuid).no_gc()
})
.context("In create_key_entry.")
}
fn create_key_entry_internal(
tx: &Transaction,
- domain: Domain,
- namespace: i64,
+ domain: &Domain,
+ namespace: &i64,
+ km_uuid: &Uuid,
) -> Result<KeyIdGuard> {
- match domain {
+ match *domain {
Domain::APP | Domain::SELINUX => {}
_ => {
return Err(KsError::sys())
@@ -928,14 +1412,15 @@
Self::insert_with_retry(|id| {
tx.execute(
"INSERT into persistent.keyentry
- (id, key_type, domain, namespace, alias, state)
- VALUES(?, ?, ?, ?, NULL, ?);",
+ (id, key_type, domain, namespace, alias, state, km_uuid)
+ VALUES(?, ?, ?, ?, NULL, ?, ?);",
params![
id,
KeyType::Client,
domain.0 as u32,
- namespace,
- KeyLifeCycle::Existing
+ *namespace,
+ KeyLifeCycle::Existing,
+ km_uuid,
],
)
})
@@ -943,56 +1428,140 @@
))
}
- /// Inserts a new blob and associates it with the given key id. Each blob
- /// has a sub component type and a security level.
+ /// Creates a new attestation key entry and allocates a new randomized id for the new key.
+ /// The key id gets associated with a domain and namespace later, but not with an alias. The
+ /// alias is only used to mark whether a key has been signed; since each key can be bound to
+ /// at most one domain/namespace pairing, there is no need to use the alias for indexing.
+ pub fn create_attestation_key_entry(
+ &mut self,
+ maced_public_key: &[u8],
+ raw_public_key: &[u8],
+ private_key: &[u8],
+ km_uuid: &Uuid,
+ ) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_id = KEY_ID_LOCK.get(
+ Self::insert_with_retry(|id| {
+ tx.execute(
+ "INSERT into persistent.keyentry
+ (id, key_type, domain, namespace, alias, state, km_uuid)
+ VALUES(?, ?, NULL, NULL, NULL, ?, ?);",
+ params![id, KeyType::Attestation, KeyLifeCycle::Live, km_uuid],
+ )
+ })
+ .context("In create_key_entry")?,
+ );
+ Self::set_blob_internal(
+ &tx,
+ key_id.0,
+ SubComponentType::KEY_BLOB,
+ Some(private_key),
+ None,
+ )?;
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::AttestationMacedPublicKey(maced_public_key.to_vec()));
+ metadata.add(KeyMetaEntry::AttestationRawPubKey(raw_public_key.to_vec()));
+ metadata.store_in_db(key_id.0, &tx)?;
+ Ok(()).no_gc()
+ })
+ .context("In create_attestation_key_entry")
+ }
+
+ /// Sets a new blob and associates it with the given key id. Each blob
+ /// has a sub component type.
/// Each key can have one of each sub component type associated. If more
/// are added only the most recent can be retrieved, and superseded blobs
- /// will get garbage collected. The security level field of components
- /// other than `SubComponentType::KEY_BLOB` are ignored.
- pub fn insert_blob(
+ /// will get garbage collected.
+ /// Components SubComponentType::CERT and SubComponentType::CERT_CHAIN can be
+ /// removed by setting blob to None.
+ pub fn set_blob(
&mut self,
key_id: &KeyIdGuard,
sc_type: SubComponentType,
- blob: &[u8],
+ blob: Option<&[u8]>,
+ blob_metadata: Option<&BlobMetaData>,
) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::insert_blob_internal(&tx, key_id.0, sc_type, blob)
+ Self::set_blob_internal(&tx, key_id.0, sc_type, blob, blob_metadata).need_gc()
})
- .context("In insert_blob.")
+ .context("In set_blob.")
}
- fn insert_blob_internal(
+ /// Why would we insert a deleted blob? This function exists for legacy key migration in the
+ /// case where we bulk delete all the keys of an app or even a user. It inserts the key blobs
+ /// into the database so that they can be garbage collected lazily by the key garbage
+ /// collector.
+ pub fn set_deleted_blob(&mut self, blob: &[u8], blob_metadata: &BlobMetaData) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ Self::set_blob_internal(
+ &tx,
+ Self::UNASSIGNED_KEY_ID,
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(blob_metadata),
+ )
+ .need_gc()
+ })
+ .context("In set_deleted_blob.")
+ }
+
+ fn set_blob_internal(
tx: &Transaction,
key_id: i64,
sc_type: SubComponentType,
- blob: &[u8],
+ blob: Option<&[u8]>,
+ blob_metadata: Option<&BlobMetaData>,
) -> Result<()> {
- tx.execute(
- "INSERT into persistent.blobentry (subcomponent_type, keyentryid, blob)
- VALUES (?, ?, ?);",
- params![sc_type, key_id, blob],
- )
- .context("In insert_blob_internal: Failed to insert blob.")?;
+ match (blob, sc_type) {
+ (Some(blob), _) => {
+ tx.execute(
+ "INSERT INTO persistent.blobentry
+ (subcomponent_type, keyentryid, blob) VALUES (?, ?, ?);",
+ params![sc_type, key_id, blob],
+ )
+ .context("In set_blob_internal: Failed to insert blob.")?;
+ if let Some(blob_metadata) = blob_metadata {
+ let blob_id = tx
+ .query_row("SELECT MAX(id) FROM persistent.blobentry;", NO_PARAMS, |row| {
+ row.get(0)
+ })
+ .context("In set_blob_internal: Failed to get new blob id.")?;
+ blob_metadata
+ .store_in_db(blob_id, tx)
+ .context("In set_blob_internal: Trying to store blob metadata.")?;
+ }
+ }
+ (None, SubComponentType::CERT) | (None, SubComponentType::CERT_CHAIN) => {
+ tx.execute(
+ "DELETE FROM persistent.blobentry
+ WHERE subcomponent_type = ? AND keyentryid = ?;",
+ params![sc_type, key_id],
+ )
+ .context("In set_blob_internal: Failed to delete blob.")?;
+ }
+ (None, _) => {
+ return Err(KsError::sys())
+ .context("In set_blob_internal: Other blobs cannot be deleted in this way.");
+ }
+ }
Ok(())
}
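
Two brief usage sketches for the new `set_blob` semantics; `key_guard` and `new_cert` stand in for a previously created key id guard and freshly issued certificate bytes.

// Attach or replace the public certificate of a key. The superseded blob row
// stays behind and is cleaned up later by the garbage collector.
db.set_blob(&key_guard, SubComponentType::CERT, Some(&new_cert), None)?;

// Remove a certificate chain entirely. Only CERT and CERT_CHAIN may be removed
// this way; passing None for a KEY_BLOB is rejected with a system error.
db.set_blob(&key_guard, SubComponentType::CERT_CHAIN, None, None)?;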
/// Inserts a collection of key parameters into the `persistent.keyparameter` table
/// and associates them with the given `key_id`.
- pub fn insert_keyparameter<'a>(
- &mut self,
- key_id: &KeyIdGuard,
- params: impl IntoIterator<Item = &'a KeyParameter>,
- ) -> Result<()> {
+ #[cfg(test)]
+ fn insert_keyparameter(&mut self, key_id: &KeyIdGuard, params: &[KeyParameter]) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- Self::insert_keyparameter_internal(tx, key_id, params)
+ Self::insert_keyparameter_internal(tx, key_id, params).no_gc()
})
.context("In insert_keyparameter.")
}
- fn insert_keyparameter_internal<'a>(
+ fn insert_keyparameter_internal(
tx: &Transaction,
key_id: &KeyIdGuard,
- params: impl IntoIterator<Item = &'a KeyParameter>,
+ params: &[KeyParameter],
) -> Result<()> {
let mut stmt = tx
.prepare(
@@ -1001,8 +1570,7 @@
)
.context("In insert_keyparameter_internal: Failed to prepare statement.")?;
- let iter = params.into_iter();
- for p in iter {
+ for p in params.iter() {
stmt.insert(params![
key_id.0,
p.get_tag().0,
@@ -1017,17 +1585,397 @@
}
/// Insert a set of key entry specific metadata into the database.
- pub fn insert_key_metadata(
- &mut self,
- key_id: &KeyIdGuard,
- metadata: &KeyMetaData,
- ) -> Result<()> {
+ #[cfg(test)]
+ fn insert_key_metadata(&mut self, key_id: &KeyIdGuard, metadata: &KeyMetaData) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- metadata.store_in_db(key_id.0, &tx)
+ metadata.store_in_db(key_id.0, &tx).no_gc()
})
.context("In insert_key_metadata.")
}
+ /// Stores a signed certificate chain signed by a remote provisioning server, keyed
+ /// on the public key.
+ pub fn store_signed_attestation_certificate_chain(
+ &mut self,
+ raw_public_key: &[u8],
+ batch_cert: &[u8],
+ cert_chain: &[u8],
+ expiration_date: i64,
+ km_uuid: &Uuid,
+ ) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx
+ .prepare(
+ "SELECT keyentryid
+ FROM persistent.keymetadata
+ WHERE tag = ? AND data = ? AND keyentryid IN
+ (SELECT id
+ FROM persistent.keyentry
+ WHERE
+ alias IS NULL AND
+ domain IS NULL AND
+ namespace IS NULL AND
+ key_type = ? AND
+ km_uuid = ?);",
+ )
+ .context("Failed to store attestation certificate chain.")?;
+ let mut rows = stmt
+ .query(params![
+ KeyMetaData::AttestationRawPubKey,
+ raw_public_key,
+ KeyType::Attestation,
+ km_uuid
+ ])
+ .context("Failed to fetch keyid")?;
+ let key_id = db_utils::with_rows_extract_one(&mut rows, |row| {
+ row.map_or_else(|| Err(KsError::Rc(ResponseCode::KEY_NOT_FOUND)), Ok)?
+ .get(0)
+ .context("Failed to unpack id.")
+ })
+ .context("Failed to get key_id.")?;
+ let num_updated = tx
+ .execute(
+ "UPDATE persistent.keyentry
+ SET alias = ?
+ WHERE id = ?;",
+ params!["signed", key_id],
+ )
+ .context("Failed to update alias.")?;
+ if num_updated != 1 {
+ return Err(KsError::sys()).context("Alias not updated for the key.");
+ }
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::AttestationExpirationDate(DateTime::from_millis_epoch(
+ expiration_date,
+ )));
+ metadata.store_in_db(key_id, &tx).context("Failed to insert key metadata.")?;
+ Self::set_blob_internal(
+ &tx,
+ key_id,
+ SubComponentType::CERT_CHAIN,
+ Some(cert_chain),
+ None,
+ )
+ .context("Failed to insert cert chain")?;
+ Self::set_blob_internal(&tx, key_id, SubComponentType::CERT, Some(batch_cert), None)
+ .context("Failed to insert cert")?;
+ Ok(()).no_gc()
+ })
+ .context("In store_signed_attestation_certificate_chain: ")
+ }
+
+ /// Assigns the next unassigned attestation key to a domain/namespace combo that does not
+ /// currently have a key assigned to it.
+ pub fn assign_attestation_key(
+ &mut self,
+ domain: Domain,
+ namespace: i64,
+ km_uuid: &Uuid,
+ ) -> Result<()> {
+ match domain {
+ Domain::APP | Domain::SELINUX => {}
+ _ => {
+ return Err(KsError::sys()).context(format!(
+ concat!(
+ "In assign_attestation_key: Domain {:?} ",
+ "must be either App or SELinux.",
+ ),
+ domain
+ ));
+ }
+ }
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let result = tx
+ .execute(
+ "UPDATE persistent.keyentry
+ SET domain=?1, namespace=?2
+ WHERE
+ id =
+ (SELECT MIN(id)
+ FROM persistent.keyentry
+ WHERE ALIAS IS NOT NULL
+ AND domain IS NULL
+ AND key_type IS ?3
+ AND state IS ?4
+ AND km_uuid IS ?5)
+ AND
+ (SELECT COUNT(*)
+ FROM persistent.keyentry
+ WHERE domain=?1
+ AND namespace=?2
+ AND key_type IS ?3
+ AND state IS ?4
+ AND km_uuid IS ?5) = 0;",
+ params![
+ domain.0 as u32,
+ namespace,
+ KeyType::Attestation,
+ KeyLifeCycle::Live,
+ km_uuid,
+ ],
+ )
+ .context("Failed to assign attestation key")?;
+ if result == 0 {
+ return Err(KsError::Rc(ResponseCode::OUT_OF_KEYS)).context("Out of keys.");
+ } else if result > 1 {
+ return Err(KsError::sys())
+ .context(format!("Expected to update 1 entry, instead updated {}", result));
+ }
+ Ok(()).no_gc()
+ })
+ .context("In assign_attestation_key: ")
+ }
+
+ /// Retrieves `num_keys` attestation keys that have not yet been signed by a remote
+ /// provisioning server, or as many as are available if fewer than `num_keys` such entries
+ /// exist in the table.
+ pub fn fetch_unsigned_attestation_keys(
+ &mut self,
+ num_keys: i32,
+ km_uuid: &Uuid,
+ ) -> Result<Vec<Vec<u8>>> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx
+ .prepare(
+ "SELECT data
+ FROM persistent.keymetadata
+ WHERE tag = ? AND keyentryid IN
+ (SELECT id
+ FROM persistent.keyentry
+ WHERE
+ alias IS NULL AND
+ domain IS NULL AND
+ namespace IS NULL AND
+ key_type = ? AND
+ km_uuid = ?
+ LIMIT ?);",
+ )
+ .context("Failed to prepare statement")?;
+ let rows = stmt
+ .query_map(
+ params![
+ KeyMetaData::AttestationMacedPublicKey,
+ KeyType::Attestation,
+ km_uuid,
+ num_keys
+ ],
+ |row| Ok(row.get(0)?),
+ )?
+ .collect::<rusqlite::Result<Vec<Vec<u8>>>>()
+ .context("Failed to execute statement")?;
+ Ok(rows).no_gc()
+ })
+ .context("In fetch_unsigned_attestation_keys")
+ }
+
+ /// Removes any keys that have expired as of the current time. Returns the number of keys
+ /// marked unreferenced that are bound to be garbage collected.
+ pub fn delete_expired_attestation_keys(&mut self) -> Result<i32> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx
+ .prepare(
+ "SELECT keyentryid, data
+ FROM persistent.keymetadata
+ WHERE tag = ? AND keyentryid IN
+ (SELECT id
+ FROM persistent.keyentry
+ WHERE key_type = ?);",
+ )
+ .context("Failed to prepare query")?;
+ let key_ids_to_check = stmt
+ .query_map(
+ params![KeyMetaData::AttestationExpirationDate, KeyType::Attestation],
+ |row| Ok((row.get(0)?, row.get(1)?)),
+ )?
+ .collect::<rusqlite::Result<Vec<(i64, DateTime)>>>()
+ .context("Failed to get date metadata")?;
+ let curr_time = DateTime::from_millis_epoch(
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64,
+ );
+ let mut num_deleted = 0;
+ for id in key_ids_to_check.iter().filter(|kt| kt.1 < curr_time).map(|kt| kt.0) {
+ if Self::mark_unreferenced(&tx, id)? {
+ num_deleted += 1;
+ }
+ }
+ Ok(num_deleted).do_gc(num_deleted != 0)
+ })
+ .context("In delete_expired_attestation_keys: ")
+ }
+
+ /// Deletes all remotely provisioned attestation keys in the system, regardless of the state
+ /// they are in. This is useful primarily as a testing mechanism.
+ pub fn delete_all_attestation_keys(&mut self) -> Result<i64> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx
+ .prepare(
+ "SELECT id FROM persistent.keyentry
+ WHERE key_type IS ?;",
+ )
+ .context("Failed to prepare statement")?;
+ let keys_to_delete = stmt
+ .query_map(params![KeyType::Attestation], |row| Ok(row.get(0)?))?
+ .collect::<rusqlite::Result<Vec<i64>>>()
+ .context("Failed to execute statement")?;
+ let num_deleted = keys_to_delete
+ .iter()
+ .map(|id| Self::mark_unreferenced(&tx, *id))
+ .collect::<Result<Vec<bool>>>()
+ .context("Failed to execute mark_unreferenced on a keyid")?
+ .into_iter()
+ .filter(|result| *result)
+ .count() as i64;
+ Ok(num_deleted).do_gc(num_deleted != 0)
+ })
+ .context("In delete_all_attestation_keys: ")
+ }
+
+ /// Counts the number of keys that will expire by the provided epoch date and the number of
+ /// keys not currently assigned to a domain.
+ pub fn get_attestation_pool_status(
+ &mut self,
+ date: i64,
+ km_uuid: &Uuid,
+ ) -> Result<AttestationPoolStatus> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx.prepare(
+ "SELECT data
+ FROM persistent.keymetadata
+ WHERE tag = ? AND keyentryid IN
+ (SELECT id
+ FROM persistent.keyentry
+ WHERE alias IS NOT NULL
+ AND key_type = ?
+ AND km_uuid = ?
+ AND state = ?);",
+ )?;
+ let times = stmt
+ .query_map(
+ params![
+ KeyMetaData::AttestationExpirationDate,
+ KeyType::Attestation,
+ km_uuid,
+ KeyLifeCycle::Live
+ ],
+ |row| Ok(row.get(0)?),
+ )?
+ .collect::<rusqlite::Result<Vec<DateTime>>>()
+ .context("Failed to execute metadata statement")?;
+ let expiring =
+ times.iter().filter(|time| time < &&DateTime::from_millis_epoch(date)).count()
+ as i32;
+ stmt = tx.prepare(
+ "SELECT alias, domain
+ FROM persistent.keyentry
+ WHERE key_type = ? AND km_uuid = ? AND state = ?;",
+ )?;
+ let rows = stmt
+ .query_map(params![KeyType::Attestation, km_uuid, KeyLifeCycle::Live], |row| {
+ Ok((row.get(0)?, row.get(1)?))
+ })?
+ .collect::<rusqlite::Result<Vec<(Option<String>, Option<u32>)>>>()
+ .context("Failed to execute keyentry statement")?;
+ let mut unassigned = 0i32;
+ let mut attested = 0i32;
+ let total = rows.len() as i32;
+ for (alias, domain) in rows {
+ match (alias, domain) {
+ (Some(_alias), None) => {
+ attested += 1;
+ unassigned += 1;
+ }
+ (Some(_alias), Some(_domain)) => {
+ attested += 1;
+ }
+ _ => {}
+ }
+ }
+ Ok(AttestationPoolStatus { expiring, unassigned, attested, total }).no_gc()
+ })
+ .context("In get_attestation_pool_status: ")
+ }
+
+ /// Fetches the private key and corresponding certificate chain assigned to a
+ /// domain/namespace pair. Returns None if the domain/namespace pair has no key assigned,
+ /// or exactly one CertificateChain otherwise.
+ pub fn retrieve_attestation_key_and_cert_chain(
+ &mut self,
+ domain: Domain,
+ namespace: i64,
+ km_uuid: &Uuid,
+ ) -> Result<Option<CertificateChain>> {
+ match domain {
+ Domain::APP | Domain::SELINUX => {}
+ _ => {
+ return Err(KsError::sys())
+ .context(format!("Domain {:?} must be either App or SELinux.", domain));
+ }
+ }
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ let mut stmt = tx.prepare(
+ "SELECT subcomponent_type, blob
+ FROM persistent.blobentry
+ WHERE keyentryid IN
+ (SELECT id
+ FROM persistent.keyentry
+ WHERE key_type = ?
+ AND domain = ?
+ AND namespace = ?
+ AND state = ?
+ AND km_uuid = ?);",
+ )?;
+ let rows = stmt
+ .query_map(
+ params![
+ KeyType::Attestation,
+ domain.0 as u32,
+ namespace,
+ KeyLifeCycle::Live,
+ km_uuid
+ ],
+ |row| Ok((row.get(0)?, row.get(1)?)),
+ )?
+ .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
+ .context("query failed.")?;
+ if rows.is_empty() {
+ return Ok(None).no_gc();
+ } else if rows.len() != 3 {
+ return Err(KsError::sys()).context(format!(
+ concat!(
+ "Expected to get a single attestation",
+ "key, cert, and cert chain for a total of 3 entries, but instead got {}."
+ ),
+ rows.len()
+ ));
+ }
+ let mut km_blob: Vec<u8> = Vec::new();
+ let mut cert_chain_blob: Vec<u8> = Vec::new();
+ let mut batch_cert_blob: Vec<u8> = Vec::new();
+ for row in rows {
+ let sub_type: SubComponentType = row.0;
+ match sub_type {
+ SubComponentType::KEY_BLOB => {
+ km_blob = row.1;
+ }
+ SubComponentType::CERT_CHAIN => {
+ cert_chain_blob = row.1;
+ }
+ SubComponentType::CERT => {
+ batch_cert_blob = row.1;
+ }
+ _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
+ }
+ }
+ Ok(Some(CertificateChain {
+ private_key: ZVec::try_from(km_blob)?,
+ batch_cert: batch_cert_blob,
+ cert_chain: cert_chain_blob,
+ }))
+ .no_gc()
+ })
+ .context("In retrieve_attestation_key_and_cert_chain:")
+ }
+
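
Taken together, these methods support a remote provisioning flow roughly like the sketch below; the byte buffers, namespace, and expiration value are placeholders, and error handling is elided.

// 1. Record a freshly generated, not-yet-signed attestation key.
db.create_attestation_key_entry(&maced_public_key, &raw_public_key, &private_key, &KEYSTORE_UUID)?;

// 2. Ship unsigned keys to the provisioning server, then store the returned chain.
let unsigned = db.fetch_unsigned_attestation_keys(10, &KEYSTORE_UUID)?;
db.store_signed_attestation_certificate_chain(
    &raw_public_key,
    &batch_cert,
    &cert_chain,
    expiration_millis,
    &KEYSTORE_UUID,
)?;

// 3. Bind a signed key to an app's namespace and fetch it at attestation time.
db.assign_attestation_key(Domain::APP, namespace, &KEYSTORE_UUID)?;
let chain = db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;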
/// Updates the alias column of the given key id `newid` with the given alias,
/// and atomically, removes the alias, domain, and namespace from another row
/// with the same alias-domain-namespace tuple if such a row exists.
@@ -1037,10 +1985,10 @@
tx: &Transaction,
newid: &KeyIdGuard,
alias: &str,
- domain: Domain,
- namespace: i64,
+ domain: &Domain,
+ namespace: &i64,
) -> Result<bool> {
- match domain {
+ match *domain {
Domain::APP | Domain::SELINUX => {}
_ => {
return Err(KsError::sys()).context(format!(
@@ -1067,8 +2015,8 @@
KeyLifeCycle::Live,
newid.0,
domain.0 as u32,
- namespace,
- KeyLifeCycle::Existing
+ *namespace,
+ KeyLifeCycle::Existing,
],
)
.context("In rebind_alias: Failed to set alias.")?;
@@ -1086,15 +2034,15 @@
/// fields, and rebinds the given alias to the new key.
/// The boolean returned is a hint for the garbage collector. If true, a key was replaced,
/// is now unreferenced and needs to be collected.
- pub fn store_new_key<'a>(
+ pub fn store_new_key(
&mut self,
- key: KeyDescriptor,
- params: impl IntoIterator<Item = &'a KeyParameter>,
- blob: &[u8],
- cert: Option<&[u8]>,
- cert_chain: Option<&[u8]>,
+ key: &KeyDescriptor,
+ params: &[KeyParameter],
+ blob_info: &(&[u8], &BlobMetaData),
+ cert_info: &CertificateInfo,
metadata: &KeyMetaData,
- ) -> Result<(bool, KeyIdGuard)> {
+ km_uuid: &Uuid,
+ ) -> Result<KeyIdGuard> {
let (alias, domain, namespace) = match key {
KeyDescriptor { alias: Some(alias), domain: Domain::APP, nspace, blob: None }
| KeyDescriptor { alias: Some(alias), domain: Domain::SELINUX, nspace, blob: None } => {
@@ -1106,33 +2054,88 @@
}
};
self.with_transaction(TransactionBehavior::Immediate, |tx| {
- let key_id = Self::create_key_entry_internal(tx, domain, namespace)
+ let key_id = Self::create_key_entry_internal(tx, &domain, namespace, km_uuid)
.context("Trying to create new key entry.")?;
- Self::insert_blob_internal(tx, key_id.id(), SubComponentType::KEY_BLOB, blob)
- .context("Trying to insert the key blob.")?;
- if let Some(cert) = cert {
- Self::insert_blob_internal(tx, key_id.id(), SubComponentType::CERT, cert)
+ let (blob, blob_metadata) = *blob_info;
+ Self::set_blob_internal(
+ tx,
+ key_id.id(),
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(&blob_metadata),
+ )
+ .context("Trying to insert the key blob.")?;
+ if let Some(cert) = &cert_info.cert {
+ Self::set_blob_internal(tx, key_id.id(), SubComponentType::CERT, Some(&cert), None)
.context("Trying to insert the certificate.")?;
}
- if let Some(cert_chain) = cert_chain {
- Self::insert_blob_internal(
+ if let Some(cert_chain) = &cert_info.cert_chain {
+ Self::set_blob_internal(
tx,
key_id.id(),
SubComponentType::CERT_CHAIN,
- cert_chain,
+ Some(&cert_chain),
+ None,
)
.context("Trying to insert the certificate chain.")?;
}
Self::insert_keyparameter_internal(tx, &key_id, params)
.context("Trying to insert key parameters.")?;
- metadata.store_in_db(key_id.id(), tx).context("Tryin to insert key metadata.")?;
- let need_gc = Self::rebind_alias(tx, &key_id, &alias, domain, namespace)
+ metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
+ let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
.context("Trying to rebind alias.")?;
- Ok((need_gc, key_id))
+ Ok(key_id).do_gc(need_gc)
})
.context("In store_new_key.")
}
+ /// Stores a new certificate.
+ /// The function creates a new key entry, populates the blob field and metadata, and rebinds
+ /// the given alias to the new cert.
+ pub fn store_new_certificate(
+ &mut self,
+ key: &KeyDescriptor,
+ cert: &[u8],
+ km_uuid: &Uuid,
+ ) -> Result<KeyIdGuard> {
+ let (alias, domain, namespace) = match key {
+ KeyDescriptor { alias: Some(alias), domain: Domain::APP, nspace, blob: None }
+ | KeyDescriptor { alias: Some(alias), domain: Domain::SELINUX, nspace, blob: None } => {
+ (alias, key.domain, nspace)
+ }
+ _ => {
+ return Err(KsError::Rc(ResponseCode::INVALID_ARGUMENT)).context(
+ "In store_new_certificate: Need alias and domain must be APP or SELINUX.",
+ )
+ }
+ };
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let key_id = Self::create_key_entry_internal(tx, &domain, namespace, km_uuid)
+ .context("Trying to create new key entry.")?;
+
+ Self::set_blob_internal(
+ tx,
+ key_id.id(),
+ SubComponentType::CERT_CHAIN,
+ Some(cert),
+ None,
+ )
+ .context("Trying to insert certificate.")?;
+
+ let mut metadata = KeyMetaData::new();
+ metadata.add(KeyMetaEntry::CreationDate(
+ DateTime::now().context("Trying to make creation time.")?,
+ ));
+
+ metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
+
+ let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace)
+ .context("Trying to rebind alias.")?;
+ Ok(key_id).do_gc(need_gc)
+ })
+ .context("In store_new_certificate.")
+ }
+
// Helper function loading the key_id given the key descriptor
// tuple comprising domain, namespace, and alias.
// Requires a valid transaction.
@@ -1146,7 +2149,7 @@
.prepare(
"SELECT id FROM persistent.keyentry
WHERE
- key_type = ?
+ key_type = ?
AND domain = ?
AND namespace = ?
AND alias = ?
@@ -1179,7 +2182,7 @@
/// check and the key id can be used to load further key artifacts.
fn load_access_tuple(
tx: &Transaction,
- key: KeyDescriptor,
+ key: &KeyDescriptor,
key_type: KeyType,
caller_uid: u32,
) -> Result<(i64, KeyDescriptor, Option<KeyPermSet>)> {
@@ -1191,7 +2194,7 @@
// of the caller supplied namespace if the domain field is
// Domain::APP.
Domain::APP | Domain::SELINUX => {
- let mut access_key = key;
+ let mut access_key = key.clone();
if access_key.domain == Domain::APP {
access_key.nspace = caller_uid as i64;
}
@@ -1223,7 +2226,7 @@
))
})
.context("Domain::GRANT.")?;
- Ok((key_id, key, Some(access_vector.into())))
+ Ok((key_id, key.clone(), Some(access_vector.into())))
}
// Domain::KEY_ID. In this case we load the domain and namespace from the
@@ -1275,7 +2278,7 @@
};
let key_id = key.nspace;
- let mut access_key = key;
+ let mut access_key: KeyDescriptor = key.clone();
access_key.domain = domain;
access_key.nspace = namespace;
@@ -1289,7 +2292,7 @@
key_id: i64,
load_bits: KeyEntryLoadBits,
tx: &Transaction,
- ) -> Result<(Option<Vec<u8>>, Option<Vec<u8>>, Option<Vec<u8>>)> {
+ ) -> Result<(bool, Option<(Vec<u8>, BlobMetaData)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
let mut stmt = tx
.prepare(
"SELECT MAX(id), subcomponent_type, blob FROM persistent.blobentry
@@ -1300,15 +2303,20 @@
let mut rows =
stmt.query(params![key_id]).context("In load_blob_components: query failed.")?;
- let mut km_blob: Option<Vec<u8>> = None;
+ let mut key_blob: Option<(i64, Vec<u8>)> = None;
let mut cert_blob: Option<Vec<u8>> = None;
let mut cert_chain_blob: Option<Vec<u8>> = None;
+ let mut has_km_blob: bool = false;
db_utils::with_rows_extract_all(&mut rows, |row| {
let sub_type: SubComponentType =
row.get(1).context("Failed to extract subcomponent_type.")?;
+ has_km_blob = has_km_blob || sub_type == SubComponentType::KEY_BLOB;
match (sub_type, load_bits.load_public(), load_bits.load_km()) {
(SubComponentType::KEY_BLOB, _, true) => {
- km_blob = Some(row.get(2).context("Failed to extract KM blob.")?);
+ key_blob = Some((
+ row.get(0).context("Failed to extract key blob id.")?,
+ row.get(2).context("Failed to extract key blob.")?,
+ ));
}
(SubComponentType::CERT, true, _) => {
cert_blob =
@@ -1327,7 +2335,15 @@
})
.context("In load_blob_components.")?;
- Ok((km_blob, cert_blob, cert_chain_blob))
+ let blob_info = key_blob.map_or::<Result<_>, _>(Ok(None), |(blob_id, blob)| {
+ Ok(Some((
+ blob,
+ BlobMetaData::load_from_db(blob_id, tx)
+ .context("In load_blob_components: Trying to load blob_metadata.")?,
+ )))
+ })?;
+
+ Ok((has_km_blob, blob_info, cert_blob, cert_chain_blob))
}
fn load_key_parameters(key_id: i64, tx: &Transaction) -> Result<Vec<KeyParameter>> {
@@ -1360,7 +2376,7 @@
/// usage has been exhausted, if not, decreases the usage count. If the usage count reaches
/// zero, the key also gets marked unreferenced and scheduled for deletion.
/// Returns Ok(true) if the key was marked unreferenced as a hint to the garbage collector.
- pub fn check_and_update_key_usage_count(&mut self, key_id: i64) -> Result<bool> {
+ pub fn check_and_update_key_usage_count(&mut self, key_id: i64) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let limit: Option<i32> = tx
.query_row(
@@ -1385,9 +2401,10 @@
match limit {
1 => Self::mark_unreferenced(tx, key_id)
+ .map(|need_gc| (need_gc, ()))
.context("Trying to mark limited use key for deletion."),
0 => Err(KsError::Km(ErrorCode::INVALID_KEY_BLOB)).context("Key is exhausted."),
- _ => Ok(false),
+ _ => Ok(()).no_gc(),
}
})
.context("In check_and_update_key_usage_count.")
@@ -1400,11 +2417,40 @@
/// the blob database.
pub fn load_key_entry(
&mut self,
- key: KeyDescriptor,
+ key: &KeyDescriptor,
key_type: KeyType,
load_bits: KeyEntryLoadBits,
caller_uid: u32,
- check_permission: impl FnOnce(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
+ check_permission: impl Fn(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
+ ) -> Result<(KeyIdGuard, KeyEntry)> {
+ loop {
+ match self.load_key_entry_internal(
+ key,
+ key_type,
+ load_bits,
+ caller_uid,
+ &check_permission,
+ ) {
+ Ok(result) => break Ok(result),
+ Err(e) => {
+ if Self::is_locked_error(&e) {
+ std::thread::sleep(std::time::Duration::from_micros(500));
+ continue;
+ } else {
+ return Err(e).context("In load_key_entry.");
+ }
+ }
+ }
+ }
+ }
+
+ fn load_key_entry_internal(
+ &mut self,
+ key: &KeyDescriptor,
+ key_type: KeyType,
+ load_bits: KeyEntryLoadBits,
+ caller_uid: u32,
+ check_permission: &impl Fn(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
) -> Result<(KeyIdGuard, KeyEntry)> {
// KEY ID LOCK 1/2
// If we got a key descriptor with a key id we can get the lock right away.
@@ -1449,15 +2495,16 @@
let key_id_guard = KEY_ID_LOCK.get(key_id);
// Create a new transaction.
- let tx = self.conn.unchecked_transaction().context(
- "In load_key_entry: Failed to initialize transaction. (deferred key lock)",
- )?;
+ let tx = self
+ .conn
+ .unchecked_transaction()
+ .context("In load_key_entry: Failed to initialize transaction.")?;
Self::load_access_tuple(
&tx,
// This time we have to load the key by the retrieved key id, because the
// alias may have been rebound after we rolled back the transaction.
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::KEY_ID,
nspace: key_id,
..Default::default()
@@ -1483,13 +2530,14 @@
fn mark_unreferenced(tx: &Transaction, key_id: i64) -> Result<bool> {
let updated = tx
- .execute(
- "UPDATE persistent.keyentry SET state = ? WHERE id = ?;",
- params![KeyLifeCycle::Unreferenced, key_id],
- )
- .context("In mark_unreferenced: Failed to update state of key entry.")?;
- tx.execute("DELETE from persistent.grant WHERE keyentryid = ?;", params![key_id])
- .context("In mark_unreferenced: Failed to drop grants.")?;
+ .execute("DELETE FROM persistent.keyentry WHERE id = ?;", params![key_id])
+ .context("Trying to delete keyentry.")?;
+ tx.execute("DELETE FROM persistent.keymetadata WHERE keyentryid = ?;", params![key_id])
+ .context("Trying to delete keymetadata.")?;
+ tx.execute("DELETE FROM persistent.keyparameter WHERE keyentryid = ?;", params![key_id])
+ .context("Trying to delete keyparameters.")?;
+ tx.execute("DELETE FROM persistent.grant WHERE keyentryid = ?;", params![key_id])
+ .context("Trying to delete grants.")?;
Ok(updated != 0)
}
@@ -1497,11 +2545,11 @@
/// Returns Ok(true) if a key was marked unreferenced as a hint for the garbage collector.
pub fn unbind_key(
&mut self,
- key: KeyDescriptor,
+ key: &KeyDescriptor,
key_type: KeyType,
caller_uid: u32,
- check_permission: impl FnOnce(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
- ) -> Result<bool> {
+ check_permission: impl Fn(&KeyDescriptor, Option<KeyPermSet>) -> Result<()>,
+ ) -> Result<()> {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let (key_id, access_key_descriptor, access_vector) =
Self::load_access_tuple(tx, key, key_type, caller_uid)
@@ -1512,11 +2560,143 @@
check_permission(&access_key_descriptor, access_vector)
.context("While checking permission.")?;
- Self::mark_unreferenced(tx, key_id).context("Trying to mark the key unreferenced.")
+ Self::mark_unreferenced(tx, key_id)
+ .map(|need_gc| (need_gc, ()))
+ .context("Trying to mark the key unreferenced.")
})
.context("In unbind_key.")
}
+ fn get_key_km_uuid(tx: &Transaction, key_id: i64) -> Result<Uuid> {
+ tx.query_row(
+ "SELECT km_uuid FROM persistent.keyentry WHERE id = ?",
+ params![key_id],
+ |row| row.get(0),
+ )
+ .context("In get_key_km_uuid.")
+ }
+
+ /// Delete all artifacts belonging to the namespace given by the domain-namespace tuple.
+ /// This leaves all of the blob entries orphaned for subsequent garbage collection.
+ pub fn unbind_keys_for_namespace(&mut self, domain: Domain, namespace: i64) -> Result<()> {
+ if !(domain == Domain::APP || domain == Domain::SELINUX) {
+ return Err(KsError::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context("In unbind_keys_for_namespace.");
+ }
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "DELETE FROM persistent.keymetadata
+ WHERE keyentryid IN (
+ SELECT id FROM persistent.keyentry
+ WHERE domain = ? AND namespace = ?
+ );",
+ params![domain.0, namespace],
+ )
+ .context("Trying to delete keymetadata.")?;
+ tx.execute(
+ "DELETE FROM persistent.keyparameter
+ WHERE keyentryid IN (
+ SELECT id FROM persistent.keyentry
+ WHERE domain = ? AND namespace = ?
+ );",
+ params![domain.0, namespace],
+ )
+ .context("Trying to delete keyparameters.")?;
+ tx.execute(
+ "DELETE FROM persistent.grant
+ WHERE keyentryid IN (
+ SELECT id FROM persistent.keyentry
+ WHERE domain = ? AND namespace = ?
+ );",
+ params![domain.0, namespace],
+ )
+ .context("Trying to delete grants.")?;
+ tx.execute(
+ "DELETE FROM persistent.keyentry WHERE domain = ? AND namespace = ?;",
+ params![domain.0, namespace],
+ )
+ .context("Trying to delete keyentry.")?;
+ Ok(()).need_gc()
+ })
+ .context("In unbind_keys_for_namespace")
+ }
+
+ /// Delete the keys created on behalf of the user, denoted by the user id.
+ /// Deletes all of the user's keys unless 'keep_non_super_encrypted_keys' is set to true,
+ /// in which case keys whose key blobs carry no 'encrypted_by' metadata are spared.
+ /// The garbage collector is notified if any keys were marked unreferenced.
+ pub fn unbind_keys_for_user(
+ &mut self,
+ user_id: u32,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ let mut stmt = tx
+ .prepare(&format!(
+ "SELECT id from persistent.keyentry
+ WHERE (
+ key_type = ?
+ AND domain = ?
+ AND cast ( (namespace/{aid_user_offset}) as int) = ?
+ AND state = ?
+ ) OR (
+ key_type = ?
+ AND namespace = ?
+ AND alias = ?
+ AND state = ?
+ );",
+ aid_user_offset = AID_USER_OFFSET
+ ))
+ .context(concat!(
+ "In unbind_keys_for_user. ",
+ "Failed to prepare the query to find the keys created by apps."
+ ))?;
+
+ let mut rows = stmt
+ .query(params![
+ // WHERE client key:
+ KeyType::Client,
+ Domain::APP.0 as u32,
+ user_id,
+ KeyLifeCycle::Live,
+ // OR super key:
+ KeyType::Super,
+ user_id,
+ USER_SUPER_KEY.alias,
+ KeyLifeCycle::Live
+ ])
+ .context("In unbind_keys_for_user. Failed to query the keys created by apps.")?;
+
+ let mut key_ids: Vec<i64> = Vec::new();
+ db_utils::with_rows_extract_all(&mut rows, |row| {
+ key_ids
+ .push(row.get(0).context("Failed to read key id of a key created by an app.")?);
+ Ok(())
+ })
+ .context("In unbind_keys_for_user.")?;
+
+ let mut notify_gc = false;
+ for key_id in key_ids {
+ if keep_non_super_encrypted_keys {
+ // Load metadata and filter out non-super-encrypted keys.
+ if let (_, Some((_, blob_metadata)), _, _) =
+ Self::load_blob_components(key_id, KeyEntryLoadBits::KM, tx)
+ .context("In unbind_keys_for_user: Trying to load blob info.")?
+ {
+ if blob_metadata.encrypted_by().is_none() {
+ continue;
+ }
+ }
+ }
+ notify_gc = Self::mark_unreferenced(&tx, key_id)
+ .context("In unbind_keys_for_user.")?
+ || notify_gc;
+ }
+ Ok(()).do_gc(notify_gc)
+ })
+ .context("In unbind_keys_for_user.")
+ }
+
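
A short sketch of the two situations this flag distinguishes; the user id is illustrative.

// Removing an Android user entirely: unbind everything belonging to user 10,
// including the super key, and let the garbage collector reclaim the blobs.
db.unbind_keys_for_user(10, false)?;

// Resetting the lock screen: only drop keys whose blob metadata records an
// 'encrypted_by' entry (i.e., super-encrypted keys) and keep the rest.
db.unbind_keys_for_user(10, true)?;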
fn load_key_components(
tx: &Transaction,
load_bits: KeyEntryLoadBits,
@@ -1524,31 +2704,25 @@
) -> Result<KeyEntry> {
let metadata = KeyMetaData::load_from_db(key_id, &tx).context("In load_key_components.")?;
- let (km_blob, cert_blob, cert_chain_blob) =
+ let (has_km_blob, key_blob_info, cert_blob, cert_chain_blob) =
Self::load_blob_components(key_id, load_bits, &tx)
.context("In load_key_components.")?;
- let parameters =
- Self::load_key_parameters(key_id, &tx).context("In load_key_components.")?;
+ let parameters = Self::load_key_parameters(key_id, &tx)
+ .context("In load_key_components: Trying to load key parameters.")?;
- // Extract the security level by checking the security level of the origin tag.
- // Super keys don't have key parameters so we use security_level software by default.
- let sec_level = parameters
- .iter()
- .find_map(|k| match k.get_tag() {
- Tag::ORIGIN => Some(*k.security_level()),
- _ => None,
- })
- .unwrap_or(SecurityLevel::SOFTWARE);
+ let km_uuid = Self::get_key_km_uuid(&tx, key_id)
+ .context("In load_key_components: Trying to get KM uuid.")?;
Ok(KeyEntry {
id: key_id,
- km_blob,
+ key_blob_info,
cert: cert_blob,
cert_chain: cert_chain_blob,
- sec_level,
+ km_uuid,
parameters,
metadata,
+ pure_cert: !has_km_blob,
})
}
@@ -1556,30 +2730,31 @@
/// The key descriptors will have the domain, nspace, and alias field set.
/// Domain must be APP or SELINUX, the caller must make sure of that.
pub fn list(&mut self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
- let mut stmt = self
- .conn
- .prepare(
- "SELECT alias FROM persistent.keyentry
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ let mut stmt = tx
+ .prepare(
+ "SELECT alias FROM persistent.keyentry
WHERE domain = ? AND namespace = ? AND alias IS NOT NULL AND state = ?;",
- )
- .context("In list: Failed to prepare.")?;
+ )
+ .context("In list: Failed to prepare.")?;
- let mut rows = stmt
- .query(params![domain.0 as u32, namespace, KeyLifeCycle::Live])
- .context("In list: Failed to query.")?;
+ let mut rows = stmt
+ .query(params![domain.0 as u32, namespace, KeyLifeCycle::Live])
+ .context("In list: Failed to query.")?;
- let mut descriptors: Vec<KeyDescriptor> = Vec::new();
- db_utils::with_rows_extract_all(&mut rows, |row| {
- descriptors.push(KeyDescriptor {
- domain,
- nspace: namespace,
- alias: Some(row.get(0).context("Trying to extract alias.")?),
- blob: None,
- });
- Ok(())
+ let mut descriptors: Vec<KeyDescriptor> = Vec::new();
+ db_utils::with_rows_extract_all(&mut rows, |row| {
+ descriptors.push(KeyDescriptor {
+ domain,
+ nspace: namespace,
+ alias: Some(row.get(0).context("Trying to extract alias.")?),
+ blob: None,
+ });
+ Ok(())
+ })
+ .context("In list: Failed to extract rows.")?;
+ Ok(descriptors).no_gc()
})
- .context("In list.")?;
- Ok(descriptors)
}
/// Adds a grant to the grant table.
@@ -1590,104 +2765,98 @@
/// grant id in the namespace field of the resulting KeyDescriptor.
pub fn grant(
&mut self,
- key: KeyDescriptor,
+ key: &KeyDescriptor,
caller_uid: u32,
grantee_uid: u32,
access_vector: KeyPermSet,
- check_permission: impl FnOnce(&KeyDescriptor, &KeyPermSet) -> Result<()>,
+ check_permission: impl Fn(&KeyDescriptor, &KeyPermSet) -> Result<()>,
) -> Result<KeyDescriptor> {
- let tx = self
- .conn
- .transaction_with_behavior(TransactionBehavior::Immediate)
- .context("In grant: Failed to initialize transaction.")?;
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ // Load the key_id and complete the access control tuple.
+ // We ignore the access vector here because grants cannot be granted.
+ // The access vector returned here expresses the permissions the
+ // grantee has if key.domain == Domain::GRANT. But this vector
+ // cannot include the grant permission by design, so there is no way the
+ // subsequent permission check can pass.
+ // We could check key.domain == Domain::GRANT and fail early.
+ // But even if we load the access tuple by grant here, the permission
+ // check denies the attempt to create a grant by grant descriptor.
+ let (key_id, access_key_descriptor, _) =
+ Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
+ .context("In grant")?;
- // Load the key_id and complete the access control tuple.
- // We ignore the access vector here because grants cannot be granted.
- // The access vector returned here expresses the permissions the
- // grantee has if key.domain == Domain::GRANT. But this vector
- // cannot include the grant permission by design, so there is no way the
- // subsequent permission check can pass.
- // We could check key.domain == Domain::GRANT and fail early.
- // But even if we load the access tuple by grant here, the permission
- // check denies the attempt to create a grant by grant descriptor.
- let (key_id, access_key_descriptor, _) =
- Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid).context("In grant")?;
+ // Perform access control. It is vital that we return here if the permission
+ // was denied. So do not touch that '?' at the end of the line.
+ // This permission check checks if the caller has the grant permission
+ // for the given key and in addition to all of the permissions
+ // expressed in `access_vector`.
+ check_permission(&access_key_descriptor, &access_vector)
+ .context("In grant: check_permission failed.")?;
- // Perform access control. It is vital that we return here if the permission
- // was denied. So do not touch that '?' at the end of the line.
- // This permission check checks if the caller has the grant permission
- // for the given key and in addition to all of the permissions
- // expressed in `access_vector`.
- check_permission(&access_key_descriptor, &access_vector)
- .context("In grant: check_permission failed.")?;
-
- let grant_id = if let Some(grant_id) = tx
- .query_row(
- "SELECT id FROM persistent.grant
+ let grant_id = if let Some(grant_id) = tx
+ .query_row(
+ "SELECT id FROM persistent.grant
WHERE keyentryid = ? AND grantee = ?;",
- params![key_id, grantee_uid],
- |row| row.get(0),
- )
- .optional()
- .context("In grant: Failed get optional existing grant id.")?
- {
- tx.execute(
- "UPDATE persistent.grant
+ params![key_id, grantee_uid],
+ |row| row.get(0),
+ )
+ .optional()
+ .context("In grant: Failed get optional existing grant id.")?
+ {
+ tx.execute(
+ "UPDATE persistent.grant
SET access_vector = ?
WHERE id = ?;",
- params![i32::from(access_vector), grant_id],
- )
- .context("In grant: Failed to update existing grant.")?;
- grant_id
- } else {
- Self::insert_with_retry(|id| {
- tx.execute(
- "INSERT INTO persistent.grant (id, grantee, keyentryid, access_vector)
- VALUES (?, ?, ?, ?);",
- params![id, grantee_uid, key_id, i32::from(access_vector)],
+ params![i32::from(access_vector), grant_id],
)
- })
- .context("In grant")?
- };
- tx.commit().context("In grant: failed to commit transaction.")?;
+ .context("In grant: Failed to update existing grant.")?;
+ grant_id
+ } else {
+ Self::insert_with_retry(|id| {
+ tx.execute(
+ "INSERT INTO persistent.grant (id, grantee, keyentryid, access_vector)
+ VALUES (?, ?, ?, ?);",
+ params![id, grantee_uid, key_id, i32::from(access_vector)],
+ )
+ })
+ .context("In grant")?
+ };
- Ok(KeyDescriptor { domain: Domain::GRANT, nspace: grant_id, alias: None, blob: None })
+ Ok(KeyDescriptor { domain: Domain::GRANT, nspace: grant_id, alias: None, blob: None })
+ .no_gc()
+ })
}
/// This function checks permissions like `grant` and `load_key_entry`
/// before removing a grant from the grant table.
pub fn ungrant(
&mut self,
- key: KeyDescriptor,
+ key: &KeyDescriptor,
caller_uid: u32,
grantee_uid: u32,
- check_permission: impl FnOnce(&KeyDescriptor) -> Result<()>,
+ check_permission: impl Fn(&KeyDescriptor) -> Result<()>,
) -> Result<()> {
- let tx = self
- .conn
- .transaction_with_behavior(TransactionBehavior::Immediate)
- .context("In ungrant: Failed to initialize transaction.")?;
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ // Load the key_id and complete the access control tuple.
+ // We ignore the access vector here because grants cannot be granted.
+ let (key_id, access_key_descriptor, _) =
+ Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
+ .context("In ungrant.")?;
- // Load the key_id and complete the access control tuple.
- // We ignore the access vector here because grants cannot be granted.
- let (key_id, access_key_descriptor, _) =
- Self::load_access_tuple(&tx, key, KeyType::Client, caller_uid)
- .context("In ungrant.")?;
+ // Perform access control. We must return here if the permission
+ // was denied. So do not touch the '?' at the end of this line.
+ check_permission(&access_key_descriptor)
+ .context("In grant: check_permission failed.")?;
- // Perform access control. We must return here if the permission
- // was denied. So do not touch the '?' at the end of this line.
- check_permission(&access_key_descriptor).context("In grant: check_permission failed.")?;
-
- tx.execute(
- "DELETE FROM persistent.grant
+ tx.execute(
+ "DELETE FROM persistent.grant
WHERE keyentryid = ? AND grantee = ?;",
- params![key_id, grantee_uid],
- )
- .context("Failed to delete grant.")?;
+ params![key_id, grantee_uid],
+ )
+ .context("Failed to delete grant.")?;
- tx.commit().context("In ungrant: failed to commit transaction.")?;
-
- Ok(())
+ Ok(()).no_gc()
+ })
}
// Generates a random id and passes it to the given function, which will
@@ -1695,7 +2864,10 @@
// otherwise return the id.
fn insert_with_retry(inserter: impl Fn(i64) -> rusqlite::Result<usize>) -> Result<i64> {
loop {
- let newid: i64 = random();
+ let newid: i64 = match random() {
+ Self::UNASSIGNED_KEY_ID => continue, // UNASSIGNED_KEY_ID cannot be assigned.
+ i => i,
+ };
match inserter(newid) {
// If the id already existed, try again.
Err(rusqlite::Error::SqliteFailure(
@@ -1715,8 +2887,8 @@
/// Insert or replace the auth token based on the UNIQUE constraint of the auth token table
pub fn insert_auth_token(&mut self, auth_token: &HardwareAuthToken) -> Result<()> {
- self.conn
- .execute(
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
"INSERT OR REPLACE INTO perboot.authtoken (challenge, user_id, auth_id,
authenticator_type, timestamp, mac, time_received) VALUES(?, ?, ?, ?, ?, ?, ?);",
params![
@@ -1730,7 +2902,8 @@
],
)
.context("In insert_auth_token: failed to insert auth token into the database")?;
- Ok(())
+ Ok(()).no_gc()
+ })
}
/// Find the newest auth token matching the given predicate.
@@ -1765,34 +2938,37 @@
entry,
Self::get_last_off_body(tx)
.context("In find_auth_token_entry: Trying to get last off body")?,
- )));
+ )))
+ .no_gc();
}
}
- Ok(None)
+ Ok(None).no_gc()
})
.context("In find_auth_token_entry.")
}
/// Insert last_off_body into the metadata table at the initialization of auth token table
- pub fn insert_last_off_body(&self, last_off_body: MonotonicRawTime) -> Result<()> {
- self.conn
- .execute(
+ pub fn insert_last_off_body(&mut self, last_off_body: MonotonicRawTime) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
"INSERT OR REPLACE INTO perboot.metadata (key, value) VALUES (?, ?);",
params!["last_off_body", last_off_body],
)
.context("In insert_last_off_body: failed to insert.")?;
- Ok(())
+ Ok(()).no_gc()
+ })
}
/// Update last_off_body when on_device_off_body is called
- pub fn update_last_off_body(&self, last_off_body: MonotonicRawTime) -> Result<()> {
- self.conn
- .execute(
+ pub fn update_last_off_body(&mut self, last_off_body: MonotonicRawTime) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
"UPDATE perboot.metadata SET value = ? WHERE key = ?;",
params![last_off_body, "last_off_body"],
)
.context("In update_last_off_body: failed to update.")?;
- Ok(())
+ Ok(()).no_gc()
+ })
}
/// Get last_off_body time when finding auth tokens
@@ -1816,6 +2992,7 @@
};
use crate::key_perm_set;
use crate::permission::{KeyPerm, KeyPermSet};
+ use crate::super_key::SuperKeyManager;
use keystore2_test_utils::TempDir;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
HardwareAuthToken::HardwareAuthToken,
@@ -1831,12 +3008,29 @@
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
+ #[cfg(disabled)]
+ use std::time::Instant;
fn new_test_db() -> Result<KeystoreDB> {
let conn = KeystoreDB::make_connection("file::memory:", "file::memory:")?;
- KeystoreDB::init_tables(&conn).context("Failed to initialize tables.")?;
- Ok(KeystoreDB { conn })
+ let mut db = KeystoreDB { conn, gc: None };
+ db.with_transaction(TransactionBehavior::Immediate, |tx| {
+ KeystoreDB::init_tables(tx).context("Failed to initialize tables.").no_gc()
+ })?;
+ Ok(db)
+ }
+
+ fn new_test_db_with_gc<F>(path: &Path, cb: F) -> Result<KeystoreDB>
+ where
+ F: Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static,
+ {
+ let super_key = Arc::new(SuperKeyManager::new());
+
+ let gc_db = KeystoreDB::new(path, None).expect("Failed to open test gc db_connection.");
+ let gc = Gc::new_init_with(Default::default(), move || (Box::new(cb), gc_db, super_key));
+
+ KeystoreDB::new(path, Some(gc))
}
fn rebind_alias(
@@ -1847,7 +3041,7 @@
namespace: i64,
) -> Result<bool> {
db.with_transaction(TransactionBehavior::Immediate, |tx| {
- KeystoreDB::rebind_alias(tx, newid, alias, domain, namespace)
+ KeystoreDB::rebind_alias(tx, newid, alias, &domain, &namespace).no_gc()
})
.context("In rebind_alias.")
}
@@ -1899,12 +3093,13 @@
.prepare("SELECT name from persistent.sqlite_master WHERE type='table' ORDER BY name;")?
.query_map(params![], |row| row.get(0))?
.collect::<rusqlite::Result<Vec<String>>>()?;
- assert_eq!(tables.len(), 5);
+ assert_eq!(tables.len(), 6);
assert_eq!(tables[0], "blobentry");
- assert_eq!(tables[1], "grant");
- assert_eq!(tables[2], "keyentry");
- assert_eq!(tables[3], "keymetadata");
- assert_eq!(tables[4], "keyparameter");
+ assert_eq!(tables[1], "blobmetadata");
+ assert_eq!(tables[2], "grant");
+ assert_eq!(tables[3], "keyentry");
+ assert_eq!(tables[4], "keymetadata");
+ assert_eq!(tables[5], "keyparameter");
let tables = db
.conn
.prepare("SELECT name from perboot.sqlite_master WHERE type='table' ORDER BY name;")?
@@ -1994,13 +3189,13 @@
#[test]
fn test_persistence_for_files() -> Result<()> {
let temp_dir = TempDir::new("persistent_db_test")?;
- let mut db = KeystoreDB::new(temp_dir.path())?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
- db.create_key_entry(Domain::APP, 100)?;
+ db.create_key_entry(&Domain::APP, &100, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 1);
- let db = KeystoreDB::new(temp_dir.path())?;
+ let db = KeystoreDB::new(temp_dir.path(), None)?;
let entries_new = get_keyentry(&db)?;
assert_eq!(entries, entries_new);
@@ -2009,31 +3204,31 @@
#[test]
fn test_create_key_entry() -> Result<()> {
- fn extractor(ke: &KeyEntryRow) -> (Domain, i64, Option<&str>) {
- (ke.domain.unwrap(), ke.namespace.unwrap(), ke.alias.as_deref())
+ fn extractor(ke: &KeyEntryRow) -> (Domain, i64, Option<&str>, Uuid) {
+ (ke.domain.unwrap(), ke.namespace.unwrap(), ke.alias.as_deref(), ke.km_uuid.unwrap())
}
let mut db = new_test_db()?;
- db.create_key_entry(Domain::APP, 100)?;
- db.create_key_entry(Domain::SELINUX, 101)?;
+ db.create_key_entry(&Domain::APP, &100, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::SELINUX, &101, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
- assert_eq!(extractor(&entries[0]), (Domain::APP, 100, None));
- assert_eq!(extractor(&entries[1]), (Domain::SELINUX, 101, None));
+ assert_eq!(extractor(&entries[0]), (Domain::APP, 100, None, KEYSTORE_UUID));
+ assert_eq!(extractor(&entries[1]), (Domain::SELINUX, 101, None, KEYSTORE_UUID));
// Test that we must pass in a valid Domain.
check_result_is_error_containing_string(
- db.create_key_entry(Domain::GRANT, 102),
+ db.create_key_entry(&Domain::GRANT, &102, &KEYSTORE_UUID),
"Domain Domain(1) must be either App or SELinux.",
);
check_result_is_error_containing_string(
- db.create_key_entry(Domain::BLOB, 103),
+ db.create_key_entry(&Domain::BLOB, &103, &KEYSTORE_UUID),
"Domain Domain(3) must be either App or SELinux.",
);
check_result_is_error_containing_string(
- db.create_key_entry(Domain::KEY_ID, 104),
+ db.create_key_entry(&Domain::KEY_ID, &104, &KEYSTORE_UUID),
"Domain Domain(4) must be either App or SELinux.",
);
@@ -2041,32 +3236,222 @@
}
#[test]
+ fn test_add_unsigned_key() -> Result<()> {
+ let mut db = new_test_db()?;
+ let public_key: Vec<u8> = vec![0x01, 0x02, 0x03];
+ let private_key: Vec<u8> = vec![0x04, 0x05, 0x06];
+ let raw_public_key: Vec<u8> = vec![0x07, 0x08, 0x09];
+ db.create_attestation_key_entry(
+ &public_key,
+ &raw_public_key,
+ &private_key,
+ &KEYSTORE_UUID,
+ )?;
+ let keys = db.fetch_unsigned_attestation_keys(5, &KEYSTORE_UUID)?;
+ assert_eq!(keys.len(), 1);
+ assert_eq!(keys[0], public_key);
+ Ok(())
+ }
+
+ #[test]
+ fn test_store_signed_attestation_certificate_chain() -> Result<()> {
+ let mut db = new_test_db()?;
+ let expiration_date: i64 = 20;
+ let namespace: i64 = 30;
+ let base_byte: u8 = 1;
+ let loaded_values =
+ load_attestation_key_pool(&mut db, expiration_date, namespace, base_byte)?;
+ let chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
+ assert_eq!(true, chain.is_some());
+ let cert_chain = chain.unwrap();
+ assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
+ assert_eq!(cert_chain.batch_cert, loaded_values.batch_cert);
+ assert_eq!(cert_chain.cert_chain, loaded_values.cert_chain);
+ Ok(())
+ }
+
+ #[test]
+ fn test_get_attestation_pool_status() -> Result<()> {
+ let mut db = new_test_db()?;
+ let namespace: i64 = 30;
+ load_attestation_key_pool(
+ &mut db, 10, /* expiration */
+ namespace, 0x01, /* base_byte */
+ )?;
+ load_attestation_key_pool(&mut db, 20 /* expiration */, namespace + 1, 0x02)?;
+ load_attestation_key_pool(&mut db, 40 /* expiration */, namespace + 2, 0x03)?;
+ let mut status = db.get_attestation_pool_status(9 /* expiration */, &KEYSTORE_UUID)?;
+ assert_eq!(status.expiring, 0);
+ assert_eq!(status.attested, 3);
+ assert_eq!(status.unassigned, 0);
+ assert_eq!(status.total, 3);
+ assert_eq!(
+ db.get_attestation_pool_status(15 /* expiration */, &KEYSTORE_UUID)?.expiring,
+ 1
+ );
+ assert_eq!(
+ db.get_attestation_pool_status(25 /* expiration */, &KEYSTORE_UUID)?.expiring,
+ 2
+ );
+ assert_eq!(
+ db.get_attestation_pool_status(60 /* expiration */, &KEYSTORE_UUID)?.expiring,
+ 3
+ );
+ let public_key: Vec<u8> = vec![0x01, 0x02, 0x03];
+ let private_key: Vec<u8> = vec![0x04, 0x05, 0x06];
+ let raw_public_key: Vec<u8> = vec![0x07, 0x08, 0x09];
+ let cert_chain: Vec<u8> = vec![0x0a, 0x0b, 0x0c];
+ let batch_cert: Vec<u8> = vec![0x0d, 0x0e, 0x0f];
+ db.create_attestation_key_entry(
+ &public_key,
+ &raw_public_key,
+ &private_key,
+ &KEYSTORE_UUID,
+ )?;
+ status = db.get_attestation_pool_status(0 /* expiration */, &KEYSTORE_UUID)?;
+ assert_eq!(status.attested, 3);
+ assert_eq!(status.unassigned, 0);
+ assert_eq!(status.total, 4);
+ db.store_signed_attestation_certificate_chain(
+ &raw_public_key,
+ &batch_cert,
+ &cert_chain,
+ 20,
+ &KEYSTORE_UUID,
+ )?;
+ status = db.get_attestation_pool_status(0 /* expiration */, &KEYSTORE_UUID)?;
+ assert_eq!(status.attested, 4);
+ assert_eq!(status.unassigned, 1);
+ assert_eq!(status.total, 4);
+ Ok(())
+ }
+
+ #[test]
+ fn test_remove_expired_certs() -> Result<()> {
+ let temp_dir =
+ TempDir::new("test_remove_expired_certs_").expect("Failed to create temp dir.");
+ let mut db = new_test_db_with_gc(temp_dir.path(), |_, _| Ok(()))?;
+ let expiration_date: i64 =
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64 + 10000;
+ let namespace: i64 = 30;
+ let namespace_del1: i64 = 45;
+ let namespace_del2: i64 = 60;
+ let entry_values = load_attestation_key_pool(
+ &mut db,
+ expiration_date,
+ namespace,
+ 0x01, /* base_byte */
+ )?;
+ load_attestation_key_pool(&mut db, 45, namespace_del1, 0x02)?;
+ load_attestation_key_pool(&mut db, 60, namespace_del2, 0x03)?;
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // We expect 9 rows here because there are three blobs per attestation key, i.e.,
+ // one key, one certificate chain, and one certificate.
+ assert_eq!(blob_entry_row_count, 9);
+
+ assert_eq!(db.delete_expired_attestation_keys()?, 2);
+
+ let mut cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
+ assert!(cert_chain.is_some());
+ let value = cert_chain.unwrap();
+ assert_eq!(entry_values.batch_cert, value.batch_cert);
+ assert_eq!(entry_values.cert_chain, value.cert_chain);
+ assert_eq!(entry_values.priv_key, value.private_key.to_vec());
+
+ cert_chain = db.retrieve_attestation_key_and_cert_chain(
+ Domain::APP,
+ namespace_del1,
+ &KEYSTORE_UUID,
+ )?;
+ assert!(!cert_chain.is_some());
+ cert_chain = db.retrieve_attestation_key_and_cert_chain(
+ Domain::APP,
+ namespace_del2,
+ &KEYSTORE_UUID,
+ )?;
+ assert!(!cert_chain.is_some());
+
+ // Give the garbage collector half a second to catch up.
+ std::thread::sleep(Duration::from_millis(500));
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // There should be 3 blob entries left, because we deleted two of the attestation
+ // key entries with three blobs each.
+ assert_eq!(blob_entry_row_count, 3);
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_delete_all_attestation_keys() -> Result<()> {
+ let mut db = new_test_db()?;
+ load_attestation_key_pool(&mut db, 45 /* expiration */, 1 /* namespace */, 0x02)?;
+ load_attestation_key_pool(&mut db, 80 /* expiration */, 2 /* namespace */, 0x03)?;
+ db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
+ let result = db.delete_all_attestation_keys()?;
+
+ // Give the garbage collector half a second to catch up.
+ std::thread::sleep(Duration::from_millis(500));
+
+ // Attestation keys should be deleted, and the regular key should remain.
+ assert_eq!(result, 2);
+
+ Ok(())
+ }
+
+ #[test]
fn test_rebind_alias() -> Result<()> {
- fn extractor(ke: &KeyEntryRow) -> (Option<Domain>, Option<i64>, Option<&str>) {
- (ke.domain, ke.namespace, ke.alias.as_deref())
+ fn extractor(
+ ke: &KeyEntryRow,
+ ) -> (Option<Domain>, Option<i64>, Option<&str>, Option<Uuid>) {
+ (ke.domain, ke.namespace, ke.alias.as_deref(), ke.km_uuid)
}
let mut db = new_test_db()?;
- db.create_key_entry(Domain::APP, 42)?;
- db.create_key_entry(Domain::APP, 42)?;
+ db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
+ db.create_key_entry(&Domain::APP, &42, &KEYSTORE_UUID)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
- assert_eq!(extractor(&entries[0]), (Some(Domain::APP), Some(42), None));
- assert_eq!(extractor(&entries[1]), (Some(Domain::APP), Some(42), None));
+ assert_eq!(
+ extractor(&entries[0]),
+ (Some(Domain::APP), Some(42), None, Some(KEYSTORE_UUID))
+ );
+ assert_eq!(
+ extractor(&entries[1]),
+ (Some(Domain::APP), Some(42), None, Some(KEYSTORE_UUID))
+ );
// Test that the first call to rebind_alias sets the alias.
rebind_alias(&mut db, &KEY_ID_LOCK.get(entries[0].id), "foo", Domain::APP, 42)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
- assert_eq!(extractor(&entries[0]), (Some(Domain::APP), Some(42), Some("foo")));
- assert_eq!(extractor(&entries[1]), (Some(Domain::APP), Some(42), None));
+ assert_eq!(
+ extractor(&entries[0]),
+ (Some(Domain::APP), Some(42), Some("foo"), Some(KEYSTORE_UUID))
+ );
+ assert_eq!(
+ extractor(&entries[1]),
+ (Some(Domain::APP), Some(42), None, Some(KEYSTORE_UUID))
+ );
// Test that the second call to rebind_alias also empties the old one.
rebind_alias(&mut db, &KEY_ID_LOCK.get(entries[1].id), "foo", Domain::APP, 42)?;
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
- assert_eq!(extractor(&entries[0]), (None, None, None));
- assert_eq!(extractor(&entries[1]), (Some(Domain::APP), Some(42), Some("foo")));
+ assert_eq!(extractor(&entries[0]), (None, None, None, Some(KEYSTORE_UUID)));
+ assert_eq!(
+ extractor(&entries[1]),
+ (Some(Domain::APP), Some(42), Some("foo"), Some(KEYSTORE_UUID))
+ );
// Test that we must pass in a valid Domain.
check_result_is_error_containing_string(
@@ -2090,8 +3475,11 @@
// Test that we correctly abort the transaction in this case.
let entries = get_keyentry(&db)?;
assert_eq!(entries.len(), 2);
- assert_eq!(extractor(&entries[0]), (None, None, None));
- assert_eq!(extractor(&entries[1]), (Some(Domain::APP), Some(42), Some("foo")));
+ assert_eq!(extractor(&entries[0]), (None, None, None, Some(KEYSTORE_UUID)));
+ assert_eq!(
+ extractor(&entries[1]),
+ (Some(Domain::APP), Some(42), Some("foo"), Some(KEYSTORE_UUID))
+ );
Ok(())
}
@@ -2104,9 +3492,9 @@
let mut db = new_test_db()?;
db.conn.execute(
- "INSERT INTO persistent.keyentry (id, key_type, domain, namespace, alias, state)
- VALUES (1, 0, 0, 15, 'key', 1), (2, 0, 2, 7, 'yek', 1);",
- NO_PARAMS,
+ "INSERT INTO persistent.keyentry (id, key_type, domain, namespace, alias, state, km_uuid)
+ VALUES (1, 0, 0, 15, 'key', 1, ?), (2, 0, 2, 7, 'yek', 1, ?);",
+ params![KEYSTORE_UUID, KEYSTORE_UUID],
)?;
let app_key = KeyDescriptor {
domain: super::Domain::APP,
@@ -2123,7 +3511,7 @@
let next_random = 0i64;
let app_granted_key = db
- .grant(app_key.clone(), CALLER_UID, GRANTEE_UID, PVEC1, |k, a| {
+ .grant(&app_key, CALLER_UID, GRANTEE_UID, PVEC1, |k, a| {
assert_eq!(*a, PVEC1);
assert_eq!(
*k,
@@ -2158,7 +3546,7 @@
};
let selinux_granted_key = db
- .grant(selinux_key.clone(), CALLER_UID, 12, PVEC1, |k, a| {
+ .grant(&selinux_key, CALLER_UID, 12, PVEC1, |k, a| {
assert_eq!(*a, PVEC1);
assert_eq!(
*k,
@@ -2188,7 +3576,7 @@
// This should update the existing grant with PVEC2.
let selinux_granted_key = db
- .grant(selinux_key.clone(), CALLER_UID, 12, PVEC2, |k, a| {
+ .grant(&selinux_key, CALLER_UID, 12, PVEC2, |k, a| {
assert_eq!(*a, PVEC2);
assert_eq!(
*k,
@@ -2242,8 +3630,8 @@
println!("app_key {:?}", app_key);
println!("selinux_key {:?}", selinux_key);
- db.ungrant(app_key, CALLER_UID, GRANTEE_UID, |_| Ok(()))?;
- db.ungrant(selinux_key, CALLER_UID, GRANTEE_UID, |_| Ok(()))?;
+ db.ungrant(&app_key, CALLER_UID, GRANTEE_UID, |_| Ok(()))?;
+ db.ungrant(&selinux_key, CALLER_UID, GRANTEE_UID, |_| Ok(()))?;
Ok(())
}
@@ -2253,29 +3641,46 @@
static TEST_CERT_CHAIN_BLOB: &[u8] = b"my test cert_chain";
#[test]
- fn test_insert_blob() -> Result<()> {
+ fn test_set_blob() -> Result<()> {
let key_id = KEY_ID_LOCK.get(3000);
let mut db = new_test_db()?;
- db.insert_blob(&key_id, SubComponentType::KEY_BLOB, TEST_KEY_BLOB)?;
- db.insert_blob(&key_id, SubComponentType::CERT, TEST_CERT_BLOB)?;
- db.insert_blob(&key_id, SubComponentType::CERT_CHAIN, TEST_CERT_CHAIN_BLOB)?;
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+ db.set_blob(
+ &key_id,
+ SubComponentType::KEY_BLOB,
+ Some(TEST_KEY_BLOB),
+ Some(&blob_metadata),
+ )?;
+ db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB), None)?;
+ db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB), None)?;
drop(key_id);
let mut stmt = db.conn.prepare(
- "SELECT subcomponent_type, keyentryid, blob FROM persistent.blobentry
+ "SELECT subcomponent_type, keyentryid, blob, id FROM persistent.blobentry
ORDER BY subcomponent_type ASC;",
)?;
let mut rows = stmt
- .query_map::<(SubComponentType, i64, Vec<u8>), _, _>(NO_PARAMS, |row| {
- Ok((row.get(0)?, row.get(1)?, row.get(2)?))
+ .query_map::<((SubComponentType, i64, Vec<u8>), i64), _, _>(NO_PARAMS, |row| {
+ Ok(((row.get(0)?, row.get(1)?, row.get(2)?), row.get(3)?))
})?;
- let r = rows.next().unwrap().unwrap();
+ let (r, id) = rows.next().unwrap().unwrap();
assert_eq!(r, (SubComponentType::KEY_BLOB, 3000, TEST_KEY_BLOB.to_vec()));
- let r = rows.next().unwrap().unwrap();
+ let (r, _) = rows.next().unwrap().unwrap();
assert_eq!(r, (SubComponentType::CERT, 3000, TEST_CERT_BLOB.to_vec()));
- let r = rows.next().unwrap().unwrap();
+ let (r, _) = rows.next().unwrap().unwrap();
assert_eq!(r, (SubComponentType::CERT_CHAIN, 3000, TEST_CERT_CHAIN_BLOB.to_vec()));
+ drop(rows);
+ drop(stmt);
+
+ assert_eq!(
+ db.with_transaction(TransactionBehavior::Immediate, |tx| {
+ BlobMetaData::load_from_db(id, tx).no_gc()
+ })
+ .expect("Should find blob metadata."),
+ blob_metadata
+ );
Ok(())
}
@@ -2289,7 +3694,7 @@
.0;
let (_key_guard, key_entry) = db
.load_key_entry(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(TEST_ALIAS.to_string()),
@@ -2304,7 +3709,7 @@
assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
db.unbind_key(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(TEST_ALIAS.to_string()),
@@ -2319,7 +3724,7 @@
assert_eq!(
Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
db.load_key_entry(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(TEST_ALIAS.to_string()),
@@ -2339,6 +3744,76 @@
}
#[test]
+ fn test_insert_and_load_certificate_entry_domain_app() -> Result<()> {
+ let mut db = new_test_db()?;
+
+ db.store_new_certificate(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: 1,
+ alias: Some(TEST_ALIAS.to_string()),
+ blob: None,
+ },
+ TEST_CERT_BLOB,
+ &KEYSTORE_UUID,
+ )
+ .expect("Trying to insert cert.");
+
+ let (_key_guard, mut key_entry) = db
+ .load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: 1,
+ alias: Some(TEST_ALIAS.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::PUBLIC,
+ 1,
+ |_k, _av| Ok(()),
+ )
+ .expect("Trying to read certificate entry.");
+
+ assert!(key_entry.pure_cert());
+ assert!(key_entry.cert().is_none());
+ assert_eq!(key_entry.take_cert_chain(), Some(TEST_CERT_BLOB.to_vec()));
+
+ db.unbind_key(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: 1,
+ alias: Some(TEST_ALIAS.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ 1,
+ |_, _| Ok(()),
+ )
+ .unwrap();
+
+ assert_eq!(
+ Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
+ db.load_key_entry(
+ &KeyDescriptor {
+ domain: Domain::APP,
+ nspace: 1,
+ alias: Some(TEST_ALIAS.to_string()),
+ blob: None,
+ },
+ KeyType::Client,
+ KeyEntryLoadBits::NONE,
+ 1,
+ |_k, _av| Ok(()),
+ )
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<KsError>()
+ );
+
+ Ok(())
+ }
+
+ #[test]
fn test_insert_and_load_full_keyentry_domain_selinux() -> Result<()> {
let mut db = new_test_db()?;
let key_id = make_test_key_entry(&mut db, Domain::SELINUX, 1, TEST_ALIAS, None)
@@ -2346,7 +3821,7 @@
.0;
let (_key_guard, key_entry) = db
.load_key_entry(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::SELINUX,
nspace: 1,
alias: Some(TEST_ALIAS.to_string()),
@@ -2361,7 +3836,7 @@
assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
db.unbind_key(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::SELINUX,
nspace: 1,
alias: Some(TEST_ALIAS.to_string()),
@@ -2376,7 +3851,7 @@
assert_eq!(
Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
db.load_key_entry(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::SELINUX,
nspace: 1,
alias: Some(TEST_ALIAS.to_string()),
@@ -2403,7 +3878,7 @@
.0;
let (_, key_entry) = db
.load_key_entry(
- KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
+ &KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
KeyType::Client,
KeyEntryLoadBits::BOTH,
1,
@@ -2414,7 +3889,7 @@
assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
db.unbind_key(
- KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
+ &KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
KeyType::Client,
1,
|_, _| Ok(()),
@@ -2424,7 +3899,7 @@
assert_eq!(
Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
db.load_key_entry(
- KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
+ &KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
KeyType::Client,
KeyEntryLoadBits::NONE,
1,
@@ -2448,7 +3923,7 @@
db.check_and_update_key_usage_count(key_id)?;
let (_key_guard, key_entry) = db.load_key_entry(
- KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
+ &KeyDescriptor { domain: Domain::KEY_ID, nspace: key_id, alias: None, blob: None },
KeyType::Client,
KeyEntryLoadBits::BOTH,
1,
@@ -2495,7 +3970,7 @@
let granted_key = db
.grant(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(TEST_ALIAS.to_string()),
@@ -2511,27 +3986,21 @@
debug_dump_grant_table(&mut db)?;
let (_key_guard, key_entry) = db
- .load_key_entry(
- granted_key.clone(),
- KeyType::Client,
- KeyEntryLoadBits::BOTH,
- 2,
- |k, av| {
- assert_eq!(Domain::GRANT, k.domain);
- assert!(av.unwrap().includes(KeyPerm::use_()));
- Ok(())
- },
- )
+ .load_key_entry(&granted_key, KeyType::Client, KeyEntryLoadBits::BOTH, 2, |k, av| {
+ assert_eq!(Domain::GRANT, k.domain);
+ assert!(av.unwrap().includes(KeyPerm::use_()));
+ Ok(())
+ })
.unwrap();
assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
- db.unbind_key(granted_key.clone(), KeyType::Client, 2, |_, _| Ok(())).unwrap();
+ db.unbind_key(&granted_key, KeyType::Client, 2, |_, _| Ok(())).unwrap();
assert_eq!(
Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
db.load_key_entry(
- granted_key,
+ &granted_key,
KeyType::Client,
KeyEntryLoadBits::NONE,
2,
@@ -2558,7 +4027,7 @@
.0;
db.grant(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(TEST_ALIAS.to_string()),
@@ -2578,7 +4047,7 @@
let (_, key_entry) = db
.load_key_entry(
- id_descriptor.clone(),
+ &id_descriptor,
KeyType::Client,
KeyEntryLoadBits::BOTH,
GRANTEE_UID,
@@ -2595,7 +4064,7 @@
let (_, key_entry) = db
.load_key_entry(
- id_descriptor.clone(),
+ &id_descriptor,
KeyType::Client,
KeyEntryLoadBits::BOTH,
SOMEONE_ELSE_UID,
@@ -2610,12 +4079,12 @@
assert_eq!(key_entry, make_test_key_entry_test_vector(key_id, None));
- db.unbind_key(id_descriptor.clone(), KeyType::Client, OWNER_UID, |_, _| Ok(())).unwrap();
+ db.unbind_key(&id_descriptor, KeyType::Client, OWNER_UID, |_, _| Ok(())).unwrap();
assert_eq!(
Some(&KsError::Rc(ResponseCode::KEY_NOT_FOUND)),
db.load_key_entry(
- id_descriptor,
+ &id_descriptor,
KeyType::Client,
KeyEntryLoadBits::NONE,
GRANTEE_UID,
@@ -2636,13 +4105,13 @@
let handle = {
let temp_dir = Arc::new(TempDir::new("id_lock_test")?);
let temp_dir_clone = temp_dir.clone();
- let mut db = KeystoreDB::new(temp_dir.path())?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
let key_id = make_test_key_entry(&mut db, Domain::APP, 33, KEY_LOCK_TEST_ALIAS, None)
.context("test_insert_and_load_full_keyentry_domain_app")?
.0;
let (_key_guard, key_entry) = db
.load_key_entry(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(KEY_LOCK_TEST_ALIAS.to_string()),
@@ -2667,10 +4136,10 @@
// the primary thread.
let handle = thread::spawn(move || {
let temp_dir = temp_dir_clone;
- let mut db = KeystoreDB::new(temp_dir.path()).unwrap();
+ let mut db = KeystoreDB::new(temp_dir.path(), None).unwrap();
assert!(db
.load_key_entry(
- KeyDescriptor {
+ &KeyDescriptor {
domain: Domain::APP,
nspace: 0,
alias: Some(KEY_LOCK_TEST_ALIAS.to_string()),
@@ -2706,9 +4175,172 @@
}
#[test]
+ fn test_database_busy_error_code() {
+ let temp_dir =
+ TempDir::new("test_database_busy_error_code_").expect("Failed to create temp dir.");
+
+ let mut db1 = KeystoreDB::new(temp_dir.path(), None).expect("Failed to open database1.");
+ let mut db2 = KeystoreDB::new(temp_dir.path(), None).expect("Failed to open database2.");
+
+ let _tx1 = db1
+ .conn
+ .transaction_with_behavior(TransactionBehavior::Immediate)
+ .expect("Failed to create first transaction.");
+
+ let error = db2
+ .conn
+ .transaction_with_behavior(TransactionBehavior::Immediate)
+ .context("Transaction begin failed.")
+ .expect_err("This should fail.");
+ let root_cause = error.root_cause();
+ if let Some(rusqlite::ffi::Error { code: rusqlite::ErrorCode::DatabaseBusy, .. }) =
+ root_cause.downcast_ref::<rusqlite::ffi::Error>()
+ {
+ return;
+ }
+ panic!(
+ "Unexpected error {:?} \n{:?} \n{:?}",
+ error,
+ root_cause,
+ root_cause.downcast_ref::<rusqlite::ffi::Error>()
+ )
+ }
+
+ #[cfg(disabled)]
+ #[test]
+ fn test_large_number_of_concurrent_db_manipulations() -> Result<()> {
+ let temp_dir = Arc::new(
+ TempDir::new("test_large_number_of_concurrent_db_manipulations_")
+ .expect("Failed to create temp dir."),
+ );
+
+ let test_begin = Instant::now();
+
+ let mut db = KeystoreDB::new(temp_dir.path()).expect("Failed to open database.");
+ const KEY_COUNT: u32 = 500u32;
+ const OPEN_DB_COUNT: u32 = 50u32;
+
+ let mut actual_key_count = KEY_COUNT;
+ // First insert KEY_COUNT keys.
+ for count in 0..KEY_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(15) {
+ actual_key_count = count;
+ break;
+ }
+ let alias = format!("test_alias_{}", count);
+ make_test_key_entry(&mut db, Domain::APP, 1, &alias, None)
+ .expect("Failed to make key entry.");
+ }
+
+ // Insert more keys from a different thread and into a different namespace.
+ let temp_dir1 = temp_dir.clone();
+ let handle1 = thread::spawn(move || {
+ let mut db = KeystoreDB::new(temp_dir1.path()).expect("Failed to open database.");
+
+ for count in 0..actual_key_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let alias = format!("test_alias_{}", count);
+ make_test_key_entry(&mut db, Domain::APP, 2, &alias, None)
+ .expect("Failed to make key entry.");
+ }
+
+ // then unbind them again.
+ for count in 0..actual_key_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let key = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(format!("test_alias_{}", count)),
+ blob: None,
+ };
+ db.unbind_key(&key, KeyType::Client, 2, |_, _| Ok(())).expect("Unbind Failed.");
+ }
+ });
+
+ // And start unbinding the first set of keys.
+ let temp_dir2 = temp_dir.clone();
+ let handle2 = thread::spawn(move || {
+ let mut db = KeystoreDB::new(temp_dir2.path()).expect("Failed to open database.");
+
+ for count in 0..actual_key_count {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let key = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(format!("test_alias_{}", count)),
+ blob: None,
+ };
+ db.unbind_key(&key, KeyType::Client, 1, |_, _| Ok(())).expect("Unbind Failed.");
+ }
+ });
+
+ let stop_deleting = Arc::new(AtomicU8::new(0));
+ let stop_deleting2 = stop_deleting.clone();
+
+ // And delete any unreferenced keys.
+ let temp_dir3 = temp_dir.clone();
+ let handle3 = thread::spawn(move || {
+ let mut db = KeystoreDB::new(temp_dir3.path()).expect("Failed to open database.");
+
+ while stop_deleting2.load(Ordering::Relaxed) != 1 {
+ while let Some((key_guard, _key)) =
+ db.get_unreferenced_key().expect("Failed to get unreferenced Key.")
+ {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ db.purge_key_entry(key_guard).expect("Failed to purge key.");
+ }
+ std::thread::sleep(std::time::Duration::from_millis(100));
+ }
+ });
+
+ // While a lot of inserting and deleting is going on we have to open database connections
+ // successfully and use them.
+ // This clone is not redundant, because temp_dir needs to be kept alive until db goes
+ // out of scope.
+ #[allow(clippy::redundant_clone)]
+ let temp_dir4 = temp_dir.clone();
+ let handle4 = thread::spawn(move || {
+ for count in 0..OPEN_DB_COUNT {
+ if Instant::now().duration_since(test_begin) >= Duration::from_secs(40) {
+ return;
+ }
+ let mut db = KeystoreDB::new(temp_dir4.path()).expect("Failed to open database.");
+
+ let alias = format!("test_alias_{}", count);
+ make_test_key_entry(&mut db, Domain::APP, 3, &alias, None)
+ .expect("Failed to make key entry.");
+ let key = KeyDescriptor {
+ domain: Domain::APP,
+ nspace: -1,
+ alias: Some(alias),
+ blob: None,
+ };
+ db.unbind_key(&key, KeyType::Client, 3, |_, _| Ok(())).expect("Unbind Failed.");
+ }
+ });
+
+ handle1.join().expect("Thread 1 panicked.");
+ handle2.join().expect("Thread 2 panicked.");
+ handle4.join().expect("Thread 4 panicked.");
+
+ stop_deleting.store(1, Ordering::Relaxed);
+ handle3.join().expect("Thread 3 panicked.");
+
+ Ok(())
+ }
+
+ #[test]
fn list() -> Result<()> {
let temp_dir = TempDir::new("list_test")?;
- let mut db = KeystoreDB::new(temp_dir.path())?;
+ let mut db = KeystoreDB::new(temp_dir.path(), None)?;
static LIST_O_ENTRIES: &[(Domain, i64, &str)] = &[
(Domain::APP, 1, "test1"),
(Domain::APP, 1, "test2"),
@@ -2769,7 +4401,7 @@
.map(|d| {
let (_, entry) = db
.load_key_entry(
- d,
+ &d,
KeyType::Client,
KeyEntryLoadBits::NONE,
*namespace as u32,
@@ -2812,7 +4444,6 @@
}
#[derive(Debug, PartialEq)]
- #[allow(dead_code)]
struct KeyEntryRow {
id: i64,
key_type: KeyType,
@@ -2820,6 +4451,7 @@
namespace: Option<i64>,
alias: Option<String>,
state: KeyLifeCycle,
+ km_uuid: Option<Uuid>,
}
fn get_keyentry(db: &KeystoreDB) -> Result<Vec<KeyEntryRow>> {
@@ -2836,12 +4468,42 @@
namespace: row.get(3)?,
alias: row.get(4)?,
state: row.get(5)?,
+ km_uuid: row.get(6)?,
})
})?
.map(|r| r.context("Could not read keyentry row."))
.collect::<Result<Vec<_>>>()
}
+ struct RemoteProvValues {
+ cert_chain: Vec<u8>,
+ priv_key: Vec<u8>,
+ batch_cert: Vec<u8>,
+ }
+
+ fn load_attestation_key_pool(
+ db: &mut KeystoreDB,
+ expiration_date: i64,
+ namespace: i64,
+ base_byte: u8,
+ ) -> Result<RemoteProvValues> {
+ let public_key: Vec<u8> = vec![base_byte, 0x02 * base_byte];
+ let cert_chain: Vec<u8> = vec![0x03 * base_byte, 0x04 * base_byte];
+ let priv_key: Vec<u8> = vec![0x05 * base_byte, 0x06 * base_byte];
+ let raw_public_key: Vec<u8> = vec![0x0b * base_byte, 0x0c * base_byte];
+ let batch_cert: Vec<u8> = vec![base_byte * 0x0d, base_byte * 0x0e];
+ db.create_attestation_key_entry(&public_key, &raw_public_key, &priv_key, &KEYSTORE_UUID)?;
+ db.store_signed_attestation_certificate_chain(
+ &raw_public_key,
+ &batch_cert,
+ &cert_chain,
+ expiration_date,
+ &KEYSTORE_UUID,
+ )?;
+ db.assign_attestation_key(Domain::APP, namespace, &KEYSTORE_UUID)?;
+ Ok(RemoteProvValues { cert_chain, priv_key, batch_cert })
+ }
+
// Note: The parameters and SecurityLevel associations are nonsensical. This
// collection is only used to check if the parameters are preserved as expected by the
// database.
@@ -3078,19 +4740,28 @@
alias: &str,
max_usage_count: Option<i32>,
) -> Result<KeyIdGuard> {
- let key_id = db.create_key_entry(domain, namespace)?;
- db.insert_blob(&key_id, SubComponentType::KEY_BLOB, TEST_KEY_BLOB)?;
- db.insert_blob(&key_id, SubComponentType::CERT, TEST_CERT_BLOB)?;
- db.insert_blob(&key_id, SubComponentType::CERT_CHAIN, TEST_CERT_CHAIN_BLOB)?;
+ let key_id = db.create_key_entry(&domain, &namespace, &KEYSTORE_UUID)?;
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ blob_metadata.add(BlobMetaEntry::Salt(vec![1, 2, 3]));
+ blob_metadata.add(BlobMetaEntry::Iv(vec![2, 3, 1]));
+ blob_metadata.add(BlobMetaEntry::AeadTag(vec![3, 1, 2]));
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+
+ db.set_blob(
+ &key_id,
+ SubComponentType::KEY_BLOB,
+ Some(TEST_KEY_BLOB),
+ Some(&blob_metadata),
+ )?;
+ db.set_blob(&key_id, SubComponentType::CERT, Some(TEST_CERT_BLOB), None)?;
+ db.set_blob(&key_id, SubComponentType::CERT_CHAIN, Some(TEST_CERT_CHAIN_BLOB), None)?;
let params = make_test_params(max_usage_count);
db.insert_keyparameter(&key_id, ¶ms)?;
let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::EncryptedBy(EncryptedBy::Password));
- metadata.add(KeyMetaEntry::Salt(vec![1, 2, 3]));
- metadata.add(KeyMetaEntry::Iv(vec![2, 3, 1]));
- metadata.add(KeyMetaEntry::AeadTag(vec![3, 1, 2]));
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
db.insert_key_metadata(&key_id, &metadata)?;
rebind_alias(db, &key_id, alias, domain, namespace)?;
Ok(key_id)
@@ -3099,40 +4770,53 @@
fn make_test_key_entry_test_vector(key_id: i64, max_usage_count: Option<i32>) -> KeyEntry {
let params = make_test_params(max_usage_count);
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ blob_metadata.add(BlobMetaEntry::Salt(vec![1, 2, 3]));
+ blob_metadata.add(BlobMetaEntry::Iv(vec![2, 3, 1]));
+ blob_metadata.add(BlobMetaEntry::AeadTag(vec![3, 1, 2]));
+ blob_metadata.add(BlobMetaEntry::KmUuid(KEYSTORE_UUID));
+
let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::EncryptedBy(EncryptedBy::Password));
- metadata.add(KeyMetaEntry::Salt(vec![1, 2, 3]));
- metadata.add(KeyMetaEntry::Iv(vec![2, 3, 1]));
- metadata.add(KeyMetaEntry::AeadTag(vec![3, 1, 2]));
+ metadata.add(KeyMetaEntry::CreationDate(DateTime::from_millis_epoch(123456789)));
KeyEntry {
id: key_id,
- km_blob: Some(TEST_KEY_BLOB.to_vec()),
+ key_blob_info: Some((TEST_KEY_BLOB.to_vec(), blob_metadata)),
cert: Some(TEST_CERT_BLOB.to_vec()),
cert_chain: Some(TEST_CERT_CHAIN_BLOB.to_vec()),
- sec_level: SecurityLevel::TRUSTED_ENVIRONMENT,
+ km_uuid: KEYSTORE_UUID,
parameters: params,
metadata,
+ pure_cert: false,
}
}
fn debug_dump_keyentry_table(db: &mut KeystoreDB) -> Result<()> {
let mut stmt = db.conn.prepare(
- "SELECT id, key_type, domain, namespace, alias, state FROM persistent.keyentry;",
+ "SELECT id, key_type, domain, namespace, alias, state, km_uuid FROM persistent.keyentry;",
)?;
- let rows = stmt.query_map::<(i64, KeyType, i32, i64, String, KeyLifeCycle), _, _>(
+ let rows = stmt.query_map::<(i64, KeyType, i32, i64, String, KeyLifeCycle, Uuid), _, _>(
NO_PARAMS,
|row| {
- Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?, row.get(5)?))
+ Ok((
+ row.get(0)?,
+ row.get(1)?,
+ row.get(2)?,
+ row.get(3)?,
+ row.get(4)?,
+ row.get(5)?,
+ row.get(6)?,
+ ))
},
)?;
println!("Key entry table rows:");
for r in rows {
- let (id, key_type, domain, namespace, alias, state) = r.unwrap();
+ let (id, key_type, domain, namespace, alias, state, km_uuid) = r.unwrap();
println!(
- " id: {} KeyType: {:?} Domain: {} Namespace: {} Alias: {} State: {:?}",
- id, key_type, domain, namespace, alias, state
+ " id: {} KeyType: {:?} Domain: {} Namespace: {} Alias: {} State: {:?} KmUuid: {:?}",
+ id, key_type, domain, namespace, alias, state, km_uuid
);
}
Ok(())
@@ -3191,4 +4875,58 @@
assert!(last_off_body_1.seconds() < last_off_body_2.seconds());
Ok(())
}
+
+ #[test]
+ fn test_unbind_keys_for_user() -> Result<()> {
+ let mut db = new_test_db()?;
+ db.unbind_keys_for_user(1, false)?;
+
+ make_test_key_entry(&mut db, Domain::APP, 210000, TEST_ALIAS, None)?;
+ make_test_key_entry(&mut db, Domain::APP, 110000, TEST_ALIAS, None)?;
+ db.unbind_keys_for_user(2, false)?;
+
+ assert_eq!(1, db.list(Domain::APP, 110000)?.len());
+ assert_eq!(0, db.list(Domain::APP, 210000)?.len());
+
+ db.unbind_keys_for_user(1, true)?;
+ assert_eq!(0, db.list(Domain::APP, 110000)?.len());
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_store_super_key() -> Result<()> {
+ let mut db = new_test_db()?;
+ let pw: keystore2_crypto::Password = (&b"xyzabc"[..]).into();
+ let super_key = keystore2_crypto::generate_aes256_key()?;
+ let secret_bytes = b"keystore2 is great.";
+ let (encrypted_secret, iv, tag) =
+ keystore2_crypto::aes_gcm_encrypt(secret_bytes, &super_key)?;
+
+ let (encrypted_super_key, metadata) =
+ SuperKeyManager::encrypt_with_password(&super_key, &pw)?;
+ db.store_super_key(
+ 1,
+ &USER_SUPER_KEY,
+ &encrypted_super_key,
+ &metadata,
+ &KeyMetaData::new(),
+ )?;
+
+ // Check that the super key exists.
+ assert!(db.key_exists(Domain::APP, 1, &USER_SUPER_KEY.alias, KeyType::Super)?);
+
+ let (_, key_entry) = db.load_super_key(&USER_SUPER_KEY, 1)?.unwrap();
+ let loaded_super_key = SuperKeyManager::extract_super_key_from_key_entry(
+ USER_SUPER_KEY.algorithm,
+ key_entry,
+ &pw,
+ None,
+ )?;
+
+ let decrypted_secret_bytes =
+ loaded_super_key.aes_gcm_decrypt(&encrypted_secret, &iv, &tag)?;
+ assert_eq!(secret_bytes, &*decrypted_secret_bytes);
+ Ok(())
+ }
}
diff --git a/keystore2/src/ec_crypto.rs b/keystore2/src/ec_crypto.rs
new file mode 100644
index 0000000..0425d4a
--- /dev/null
+++ b/keystore2/src/ec_crypto.rs
@@ -0,0 +1,137 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implement ECDH-based encryption.
+
+use anyhow::{Context, Result};
+use keystore2_crypto::{
+ aes_gcm_decrypt, aes_gcm_encrypt, ec_key_generate_key, ec_key_get0_public_key,
+ ec_key_marshal_private_key, ec_key_parse_private_key, ec_point_oct_to_point,
+ ec_point_point_to_oct, ecdh_compute_key, generate_salt, hkdf_expand, hkdf_extract, ECKey, ZVec,
+ AES_256_KEY_LENGTH,
+};
+
+/// Private key for ECDH encryption.
+pub struct ECDHPrivateKey(ECKey);
+
+impl ECDHPrivateKey {
+ /// Randomly generate a fresh keypair.
+ pub fn generate() -> Result<ECDHPrivateKey> {
+ ec_key_generate_key()
+ .map(ECDHPrivateKey)
+ .context("In ECDHPrivateKey::generate: generation failed")
+ }
+
+ /// Deserialize bytes into an ECDH keypair
+ pub fn from_private_key(buf: &[u8]) -> Result<ECDHPrivateKey> {
+ ec_key_parse_private_key(buf)
+ .map(ECDHPrivateKey)
+ .context("In ECDHPrivateKey::from_private_key: parsing failed")
+ }
+
+ /// Serialize the ECDH key into bytes
+ pub fn private_key(&self) -> Result<ZVec> {
+ ec_key_marshal_private_key(&self.0)
+ .context("In ECDHPrivateKey::private_key: marshalling failed")
+ }
+
+ /// Generate the serialization of the corresponding public key
+ pub fn public_key(&self) -> Result<Vec<u8>> {
+ let point = ec_key_get0_public_key(&self.0);
+ ec_point_point_to_oct(point.get_point())
+ .context("In ECDHPrivateKey::public_key: marshalling failed")
+ }
+
+ /// Use ECDH to agree an AES key with another party whose public key we have.
+ /// Sender and recipient public keys are passed separately because they are
+ /// switched in encryption vs decryption.
+ fn agree_key(
+ &self,
+ salt: &[u8],
+ other_public_key: &[u8],
+ sender_public_key: &[u8],
+ recipient_public_key: &[u8],
+ ) -> Result<ZVec> {
+ let hkdf = hkdf_extract(sender_public_key, salt)
+ .context("In ECDHPrivateKey::agree_key: hkdf_extract on sender_public_key failed")?;
+ let hkdf = hkdf_extract(recipient_public_key, &hkdf)
+ .context("In ECDHPrivateKey::agree_key: hkdf_extract on recipient_public_key failed")?;
+ let other_public_key = ec_point_oct_to_point(other_public_key)
+ .context("In ECDHPrivateKey::agree_key: ec_point_oct_to_point failed")?;
+ let secret = ecdh_compute_key(other_public_key.get_point(), &self.0)
+ .context("In ECDHPrivateKey::agree_key: ecdh_compute_key failed")?;
+ let prk = hkdf_extract(&secret, &hkdf)
+ .context("In ECDHPrivateKey::agree_key: hkdf_extract on secret failed")?;
+
+ let aes_key = hkdf_expand(AES_256_KEY_LENGTH, &prk, b"AES-256-GCM key")
+ .context("In ECDHPrivateKey::agree_key: hkdf_expand failed")?;
+ Ok(aes_key)
+ }
+
+ /// Encrypt a message to the party with the given public key
+ pub fn encrypt_message(
+ recipient_public_key: &[u8],
+ message: &[u8],
+ ) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)> {
+ let sender_key =
+ Self::generate().context("In ECDHPrivateKey::encrypt_message: generate failed")?;
+ let sender_public_key = sender_key
+ .public_key()
+ .context("In ECDHPrivateKey::encrypt_message: public_key failed")?;
+ let salt =
+ generate_salt().context("In ECDHPrivateKey::encrypt_message: generate_salt failed")?;
+ let aes_key = sender_key
+ .agree_key(&salt, recipient_public_key, &sender_public_key, recipient_public_key)
+ .context("In ECDHPrivateKey::encrypt_message: agree_key failed")?;
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(message, &aes_key)
+ .context("In ECDHPrivateKey::encrypt_message: aes_gcm_encrypt failed")?;
+ Ok((sender_public_key, salt, iv, ciphertext, tag))
+ }
+
+ /// Decrypt a message sent to us
+ pub fn decrypt_message(
+ &self,
+ sender_public_key: &[u8],
+ salt: &[u8],
+ iv: &[u8],
+ ciphertext: &[u8],
+ tag: &[u8],
+ ) -> Result<ZVec> {
+ let recipient_public_key = self.public_key()?;
+ let aes_key = self
+ .agree_key(salt, sender_public_key, sender_public_key, &recipient_public_key)
+ .context("In ECDHPrivateKey::decrypt_message: agree_key failed")?;
+ aes_gcm_decrypt(ciphertext, iv, tag, &aes_key)
+ .context("In ECDHPrivateKey::decrypt_message: aes_gcm_decrypt failed")
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_crypto_roundtrip() -> Result<()> {
+ let message = b"Hello world";
+ let recipient = ECDHPrivateKey::generate()?;
+ let (sender_public_key, salt, iv, ciphertext, tag) =
+ ECDHPrivateKey::encrypt_message(&recipient.public_key()?, message)?;
+ let recipient = ECDHPrivateKey::from_private_key(&recipient.private_key()?)?;
+ let decrypted =
+ recipient.decrypt_message(&sender_public_key, &salt, &iv, &ciphertext, &tag)?;
+ let dc: &[u8] = &decrypted;
+ assert_eq!(message, dc);
+ Ok(())
+ }
+}
diff --git a/keystore2/src/enforcements.rs b/keystore2/src/enforcements.rs
index 3195ee0..3f003be 100644
--- a/keystore2/src/enforcements.rs
+++ b/keystore2/src/enforcements.rs
@@ -14,13 +14,11 @@
//! This is the Keystore 2.0 Enforcements module.
// TODO: more description to follow.
+use crate::database::{AuthTokenEntry, MonotonicRawTime};
use crate::error::{map_binder_status, Error, ErrorCode};
use crate::globals::{get_timestamp_service, ASYNC_TASK, DB, ENFORCEMENTS};
use crate::key_parameter::{KeyParameter, KeyParameterValue};
-use crate::{
- database::{AuthTokenEntry, MonotonicRawTime},
- gc::Gc,
-};
+use crate::{authorization::Error as AuthzError, super_key::SuperEncryptionType};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
Algorithm::Algorithm, ErrorCode::ErrorCode as Ec, HardwareAuthToken::HardwareAuthToken,
HardwareAuthenticatorType::HardwareAuthenticatorType,
@@ -29,14 +27,23 @@
use android_hardware_security_secureclock::aidl::android::hardware::security::secureclock::{
ISecureClock::ISecureClock, TimeStampToken::TimeStampToken,
};
-use android_system_keystore2::aidl::android::system::keystore2::OperationChallenge::OperationChallenge;
-use anyhow::{Context, Result};
-use std::collections::{HashMap, HashSet};
-use std::sync::{
- mpsc::{channel, Receiver, Sender},
- Arc, Mutex, Weak,
+use android_security_authorization::aidl::android::security::authorization::ResponseCode::ResponseCode as AuthzResponseCode;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, IKeystoreSecurityLevel::KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING,
+ OperationChallenge::OperationChallenge,
};
-use std::time::SystemTime;
+use android_system_keystore2::binder::Strong;
+use anyhow::{Context, Result};
+use keystore2_system_property::PropertyWatcher;
+use std::{
+ collections::{HashMap, HashSet},
+ sync::{
+ atomic::{AtomicI32, Ordering},
+ mpsc::{channel, Receiver, Sender, TryRecvError},
+ Arc, Mutex, Weak,
+ },
+ time::SystemTime,
+};
#[derive(Debug)]
enum AuthRequestState {
@@ -133,6 +140,7 @@
state: DeferredAuthState,
/// An optional key id required to update the usage count if the key usage is limited.
key_usage_limited: Option<i64>,
+ confirmation_token_receiver: Option<Arc<Mutex<Option<Receiver<Vec<u8>>>>>>,
}
struct TokenReceiverMap {
@@ -198,7 +206,7 @@
}
fn get_timestamp_token(challenge: i64) -> Result<TimeStampToken, Error> {
- let dev: Box<dyn ISecureClock> = get_timestamp_service()
+ let dev: Strong<dyn ISecureClock> = get_timestamp_service()
.expect(concat!(
"Secure Clock service must be present ",
"if TimeStampTokens are required."
@@ -212,7 +220,7 @@
if let Err(e) = sender.send(get_timestamp_token(challenge)) {
log::info!(
concat!(
- "In timestamp_token_request: Operation hung up ",
+ "In timestamp_token_request: Receiver hung up ",
"before timestamp token could be delivered. {:?}"
),
e
@@ -240,7 +248,7 @@
let token_receiver = TokenReceiver(Arc::downgrade(&auth_request));
ENFORCEMENTS.register_op_auth_receiver(challenge, token_receiver);
- ASYNC_TASK.queue_hi(move || timestamp_token_request(challenge, sender));
+ ASYNC_TASK.queue_hi(move |_| timestamp_token_request(challenge, sender));
self.state = DeferredAuthState::Waiting(auth_request);
Some(OperationChallenge { challenge })
}
@@ -248,7 +256,7 @@
let hat = (*hat).clone();
let (sender, receiver) = channel::<Result<TimeStampToken, Error>>();
let auth_request = AuthRequest::timestamp(hat, receiver);
- ASYNC_TASK.queue_hi(move || timestamp_token_request(challenge, sender));
+ ASYNC_TASK.queue_hi(move |_| timestamp_token_request(challenge, sender));
self.state = DeferredAuthState::Waiting(auth_request);
None
}
@@ -264,8 +272,32 @@
/// This function is the authorization hook called before operation finish.
/// It returns the auth tokens required by the operation to commence finish.
- pub fn before_finish(&mut self) -> Result<(Option<HardwareAuthToken>, Option<TimeStampToken>)> {
- self.get_auth_tokens()
+ /// The third token is a confirmation token.
+ pub fn before_finish(
+ &mut self,
+ ) -> Result<(Option<HardwareAuthToken>, Option<TimeStampToken>, Option<Vec<u8>>)> {
+ let mut confirmation_token: Option<Vec<u8>> = None;
+ if let Some(ref confirmation_token_receiver) = self.confirmation_token_receiver {
+ let locked_receiver = confirmation_token_receiver.lock().unwrap();
+ if let Some(ref receiver) = *locked_receiver {
+ loop {
+ match receiver.try_recv() {
+ // As long as we get tokens we loop and discard all but the most
+ // recent one.
+ Ok(t) => confirmation_token = Some(t),
+ Err(TryRecvError::Empty) => break,
+ Err(TryRecvError::Disconnected) => {
+ log::error!(concat!(
+ "We got disconnected from the APC service, ",
+ "this should never happen."
+ ));
+ break;
+ }
+ }
+ }
+ }
+ }
+ self.get_auth_tokens().map(|(hat, tst)| (hat, tst, confirmation_token))
}
/// This function is the authorization hook called after finish succeeded.
@@ -276,16 +308,12 @@
if let Some(key_id) = self.key_usage_limited {
// On the last successful use, the key gets deleted. In this case we
// have to notify the garbage collector.
- let need_gc = DB
- .with(|db| {
- db.borrow_mut()
- .check_and_update_key_usage_count(key_id)
- .context("Trying to update key usage count.")
- })
- .context("In after_finish.")?;
- if need_gc {
- Gc::notify_gc();
- }
+ DB.with(|db| {
+ db.borrow_mut()
+ .check_and_update_key_usage_count(key_id)
+ .context("Trying to update key usage count.")
+ })
+ .context("In after_finish.")?;
}
Ok(())
}
@@ -327,6 +355,7 @@
}
/// Enforcements data structure
+#[derive(Default)]
pub struct Enforcements {
/// This hash set contains the user ids for whom the device is currently unlocked. If a user id
/// is not in the set, it implies that the device is locked for the user.
@@ -337,15 +366,22 @@
/// stale, because the operation gets dropped before an auth token is received, the map
/// is cleaned up in regular intervals.
op_auth_map: TokenReceiverMap,
+ /// The enforcement module will try to get a confirmation token from this channel whenever
+ /// an operation that requires confirmation finishes.
+ confirmation_token_receiver: Arc<Mutex<Option<Receiver<Vec<u8>>>>>,
+ /// Highest boot level seen in keystore.boot_level; used to enforce MAX_BOOT_LEVEL tag.
+ boot_level: AtomicI32,
}
impl Enforcements {
- /// Creates an enforcement object with the two data structures it holds and the sender as None.
- pub fn new() -> Self {
- Enforcements {
- device_unlocked_set: Mutex::new(HashSet::new()),
- op_auth_map: Default::default(),
- }
+ /// Install the confirmation token receiver. The enforcement module will try to get a
+ /// confirmation token from this channel whenever an operation that requires confirmation
+ /// finishes.
+ pub fn install_confirmation_token_receiver(
+ &self,
+ confirmation_token_receiver: Receiver<Vec<u8>>,
+ ) {
+ *self.confirmation_token_receiver.lock().unwrap() = Some(confirmation_token_receiver);
}
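A sketch of the wiring this expects at startup: whoever hosts the protected confirmation service keeps the sender half and forwards every approved confirmation token; the exact call site is an assumption:

    use std::sync::mpsc::channel;

    let (confirmation_token_sender, confirmation_token_receiver) = channel::<Vec<u8>>();
    // Hand the receiver to the enforcement module; the APC session keeps the sender
    // and pushes the formatted confirmation token when the user approves the prompt.
    ENFORCEMENTS.install_confirmation_token_receiver(confirmation_token_receiver);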
/// Checks if a create call is authorized, given key parameters and operation parameters.
@@ -371,7 +407,11 @@
None => {
return Ok((
None,
- AuthInfo { state: DeferredAuthState::NoAuthRequired, key_usage_limited: None },
+ AuthInfo {
+ state: DeferredAuthState::NoAuthRequired,
+ key_usage_limited: None,
+ confirmation_token_receiver: None,
+ },
))
}
};
@@ -431,6 +471,8 @@
let mut allow_while_on_body = false;
let mut unlocked_device_required = false;
let mut key_usage_limited: Option<i64> = None;
+ let mut confirmation_token_receiver: Option<Arc<Mutex<Option<Receiver<Vec<u8>>>>>> = None;
+ let mut max_boot_level: Option<i32> = None;
// iterate through key parameters, recording information we need for authorization
// enforcements later, or enforcing authorizations in place, where applicable
@@ -494,6 +536,12 @@
// in the database again and check and update the counter.
key_usage_limited = Some(key_id);
}
+ KeyParameterValue::TrustedConfirmationRequired => {
+ confirmation_token_receiver = Some(self.confirmation_token_receiver.clone());
+ }
+ KeyParameterValue::MaxBootLevel(level) => {
+ max_boot_level = Some(*level);
+ }
// NOTE: as per offline discussion, sanitizing key parameters and rejecting
// create operation if any non-allowed tags are present, is not done in
// authorize_create (unlike in legacy keystore where AuthorizeBegin is rejected if
@@ -547,10 +595,21 @@
}
}
+ if let Some(level) = max_boot_level {
+ if level < self.boot_level.load(Ordering::SeqCst) {
+ return Err(Error::Km(Ec::BOOT_LEVEL_EXCEEDED))
+ .context("In authorize_create: boot level is too late.");
+ }
+ }
+
if !unlocked_device_required && no_auth_required {
return Ok((
None,
- AuthInfo { state: DeferredAuthState::NoAuthRequired, key_usage_limited },
+ AuthInfo {
+ state: DeferredAuthState::NoAuthRequired,
+ key_usage_limited,
+ confirmation_token_receiver,
+ },
));
}
@@ -625,7 +684,9 @@
(None, _, true) => (None, DeferredAuthState::OpAuthRequired),
(None, _, false) => (None, DeferredAuthState::NoAuthRequired),
})
- .map(|(hat, state)| (hat, AuthInfo { state, key_usage_limited }))
+ .map(|(hat, state)| {
+ (hat, AuthInfo { state, key_usage_limited, confirmation_token_receiver })
+ })
}
fn find_auth_token<F>(p: F) -> Result<Option<(AuthTokenEntry, MonotonicRawTime)>>
@@ -694,11 +755,123 @@
fn register_op_auth_receiver(&self, challenge: i64, recv: TokenReceiver) {
self.op_auth_map.add_receiver(challenge, recv);
}
-}
-impl Default for Enforcements {
- fn default() -> Self {
- Self::new()
+ /// Given the set of key parameters and flags, check if super encryption is required.
+ pub fn super_encryption_required(
+ domain: &Domain,
+ key_parameters: &[KeyParameter],
+ flags: Option<i32>,
+ ) -> SuperEncryptionType {
+ if *domain != Domain::APP {
+ return SuperEncryptionType::None;
+ }
+ if let Some(flags) = flags {
+ if (flags & KEY_FLAG_AUTH_BOUND_WITHOUT_CRYPTOGRAPHIC_LSKF_BINDING) != 0 {
+ return SuperEncryptionType::None;
+ }
+ }
+ if key_parameters
+ .iter()
+ .any(|kp| matches!(kp.key_parameter_value(), KeyParameterValue::UnlockedDeviceRequired))
+ {
+ return SuperEncryptionType::ScreenLockBound;
+ }
+ if key_parameters
+ .iter()
+ .any(|kp| matches!(kp.key_parameter_value(), KeyParameterValue::UserSecureID(_)))
+ {
+ return SuperEncryptionType::LskfBound;
+ }
+ SuperEncryptionType::None
+ }
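A sketch of how a key-generation path might consult this helper before persisting a new blob; key_parameters and flags are assumed to come from the caller's generateKey request:

    match Enforcements::super_encryption_required(&Domain::APP, &key_parameters, flags) {
        SuperEncryptionType::LskfBound => {
            // Wrap the key blob with the user's LSKF-bound super key before storing it.
        }
        SuperEncryptionType::ScreenLockBound => {
            // Wrap with the screen-lock-bound key so the blob is only usable while unlocked.
        }
        _ => {
            // No super encryption: store the blob exactly as KeyMint returned it.
        }
    }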
+
+ /// Finds a matching auth token along with a timestamp token.
+ /// This method looks through auth-tokens cached by keystore which satisfy the given
+ /// authentication information (i.e. |secureUserId|).
+ /// The most recent matching auth token which has a |challenge| field which matches
+ /// the passed-in |challenge| parameter is returned.
+ /// In this case the |authTokenMaxAgeMillis| parameter is not used.
+ ///
+ /// Otherwise, the most recent matching auth token which is younger than |authTokenMaxAgeMillis|
+ /// is returned.
+ pub fn get_auth_tokens(
+ &self,
+ challenge: i64,
+ secure_user_id: i64,
+ auth_token_max_age_millis: i64,
+ ) -> Result<(HardwareAuthToken, TimeStampToken)> {
+ let auth_type = HardwareAuthenticatorType::ANY;
+ let sids: Vec<i64> = vec![secure_user_id];
+ // Filter the matching auth tokens by challenge
+ let result = Self::find_auth_token(|hat: &AuthTokenEntry| {
+ (challenge == hat.challenge()) && hat.satisfies(&sids, auth_type)
+ })
+ .context(
+ "In get_auth_tokens: Failed to get a matching auth token filtered by challenge.",
+ )?;
+
+ let auth_token = if let Some((auth_token_entry, _)) = result {
+ auth_token_entry.take_auth_token()
+ } else {
+ // Filter the matching auth tokens by age.
+ if auth_token_max_age_millis != 0 {
+ let now_in_millis = MonotonicRawTime::now().milli_seconds();
+ let result = Self::find_auth_token(|auth_token_entry: &AuthTokenEntry| {
+ let token_valid = now_in_millis
+ .checked_sub(auth_token_entry.time_received().milli_seconds())
+ .map_or(false, |token_age_in_millis| {
+ auth_token_max_age_millis > token_age_in_millis
+ });
+ token_valid && auth_token_entry.satisfies(&sids, auth_type)
+ })
+ .context(
+ "In get_auth_tokens: Failed to get a matching auth token filtered by age.",
+ )?;
+
+ if let Some((auth_token_entry, _)) = result {
+ auth_token_entry.take_auth_token()
+ } else {
+ return Err(AuthzError::Rc(AuthzResponseCode::NO_AUTH_TOKEN_FOUND))
+ .context("In get_auth_tokens: No auth token found.");
+ }
+ } else {
+ return Err(AuthzError::Rc(AuthzResponseCode::NO_AUTH_TOKEN_FOUND))
+ .context("In get_auth_tokens: Passed-in auth token max age is zero.");
+ }
+ };
+ // Wait and obtain the timestamp token from secure clock service.
+ let tst = get_timestamp_token(challenge)
+ .context("In get_auth_tokens. Error in getting timestamp token.")?;
+ Ok((auth_token, tst))
+ }
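A sketch of a caller, for instance the authorization service handing tokens to credstore; challenge and secure_user_id are assumed to come from the incoming request:

    let (auth_token, timestamp_token) = ENFORCEMENTS.get_auth_tokens(
        challenge,       // preferred: a cached token whose challenge matches exactly
        secure_user_id,
        60_000,          // fallback: accept a matching token younger than 60 seconds
    )?;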
+
+ /// Watch the `keystore.boot_level` system property, and keep self.boot_level up to date.
+ /// Blocks waiting for system property changes, so must be run in its own thread.
+ pub fn watch_boot_level(&self) -> Result<()> {
+ let mut w = PropertyWatcher::new("keystore.boot_level")?;
+ loop {
+ fn parse_value(_name: &str, value: &str) -> Result<Option<i32>> {
+ Ok(if value == "end" { None } else { Some(value.parse::<i32>()?) })
+ }
+ match w.read(parse_value)? {
+ Some(level) => {
+ let old = self.boot_level.fetch_max(level, Ordering::SeqCst);
+ log::info!(
+ "Read keystore.boot_level: {}; boot level {} -> {}",
+ level,
+ old,
+ std::cmp::max(old, level)
+ );
+ }
+ None => {
+ log::info!("keystore.boot_level is `end`, finishing.");
+ self.boot_level.fetch_max(i32::MAX, Ordering::SeqCst);
+ break;
+ }
+ }
+ w.wait()?;
+ }
+ Ok(())
}
}
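Because watch_boot_level blocks on property changes, the intended usage is to dedicate a thread to it during startup; a sketch, with the spawn site assumed:

    std::thread::spawn(|| {
        if let Err(e) = ENFORCEMENTS.watch_boot_level() {
            log::error!("watch_boot_level failed: {:?}", e);
        }
    });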
diff --git a/keystore2/src/entropy.rs b/keystore2/src/entropy.rs
new file mode 100644
index 0000000..de38187
--- /dev/null
+++ b/keystore2/src/entropy.rs
@@ -0,0 +1,98 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module holds functionality for retrieving and distributing entropy.
+
+use anyhow::{Context, Result};
+use log::error;
+use std::time::{Duration, Instant};
+
+static ENTROPY_SIZE: usize = 64;
+static MIN_FEED_INTERVAL_SECS: u64 = 30;
+
+#[derive(Default)]
+struct FeederInfo {
+ last_feed: Option<Instant>,
+}
+
+/// Register the entropy feeder as an idle callback.
+pub fn register_feeder() {
+ crate::globals::ASYNC_TASK.add_idle(|shelf| {
+ let mut info = shelf.get_mut::<FeederInfo>();
+ let now = Instant::now();
+ let feed_needed = match info.last_feed {
+ None => true,
+ Some(last) => now.duration_since(last) > Duration::from_secs(MIN_FEED_INTERVAL_SECS),
+ };
+ if feed_needed {
+ info.last_feed = Some(now);
+ feed_devices();
+ }
+ });
+}
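A sketch of the assumed startup wiring: seed the devices once synchronously, then let the idle callback keep topping them up no more often than every MIN_FEED_INTERVAL_SECS:

    entropy::feed_devices();     // one immediate feed at boot (assumed call site)
    entropy::register_feeder();  // afterwards the async task shelf tracks last_feed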
+
+fn get_entropy(size: usize) -> Result<Vec<u8>> {
+ keystore2_crypto::generate_random_data(size).context("Retrieving entropy for KeyMint device")
+}
+
+/// Feed entropy to all known KeyMint devices.
+pub fn feed_devices() {
+ let km_devs = crate::globals::get_keymint_devices();
+ if km_devs.is_empty() {
+ return;
+ }
+ let data = match get_entropy(km_devs.len() * ENTROPY_SIZE) {
+ Ok(data) => data,
+ Err(e) => {
+ error!(
+ "Failed to retrieve {}*{} bytes of entropy: {:?}",
+ km_devs.len(),
+ ENTROPY_SIZE,
+ e
+ );
+ return;
+ }
+ };
+ for (i, km_dev) in km_devs.iter().enumerate() {
+ let offset = i * ENTROPY_SIZE;
+ let sub_data = &data[offset..(offset + ENTROPY_SIZE)];
+ if let Err(e) = km_dev.addRngEntropy(sub_data) {
+ error!("Failed to feed entropy to KeyMint device: {:?}", e);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::collections::HashSet;
+
+ #[test]
+ fn test_entropy_size() {
+ for size in &[0, 1, 4, 8, 256, 4096] {
+ let data = get_entropy(*size).expect("failed to get entropy");
+ assert_eq!(data.len(), *size);
+ }
+ }
+ #[test]
+ fn test_entropy_uniqueness() {
+ let count = 10;
+ let mut seen = HashSet::new();
+ for _i in 0..count {
+ let data = get_entropy(16).expect("failed to get entropy");
+ seen.insert(data);
+ }
+ assert_eq!(seen.len(), count);
+ }
+}
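
The per-device slicing in feed_devices above is the part that is easy to get off by one; a minimal standalone sketch of the partitioning scheme (illustrative values only, not part of the change):

    // Each device receives its own, non-overlapping ENTROPY_SIZE-byte slice of a single buffer.
    let entropy_size = 64usize; // mirrors ENTROPY_SIZE above
    let n_devs = 2usize;        // e.g. a TEE and a StrongBox instance
    let data = vec![0u8; n_devs * entropy_size];
    for i in 0..n_devs {
        let sub_data = &data[i * entropy_size..(i + 1) * entropy_size];
        assert_eq!(sub_data.len(), entropy_size);
    }
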
diff --git a/keystore2/src/error.rs b/keystore2/src/error.rs
index 7227f62..465dcfa 100644
--- a/keystore2/src/error.rs
+++ b/keystore2/src/error.rs
@@ -57,6 +57,10 @@
/// Wraps a Binder status code.
#[error("Binder transaction error {0:?}")]
BinderTransaction(StatusCode),
+ /// Wraps a Remote Provisioning ErrorCode as defined by the IRemotelyProvisionedComponent
+ /// AIDL interface spec.
+ #[error("Error::Rp({0:?})")]
+ Rp(ErrorCode),
}
impl Error {
@@ -101,6 +105,16 @@
})
}
+/// Helper function to map the binder status we get from calls into a RemotelyProvisionedComponent
+/// to a Keystore Error. We don't create an anyhow error here to make
+/// it easier to evaluate service-specific errors.
+pub fn map_rem_prov_error<T>(r: BinderResult<T>) -> Result<T, Error> {
+ r.map_err(|s| match s.exception_code() {
+ ExceptionCode::SERVICE_SPECIFIC => Error::Rp(ErrorCode(s.service_specific_error())),
+ e_code => Error::Binder(e_code, 0),
+ })
+}
+
/// This function is similar to map_km_error only that we don't expect
/// any KeyMint error codes, we simply preserve the exception code and optional
/// service specific exception.
@@ -157,30 +171,58 @@
where
F: FnOnce(U) -> BinderResult<T>,
{
- result.map_or_else(
+ map_err_with(
+ result,
|e| {
log::error!("{:?}", e);
- let root_cause = e.root_cause();
- let rc = match root_cause.downcast_ref::<Error>() {
- Some(Error::Rc(rcode)) => rcode.0,
- Some(Error::Km(ec)) => ec.0,
- // If an Error::Binder reaches this stage we report a system error.
- // The exception code and possible service specific error will be
- // printed in the error log above.
- Some(Error::Binder(_, _)) | Some(Error::BinderTransaction(_)) => {
- ResponseCode::SYSTEM_ERROR.0
- }
- None => match root_cause.downcast_ref::<selinux::Error>() {
- Some(selinux::Error::PermissionDenied) => ResponseCode::PERMISSION_DENIED.0,
- _ => ResponseCode::SYSTEM_ERROR.0,
- },
- };
+ e
+ },
+ handle_ok,
+ )
+}
+
+/// This function behaves similarly to map_or_log_error, but it does not log the errors itself.
+/// Instead, it calls map_err on the error before mapping it to a binder result, allowing
+/// callers to log or transform the error first.
+pub fn map_err_with<T, U, F1, F2>(
+ result: anyhow::Result<U>,
+ map_err: F1,
+ handle_ok: F2,
+) -> BinderResult<T>
+where
+ F1: FnOnce(anyhow::Error) -> anyhow::Error,
+ F2: FnOnce(U) -> BinderResult<T>,
+{
+ result.map_or_else(
+ |e| {
+ let e = map_err(e);
+ let rc = get_error_code(&e);
Err(BinderStatus::new_service_specific_error(rc, None))
},
handle_ok,
)
}
+/// Returns the error code given a reference to the error.
+pub fn get_error_code(e: &anyhow::Error) -> i32 {
+ let root_cause = e.root_cause();
+ match root_cause.downcast_ref::<Error>() {
+ Some(Error::Rc(rcode)) => rcode.0,
+ Some(Error::Km(ec)) => ec.0,
+ Some(Error::Rp(_)) => ResponseCode::SYSTEM_ERROR.0,
+ // If an Error::Binder reaches this stage we report a system error.
+ // The exception code and possible service specific error will be
+ // printed in the error log above.
+ Some(Error::Binder(_, _)) | Some(Error::BinderTransaction(_)) => {
+ ResponseCode::SYSTEM_ERROR.0
+ }
+ None => match root_cause.downcast_ref::<selinux::Error>() {
+ Some(selinux::Error::PermissionDenied) => ResponseCode::PERMISSION_DENIED.0,
+ _ => ResponseCode::SYSTEM_ERROR.0,
+ },
+ }
+}
+
#[cfg(test)]
pub mod tests {
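
A hedged caller-side sketch of the new map_err_with helper; the wrapping function and the warn-level logging are hypothetical, while map_err_with, get_error_code and BinderResult are the items defined above:

    fn example(result: anyhow::Result<i32>) -> BinderResult<i32> {
        map_err_with(
            result,
            |e| {
                // The caller can log or annotate the error before it is flattened into a
                // service-specific binder status via get_error_code.
                log::warn!("example failed: {:?}", e);
                e
            },
            Ok, // hand a successful value straight through
        )
    }
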
diff --git a/keystore2/src/gc.rs b/keystore2/src/gc.rs
index b5bdd98..6cc0f27 100644
--- a/keystore2/src/gc.rs
+++ b/keystore2/src/gc.rs
@@ -18,77 +18,106 @@
//! optionally dispose of sensitive key material appropriately, and then delete
//! the key entry from the database.
-use crate::globals::{get_keymint_device, DB};
-use crate::{error::map_km_error, globals::ASYNC_TASK};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::IKeyMintDevice::IKeyMintDevice;
-use anyhow::Result;
+use crate::{
+ async_task,
+ database::{KeystoreDB, Uuid},
+ super_key::SuperKeyManager,
+};
+use anyhow::{Context, Result};
+use async_task::AsyncTask;
+use std::sync::Arc;
-#[derive(Clone, Copy)]
pub struct Gc {
- remaining_tries: u32,
+ async_task: Arc<AsyncTask>,
}
impl Gc {
- const MAX_ERROR_RETRIES: u32 = 3u32;
+ /// Creates a garbage collector using the given async_task.
+    /// The garbage collector needs a function to invalidate key blobs, a database connection,
+    /// and a super key manager. All three are obtained from the init function. The init
+    /// function is only called the first time a garbage collector is initialized with the
+    /// given AsyncTask instance.
+ pub fn new_init_with<F>(async_task: Arc<AsyncTask>, init: F) -> Self
+ where
+ F: FnOnce() -> (
+ Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
+ KeystoreDB,
+ Arc<SuperKeyManager>,
+ ) + Send
+ + 'static,
+ {
+ let weak_at = Arc::downgrade(&async_task);
+ // Initialize the task's shelf.
+ async_task.queue_hi(move |shelf| {
+ let (invalidate_key, db, super_key) = init();
+ shelf.get_or_put_with(|| GcInternal {
+ blob_id_to_delete: None,
+ invalidate_key,
+ db,
+ async_task: weak_at,
+ super_key,
+ });
+ });
+ Self { async_task }
+ }
- /// Attempts to process one unreferenced key from the database.
- /// Returns Ok(true) if a key was deleted and Ok(false) if there were no more keys to process.
+ /// Notifies the key garbage collector to iterate through orphaned and superseded blobs and
+ /// attempts their deletion. We only process one key at a time and then schedule another
+ /// attempt by queueing it in the async_task (low priority) queue.
+ pub fn notify_gc(&self) {
+ self.async_task.queue_lo(|shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step())
+ }
+}
+
+struct GcInternal {
+ blob_id_to_delete: Option<i64>,
+ invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
+ db: KeystoreDB,
+ async_task: std::sync::Weak<AsyncTask>,
+ super_key: Arc<SuperKeyManager>,
+}
+
+impl GcInternal {
+ /// Attempts to process one blob from the database.
     /// We process one key at a time, because deleting a key is a time-consuming process which
     /// may involve calling into the KeyMint backend, and we don't want to hog either the backend
     /// or the database for extended periods of time.
- fn process_one_key() -> Result<bool> {
- DB.with(|db| {
- let mut db = db.borrow_mut();
- if let Some((key_id, mut key_entry)) = db.get_unreferenced_key()? {
- if let Some(blob) = key_entry.take_km_blob() {
- let km_dev: Box<dyn IKeyMintDevice> =
- get_keymint_device(key_entry.sec_level())?.get_interface()?;
- if let Err(e) = map_km_error(km_dev.deleteKey(&blob)) {
- // Log but ignore error.
- log::error!("Error trying to delete key. {:?}", e);
- }
- }
- db.purge_key_entry(key_id)?;
- return Ok(true);
- }
- Ok(false)
- })
- }
+ fn process_one_key(&mut self) -> Result<()> {
+ if let Some((blob_id, blob, blob_metadata)) = self
+ .db
+ .handle_next_superseded_blob(self.blob_id_to_delete.take())
+ .context("In process_one_key: Trying to handle superseded blob.")?
+ {
+            // Record the blob_id as the next blob to be deleted, so that it is
+            // removed from the database regardless of whether the following
+            // succeeds or not.
+ self.blob_id_to_delete = Some(blob_id);
- /// Processes one key and then schedules another attempt until it runs out of tries or keys
- /// to delete.
- fn process_all(mut self) {
- match Self::process_one_key() {
- // We successfully removed a key.
- Ok(true) => self.remaining_tries = Self::MAX_ERROR_RETRIES,
- // There were no more keys to remove. We may exit.
- Ok(false) => self.remaining_tries = 0,
- // An error occurred. We retry in case the error was transient, but
- // we also count down the number of tries so that we don't spin
- // indefinitely.
- Err(e) => {
- self.remaining_tries -= 1;
- log::error!(
- concat!(
- "Failed to delete key. Retrying in case this error was transient. ",
- "(Tries remaining {}) {:?}"
- ),
- self.remaining_tries,
- e
- )
+            // If the key has a km_uuid we try to get the corresponding device
+            // and delete the key, unwrapping if necessary and possible.
+            // (At this time keys may get deleted without the super encryption key
+            // being available; in that case we can only delete the key from the database.)
+ if let Some(uuid) = blob_metadata.km_uuid() {
+ let blob = self
+ .super_key
+ .unwrap_key_if_required(&blob_metadata, &blob)
+ .context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
+ (self.invalidate_key)(&uuid, &*blob)
+ .context("In process_one_key: Trying to invalidate key.")?;
}
}
- if self.remaining_tries != 0 {
- ASYNC_TASK.queue_lo(move || {
- self.process_all();
- })
- }
+ Ok(())
}
- /// Notifies the key garbage collector to iterate through unreferenced keys and attempt
- /// their deletion. We only process one key at a time and then schedule another
- /// attempt by queueing it in the async_task (low priority) queue.
- pub fn notify_gc() {
- ASYNC_TASK.queue_lo(|| Self { remaining_tries: Self::MAX_ERROR_RETRIES }.process_all())
+ /// Processes one key and then schedules another attempt until it runs out of blobs to delete.
+ fn step(&mut self) {
+ if let Err(e) = self.process_one_key() {
+ log::error!("Error trying to delete blob entry. {:?}", e);
+ }
+        // Schedule the next step. This gives high-priority requests a chance to interleave.
+ if self.blob_id_to_delete.is_some() {
+ if let Some(at) = self.async_task.upgrade() {
+ at.queue_lo(move |shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step());
+ }
+ }
}
}
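
The rescheduling in GcInternal::step generalizes to any worker that lives on the task's shelf. A hedged sketch of the pattern with hypothetical names (MyWorker, has_more_work), using only the AsyncTask API visible above:

    struct MyWorker {
        // A Weak handle, so the worker cannot keep the task runner alive on its own.
        async_task: std::sync::Weak<AsyncTask>,
    }

    impl MyWorker {
        // Assumes a MyWorker instance was previously placed on the task's shelf.
        fn step(&mut self) {
            // ... process exactly one unit of work here ...
            if self.has_more_work() {
                // Requeue on the low-priority queue so high-priority jobs can interleave.
                if let Some(at) = self.async_task.upgrade() {
                    at.queue_lo(move |shelf| {
                        shelf.get_downcast_mut::<MyWorker>().unwrap().step()
                    });
                }
            }
        }
        fn has_more_work(&self) -> bool {
            false // placeholder
        }
    }
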
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 50ec26c..54f7dc7 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -16,38 +16,60 @@
//! database connections and connections to services that Keystore needs
//! to talk to.
-use crate::enforcements::Enforcements;
use crate::gc::Gc;
use crate::legacy_blob::LegacyBlobLoader;
+use crate::legacy_migrator::LegacyMigrator;
use crate::super_key::SuperKeyManager;
use crate::utils::Asp;
use crate::{async_task::AsyncTask, database::MonotonicRawTime};
use crate::{
database::KeystoreDB,
+ database::Uuid,
error::{map_binder_status, map_binder_status_code, Error, ErrorCode},
};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
-use android_hardware_security_keymint::binder::StatusCode;
+use crate::{enforcements::Enforcements, error::map_km_error};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ IKeyMintDevice::IKeyMintDevice, IRemotelyProvisionedComponent::IRemotelyProvisionedComponent,
+ KeyMintHardwareInfo::KeyMintHardwareInfo, SecurityLevel::SecurityLevel,
+};
+use android_hardware_security_keymint::binder::{StatusCode, Strong};
use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
use anyhow::{Context, Result};
+use binder::FromIBinder;
+use keystore2_vintf::get_aidl_instances;
use lazy_static::lazy_static;
-use std::collections::HashMap;
-use std::sync::Mutex;
+use std::sync::{Arc, Mutex};
use std::{cell::RefCell, sync::Once};
+use std::{collections::HashMap, path::Path, path::PathBuf};
static DB_INIT: Once = Once::new();
/// Open a connection to the Keystore 2.0 database. This is called during the initialization of
/// the thread local DB field. It should never be called directly. The first time this is called
/// we also call KeystoreDB::cleanup_leftovers to restore the key lifecycle invariant. See the
-/// documentation of cleanup_leftovers for more details.
-fn create_thread_local_db() -> KeystoreDB {
- let mut db = KeystoreDB::new(
- // Keystore changes to the database directory on startup
- // (see keystore2_main.rs).
- &std::env::current_dir().expect("Could not get the current working directory."),
- )
- .expect("Failed to open database.");
+/// documentation of cleanup_leftovers for more details. The function also constructs a blob
+/// garbage collector. The initializing closure constructs another database connection without
+/// a GC. Although one Gc is created for each thread-local database connection, this closure
+/// runs only once, as long as the ASYNC_TASK instance is the same. So only one additional
+/// database connection is created for the garbage collector worker.
+pub fn create_thread_local_db() -> KeystoreDB {
+ let gc = Gc::new_init_with(ASYNC_TASK.clone(), || {
+ (
+ Box::new(|uuid, blob| {
+ let km_dev: Strong<dyn IKeyMintDevice> =
+ get_keymint_dev_by_uuid(uuid).map(|(dev, _)| dev)?.get_interface()?;
+ map_km_error(km_dev.deleteKey(&*blob))
+ .context("In invalidate key closure: Trying to invalidate key blob.")
+ }),
+ KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), None)
+ .expect("Failed to open database."),
+ SUPER_KEY.clone(),
+ )
+ });
+
+ let mut db =
+ KeystoreDB::new(&DB_PATH.lock().expect("Could not get the database directory."), Some(gc))
+ .expect("Failed to open database.");
DB_INIT.call_once(|| {
log::info!("Touching Keystore 2.0 database for this first time since boot.");
db.insert_last_off_body(MonotonicRawTime::now())
@@ -63,7 +85,6 @@
n
);
}
- Gc::notify_gc();
});
db
}
@@ -77,22 +98,81 @@
RefCell::new(create_thread_local_db());
}
+#[derive(Default)]
+struct DevicesMap {
+ devices_by_uuid: HashMap<Uuid, (Asp, KeyMintHardwareInfo)>,
+ uuid_by_sec_level: HashMap<SecurityLevel, Uuid>,
+}
+
+impl DevicesMap {
+ fn dev_by_sec_level(
+ &self,
+ sec_level: &SecurityLevel,
+ ) -> Option<(Asp, KeyMintHardwareInfo, Uuid)> {
+ self.uuid_by_sec_level.get(sec_level).and_then(|uuid| self.dev_by_uuid(uuid))
+ }
+
+ fn dev_by_uuid(&self, uuid: &Uuid) -> Option<(Asp, KeyMintHardwareInfo, Uuid)> {
+ self.devices_by_uuid
+ .get(uuid)
+ .map(|(dev, hw_info)| ((*dev).clone(), (*hw_info).clone(), *uuid))
+ }
+
+ fn devices<T: FromIBinder + ?Sized>(&self) -> Vec<Strong<T>> {
+ self.devices_by_uuid.values().filter_map(|(asp, _)| asp.get_interface::<T>().ok()).collect()
+ }
+
+ /// The requested security level and the security level of the actual implementation may
+ /// differ. So we map the requested security level to the uuid of the implementation
+ /// so that there cannot be any confusion as to which KeyMint instance is requested.
+ fn insert(&mut self, sec_level: SecurityLevel, dev: Asp, hw_info: KeyMintHardwareInfo) {
+        // For now we use the reported security level of the KM instance as the UUID.
+        // TODO: update this section once a UUID has been added to the KM hardware info.
+ let uuid: Uuid = sec_level.into();
+ self.devices_by_uuid.insert(uuid, (dev, hw_info));
+ self.uuid_by_sec_level.insert(sec_level, uuid);
+ }
+}
+
+#[derive(Default)]
+struct RemotelyProvisionedDevicesMap {
+ devices_by_sec_level: HashMap<SecurityLevel, Asp>,
+}
+
+impl RemotelyProvisionedDevicesMap {
+ fn dev_by_sec_level(&self, sec_level: &SecurityLevel) -> Option<Asp> {
+ self.devices_by_sec_level.get(sec_level).map(|dev| (*dev).clone())
+ }
+
+ fn insert(&mut self, sec_level: SecurityLevel, dev: Asp) {
+ self.devices_by_sec_level.insert(sec_level, dev);
+ }
+}
+
lazy_static! {
+ /// The path where keystore stores all its keys.
+ pub static ref DB_PATH: Mutex<PathBuf> = Mutex::new(
+ Path::new("/data/misc/keystore").to_path_buf());
/// Runtime database of unwrapped super keys.
- pub static ref SUPER_KEY: SuperKeyManager = Default::default();
+ pub static ref SUPER_KEY: Arc<SuperKeyManager> = Default::default();
/// Map of KeyMint devices.
- static ref KEY_MINT_DEVICES: Mutex<HashMap<SecurityLevel, Asp>> = Default::default();
+ static ref KEY_MINT_DEVICES: Mutex<DevicesMap> = Default::default();
/// Timestamp service.
static ref TIME_STAMP_DEVICE: Mutex<Option<Asp>> = Default::default();
+ /// RemotelyProvisionedComponent HAL devices.
+ static ref REMOTELY_PROVISIONED_COMPONENT_DEVICES: Mutex<RemotelyProvisionedDevicesMap> = Default::default();
/// A single on-demand worker thread that handles deferred tasks with two different
/// priorities.
- pub static ref ASYNC_TASK: AsyncTask = Default::default();
+ pub static ref ASYNC_TASK: Arc<AsyncTask> = Default::default();
/// Singleton for enforcements.
- pub static ref ENFORCEMENTS: Enforcements = Enforcements::new();
+ pub static ref ENFORCEMENTS: Enforcements = Default::default();
/// LegacyBlobLoader is initialized and exists globally.
/// The same directory used by the database is used by the LegacyBlobLoader as well.
- pub static ref LEGACY_BLOB_LOADER: LegacyBlobLoader = LegacyBlobLoader::new(
- &std::env::current_dir().expect("Could not get the current working directory."));
+ pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
+ &DB_PATH.lock().expect("Could not get the database path for legacy blob loader.")));
+ /// Legacy migrator. Atomically migrates legacy blobs to the database.
+ pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
+ Arc::new(LegacyMigrator::new(ASYNC_TASK.clone()));
}
static KEYMINT_SERVICE_NAME: &str = "android.hardware.security.keymint.IKeyMintDevice";
@@ -100,89 +180,125 @@
/// Make a new connection to a KeyMint device of the given security level.
/// If no native KeyMint device can be found this function also brings
/// up the compatibility service and attempts to connect to the legacy wrapper.
-fn connect_keymint(security_level: SecurityLevel) -> Result<Asp> {
- let service_name = match security_level {
- SecurityLevel::TRUSTED_ENVIRONMENT => format!("{}/default", KEYMINT_SERVICE_NAME),
- SecurityLevel::STRONGBOX => format!("{}/strongbox", KEYMINT_SERVICE_NAME),
+fn connect_keymint(security_level: &SecurityLevel) -> Result<(Asp, KeyMintHardwareInfo)> {
+ let keymint_instances =
+ get_aidl_instances("android.hardware.security.keymint", 1, "IKeyMintDevice");
+
+ let service_name = match *security_level {
+ SecurityLevel::TRUSTED_ENVIRONMENT => {
+ if keymint_instances.as_vec()?.iter().any(|instance| *instance == "default") {
+ Some(format!("{}/default", KEYMINT_SERVICE_NAME))
+ } else {
+ None
+ }
+ }
+ SecurityLevel::STRONGBOX => {
+ if keymint_instances.as_vec()?.iter().any(|instance| *instance == "strongbox") {
+ Some(format!("{}/strongbox", KEYMINT_SERVICE_NAME))
+ } else {
+ None
+ }
+ }
_ => {
return Err(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
.context("In connect_keymint.")
}
};
- let keymint = map_binder_status_code(binder::get_interface(&service_name))
- .context("In connect_keymint: Trying to connect to genuine KeyMint service.")
- .or_else(|e| {
- match e.root_cause().downcast_ref::<Error>() {
- Some(Error::BinderTransaction(StatusCode::NAME_NOT_FOUND)) => {
- // This is a no-op if it was called before.
- keystore2_km_compat::add_keymint_device_service();
+ let keymint = if let Some(service_name) = service_name {
+ map_binder_status_code(binder::get_interface(&service_name))
+ .context("In connect_keymint: Trying to connect to genuine KeyMint service.")
+ } else {
+ // This is a no-op if it was called before.
+ keystore2_km_compat::add_keymint_device_service();
- let keystore_compat_service: Box<dyn IKeystoreCompatService> =
- map_binder_status_code(binder::get_interface("android.security.compat"))
- .context("In connect_keymint: Trying to connect to compat service.")?;
- map_binder_status(keystore_compat_service.getKeyMintDevice(security_level))
- .map_err(|e| match e {
- Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
- Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
- }
- e => e,
- })
- .context("In connext_keymint: Trying to get Legacy wrapper.")
+ let keystore_compat_service: Strong<dyn IKeystoreCompatService> =
+ map_binder_status_code(binder::get_interface("android.security.compat"))
+ .context("In connect_keymint: Trying to connect to compat service.")?;
+ map_binder_status(keystore_compat_service.getKeyMintDevice(*security_level))
+ .map_err(|e| match e {
+ Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
+ Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
}
- _ => Err(e),
- }
- })?;
+ e => e,
+ })
+ .context("In connect_keymint: Trying to get Legacy wrapper.")
+ }?;
- Ok(Asp::new(keymint.as_binder()))
+ let hw_info = map_km_error(keymint.getHardwareInfo())
+ .context("In connect_keymint: Failed to get hardware info.")?;
+
+ Ok((Asp::new(keymint.as_binder()), hw_info))
}
/// Get a keymint device for the given security level either from our cache or
-/// by making a new connection.
-pub fn get_keymint_device(security_level: SecurityLevel) -> Result<Asp> {
+/// by making a new connection. Returns the device, the hardware info, and the uuid.
+/// TODO: the latter can be removed once the uuid is part of the hardware info.
+pub fn get_keymint_device(
+ security_level: &SecurityLevel,
+) -> Result<(Asp, KeyMintHardwareInfo, Uuid)> {
let mut devices_map = KEY_MINT_DEVICES.lock().unwrap();
- if let Some(dev) = devices_map.get(&security_level) {
- Ok(dev.clone())
+ if let Some((dev, hw_info, uuid)) = devices_map.dev_by_sec_level(&security_level) {
+ Ok((dev, hw_info, uuid))
} else {
- let dev = connect_keymint(security_level).context("In get_keymint_device.")?;
- devices_map.insert(security_level, dev.clone());
- Ok(dev)
+ let (dev, hw_info) = connect_keymint(security_level).context("In get_keymint_device.")?;
+ devices_map.insert(*security_level, dev, hw_info);
+ // Unwrap must succeed because we just inserted it.
+ Ok(devices_map.dev_by_sec_level(security_level).unwrap())
}
}
+/// Get a keymint device for the given uuid. This will only access the cache, but will not
+/// attempt to establish a new connection. It is assumed that the cache is already populated
+/// when this is called. This is a fair assumption, because service.rs iterates through all
+/// security levels when it gets instantiated.
+pub fn get_keymint_dev_by_uuid(uuid: &Uuid) -> Result<(Asp, KeyMintHardwareInfo)> {
+ let devices_map = KEY_MINT_DEVICES.lock().unwrap();
+ if let Some((dev, hw_info, _)) = devices_map.dev_by_uuid(uuid) {
+ Ok((dev, hw_info))
+ } else {
+ Err(Error::sys()).context("In get_keymint_dev_by_uuid: No KeyMint instance found.")
+ }
+}
+
+/// Return all known keymint devices.
+pub fn get_keymint_devices() -> Vec<Strong<dyn IKeyMintDevice>> {
+ KEY_MINT_DEVICES.lock().unwrap().devices()
+}
+
static TIME_STAMP_SERVICE_NAME: &str = "android.hardware.security.secureclock.ISecureClock";
/// Make a new connection to a secure clock service.
/// If no native SecureClock device can be found brings up the compatibility service and attempts
/// to connect to the legacy wrapper.
fn connect_secureclock() -> Result<Asp> {
- let secureclock = map_binder_status_code(binder::get_interface(TIME_STAMP_SERVICE_NAME))
- .context("In connect_secureclock: Trying to connect to genuine secure clock service.")
- .or_else(|e| {
- match e.root_cause().downcast_ref::<Error>() {
- Some(Error::BinderTransaction(StatusCode::NAME_NOT_FOUND)) => {
- // This is a no-op if it was called before.
- keystore2_km_compat::add_keymint_device_service();
+ let secureclock_instances =
+ get_aidl_instances("android.hardware.security.secureclock", 1, "ISecureClock");
- let keystore_compat_service: Box<dyn IKeystoreCompatService> =
- map_binder_status_code(binder::get_interface("android.security.compat"))
- .context(
- "In connect_secureclock: Trying to connect to compat service.",
- )?;
+ let secure_clock_available =
+ secureclock_instances.as_vec()?.iter().any(|instance| *instance == "default");
- // Legacy secure clock services were only implemented by TEE.
- map_binder_status(keystore_compat_service.getSecureClock())
- .map_err(|e| match e {
- Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
- Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
- }
- e => e,
- })
- .context("In connect_secureclock: Trying to get Legacy wrapper.")
+ let secureclock = if secure_clock_available {
+ map_binder_status_code(binder::get_interface(TIME_STAMP_SERVICE_NAME))
+ .context("In connect_secureclock: Trying to connect to genuine secure clock service.")
+ } else {
+ // This is a no-op if it was called before.
+ keystore2_km_compat::add_keymint_device_service();
+
+ let keystore_compat_service: Strong<dyn IKeystoreCompatService> =
+ map_binder_status_code(binder::get_interface("android.security.compat"))
+ .context("In connect_secureclock: Trying to connect to compat service.")?;
+
+ // Legacy secure clock services were only implemented by TEE.
+ map_binder_status(keystore_compat_service.getSecureClock())
+ .map_err(|e| match e {
+ Error::BinderTransaction(StatusCode::NAME_NOT_FOUND) => {
+ Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)
}
- _ => Err(e),
- }
- })?;
+ e => e,
+ })
+ .context("In connect_secureclock: Trying to get Legacy wrapper.")
+ }?;
Ok(Asp::new(secureclock.as_binder()))
}
@@ -199,3 +315,55 @@
Ok(dev)
}
}
+
+static REMOTE_PROVISIONING_HAL_SERVICE_NAME: &str =
+ "android.hardware.security.keymint.IRemotelyProvisionedComponent";
+
+fn connect_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+ let remotely_prov_instances =
+ get_aidl_instances("android.hardware.security.keymint", 1, "IRemotelyProvisionedComponent");
+
+ let service_name = match *security_level {
+ SecurityLevel::TRUSTED_ENVIRONMENT => {
+ if remotely_prov_instances.as_vec()?.iter().any(|instance| *instance == "default") {
+ Some(format!("{}/default", REMOTE_PROVISIONING_HAL_SERVICE_NAME))
+ } else {
+ None
+ }
+ }
+ SecurityLevel::STRONGBOX => {
+ if remotely_prov_instances.as_vec()?.iter().any(|instance| *instance == "strongbox") {
+ Some(format!("{}/strongbox", REMOTE_PROVISIONING_HAL_SERVICE_NAME))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ .ok_or(Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
+ .context("In connect_remotely_provisioned_component.")?;
+
+ let rem_prov_hal: Strong<dyn IRemotelyProvisionedComponent> =
+ map_binder_status_code(binder::get_interface(&service_name))
+ .context(concat!(
+ "In connect_remotely_provisioned_component: Trying to connect to",
+ " RemotelyProvisionedComponent service."
+            ))?;
+ Ok(Asp::new(rem_prov_hal.as_binder()))
+}
+
+/// Get a remotely provisioned component device for the given security level, either from the
+/// cache or by making a new connection. Returns the device.
+pub fn get_remotely_provisioned_component(security_level: &SecurityLevel) -> Result<Asp> {
+ let mut devices_map = REMOTELY_PROVISIONED_COMPONENT_DEVICES.lock().unwrap();
+ if let Some(dev) = devices_map.dev_by_sec_level(&security_level) {
+ Ok(dev)
+ } else {
+ let dev = connect_remotely_provisioned_component(security_level)
+ .context("In get_remotely_provisioned_component.")?;
+ devices_map.insert(*security_level, dev);
+ // Unwrap must succeed because we just inserted it.
+ Ok(devices_map.dev_by_sec_level(security_level).unwrap())
+ }
+}
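
A hedged usage sketch of the reshaped lookup API; example_lookup is hypothetical, while the calls and return shapes are the ones defined above:

    fn example_lookup() -> anyhow::Result<()> {
        // Cached after the first call; later calls return the same Asp and hardware info.
        let (dev, _hw_info, _uuid) = get_keymint_device(&SecurityLevel::TRUSTED_ENVIRONMENT)
            .context("In example_lookup.")?;
        let _km_dev: Strong<dyn IKeyMintDevice> = dev.get_interface()?;
        Ok(())
    }
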
diff --git a/keystore2/src/key_parameter.rs b/keystore2/src/key_parameter.rs
index 93de6f2..c10da95 100644
--- a/keystore2/src/key_parameter.rs
+++ b/keystore2/src/key_parameter.rs
@@ -948,9 +948,26 @@
#[key_param(tag = RESET_SINCE_ID_ROTATION, field = BoolValue)]
ResetSinceIdRotation,
/// Used to deliver a cryptographic token proving that the user
- /// confirmed a signing request
+ /// confirmed a signing request
#[key_param(tag = CONFIRMATION_TOKEN, field = Blob)]
ConfirmationToken(Vec<u8>),
+    /// Used to deliver the certificate serial number to the KeyMint instance
+    /// during certificate generation.
+ #[key_param(tag = CERTIFICATE_SERIAL, field = Blob)]
+ CertificateSerial(Vec<u8>),
+    /// Used to deliver the certificate subject to the KeyMint instance during
+    /// certificate generation. This must be a DER-encoded X.509 name.
+ #[key_param(tag = CERTIFICATE_SUBJECT, field = Blob)]
+ CertificateSubject(Vec<u8>),
+ /// Used to deliver the not before date in milliseconds to KeyMint during key generation/import.
+ #[key_param(tag = CERTIFICATE_NOT_BEFORE, field = DateTime)]
+ CertificateNotBefore(i64),
+ /// Used to deliver the not after date in milliseconds to KeyMint during key generation/import.
+ #[key_param(tag = CERTIFICATE_NOT_AFTER, field = DateTime)]
+ CertificateNotAfter(i64),
+    /// Specifies a maximum boot level at which a key should function.
+ #[key_param(tag = MAX_BOOT_LEVEL, field = Integer)]
+ MaxBootLevel(i32),
}
}
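
A hedged construction sketch for the new certificate-shaping tags. It assumes the macro-generated enum is named KeyParameterValue and that KeyParameter::new(value, security_level) is available, as elsewhere in keystore2; neither name appears in this hunk:

    // 253402300799000 ms is 9999-12-31T23:59:59Z, the conventional "no expiry" date.
    let not_before = KeyParameterValue::CertificateNotBefore(0);
    let not_after = KeyParameterValue::CertificateNotAfter(253_402_300_799_000);
    let params = vec![
        KeyParameter::new(not_before, SecurityLevel::KEYSTORE),
        KeyParameter::new(not_after, SecurityLevel::KEYSTORE),
    ];
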
diff --git a/keystore2/src/keystore2_main.rs b/keystore2/src/keystore2_main.rs
index 2ea41aa..e745697 100644
--- a/keystore2/src/keystore2_main.rs
+++ b/keystore2/src/keystore2_main.rs
@@ -14,16 +14,23 @@
//! This crate implements the Keystore 2.0 service entry point.
-use binder::Interface;
-use keystore2::apc::ApcManager;
use keystore2::authorization::AuthorizationManager;
+use keystore2::entropy;
+use keystore2::globals::ENFORCEMENTS;
+use keystore2::maintenance::Maintenance;
+use keystore2::remote_provisioning::RemoteProvisioningService;
use keystore2::service::KeystoreService;
+use keystore2::{apc::ApcManager, shared_secret_negotiation};
use log::{error, info};
-use std::panic;
+use std::{panic, path::Path, sync::mpsc::channel};
+use vpnprofilestore::VpnProfileStore;
-static KS2_SERVICE_NAME: &str = "android.system.keystore2";
+static KS2_SERVICE_NAME: &str = "android.system.keystore2.IKeystoreService/default";
static APC_SERVICE_NAME: &str = "android.security.apc";
static AUTHORIZATION_SERVICE_NAME: &str = "android.security.authorization";
+static REMOTE_PROVISIONING_SERVICE_NAME: &str = "android.security.remoteprovisioning";
+static USER_MANAGER_SERVICE_NAME: &str = "android.security.maintenance";
+static VPNPROFILESTORE_SERVICE_NAME: &str = "android.security.vpnprofilestore";
/// Keystore 2.0 takes one argument which is a path indicating its designated working directory.
fn main() {
@@ -39,19 +46,38 @@
// Saying hi.
info!("Keystore2 is starting.");
+ // Initialize the per boot database.
+ let _keep_me_alive = keystore2::database::KeystoreDB::keep_perboot_db_alive()
+ .expect("Failed to initialize the perboot database.");
+
let mut args = std::env::args();
args.next().expect("That's odd. How is there not even a first argument?");
- // Keystore changes to the database directory on startup (typically /data/misc/keystore).
+ // Keystore 2.0 cannot change to the database directory (typically /data/misc/keystore) on
+ // startup as Keystore 1.0 did because Keystore 2.0 is intended to run much earlier than
+ // Keystore 1.0. Instead we set a global variable to the database path.
// For the ground truth check the service startup rule for init (typically in keystore2.rc).
if let Some(dir) = args.next() {
- if std::env::set_current_dir(dir.clone()).is_err() {
- panic!("Failed to set working directory {}.", dir)
- }
+ *keystore2::globals::DB_PATH.lock().expect("Could not lock DB_PATH.") =
+ Path::new(&dir).to_path_buf();
} else {
panic!("Must specify a working directory.");
}
+ let (confirmation_token_sender, confirmation_token_receiver) = channel();
+
+ ENFORCEMENTS.install_confirmation_token_receiver(confirmation_token_receiver);
+
+ info!("Starting boot level watcher.");
+ std::thread::spawn(|| {
+ keystore2::globals::ENFORCEMENTS
+ .watch_boot_level()
+ .unwrap_or_else(|e| error!("watch_boot_level failed: {}", e));
+ });
+
+ entropy::register_feeder();
+ shared_secret_negotiation::perform_shared_secret_negotiation();
+
info!("Starting thread pool now.");
binder::ProcessState::start_thread_pool();
@@ -62,9 +88,10 @@
panic!("Failed to register service {} because of {:?}.", KS2_SERVICE_NAME, e);
});
- let apc_service = ApcManager::new_native_binder().unwrap_or_else(|e| {
- panic!("Failed to create service {} because of {:?}.", APC_SERVICE_NAME, e);
- });
+ let apc_service =
+ ApcManager::new_native_binder(confirmation_token_sender).unwrap_or_else(|e| {
+ panic!("Failed to create service {} because of {:?}.", APC_SERVICE_NAME, e);
+ });
binder::add_service(APC_SERVICE_NAME, apc_service.as_binder()).unwrap_or_else(|e| {
panic!("Failed to register service {} because of {:?}.", APC_SERVICE_NAME, e);
});
@@ -77,6 +104,42 @@
panic!("Failed to register service {} because of {:?}.", AUTHORIZATION_SERVICE_NAME, e);
});
+ let maintenance_service = Maintenance::new_native_binder().unwrap_or_else(|e| {
+ panic!("Failed to create service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
+ });
+ binder::add_service(USER_MANAGER_SERVICE_NAME, maintenance_service.as_binder()).unwrap_or_else(
+ |e| {
+ panic!("Failed to register service {} because of {:?}.", USER_MANAGER_SERVICE_NAME, e);
+ },
+ );
+
+ // Devices with KS2 and KM 1.0 may not have any IRemotelyProvisionedComponent HALs at all. Do
+ // not panic if new_native_binder returns failure because it could not find the TEE HAL.
+ if let Ok(remote_provisioning_service) = RemoteProvisioningService::new_native_binder() {
+ binder::add_service(
+ REMOTE_PROVISIONING_SERVICE_NAME,
+ remote_provisioning_service.as_binder(),
+ )
+ .unwrap_or_else(|e| {
+ panic!(
+ "Failed to register service {} because of {:?}.",
+ REMOTE_PROVISIONING_SERVICE_NAME, e
+ );
+ });
+ }
+
+ let vpnprofilestore = VpnProfileStore::new_native_binder(
+ &keystore2::globals::DB_PATH.lock().expect("Could not get DB_PATH."),
+ );
+ binder::add_service(VPNPROFILESTORE_SERVICE_NAME, vpnprofilestore.as_binder()).unwrap_or_else(
+ |e| {
+ panic!(
+ "Failed to register service {} because of {:?}.",
+ VPNPROFILESTORE_SERVICE_NAME, e
+ );
+ },
+ );
+
info!("Successfully registered Keystore 2.0 service.");
info!("Joining thread pool now.");
diff --git a/keystore2/src/km_compat/Android.bp b/keystore2/src/km_compat/Android.bp
index 126aeff..541788e 100644
--- a/keystore2/src/km_compat/Android.bp
+++ b/keystore2/src/km_compat/Android.bp
@@ -12,6 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
rust_library {
name: "libkeystore2_km_compat",
crate_name: "keystore2_km_compat",
@@ -48,16 +57,17 @@
"android.hardware.keymaster@3.0",
"android.hardware.keymaster@4.0",
"android.hardware.keymaster@4.1",
- "android.hardware.security.keymint-unstable-ndk_platform",
- "android.hardware.security.secureclock-unstable-ndk_platform",
- "android.hardware.security.sharedsecret-unstable-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk_platform",
+ "android.hardware.security.secureclock-V1-ndk_platform",
+ "android.hardware.security.sharedsecret-V1-ndk_platform",
"android.security.compat-ndk_platform",
- "android.system.keystore2-ndk_platform",
+ "android.system.keystore2-V1-ndk_platform",
"libbase",
"libbinder_ndk",
"libcrypto",
"libhidlbase",
"libkeymaster4_1support",
+ "libkeymint",
"libkeymint_support",
"libkeystore2_crypto",
"libutils",
@@ -68,9 +78,9 @@
name: "libkm_compat_service",
srcs: ["km_compat_service.cpp"],
shared_libs: [
- "android.hardware.security.keymint-unstable-ndk_platform",
- "android.hardware.security.secureclock-unstable-ndk_platform",
- "android.hardware.security.sharedsecret-unstable-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk_platform",
+ "android.hardware.security.secureclock-V1-ndk_platform",
+ "android.hardware.security.sharedsecret-V1-ndk_platform",
"android.security.compat-ndk_platform",
"libbinder_ndk",
"libcrypto",
@@ -97,11 +107,11 @@
"android.hardware.keymaster@3.0",
"android.hardware.keymaster@4.0",
"android.hardware.keymaster@4.1",
- "android.hardware.security.keymint-unstable-ndk_platform",
- "android.hardware.security.secureclock-unstable-ndk_platform",
- "android.hardware.security.sharedsecret-unstable-ndk_platform",
+ "android.hardware.security.keymint-V1-ndk_platform",
+ "android.hardware.security.secureclock-V1-ndk_platform",
+ "android.hardware.security.sharedsecret-V1-ndk_platform",
"android.security.compat-ndk_platform",
- "android.system.keystore2-ndk_platform",
+ "android.system.keystore2-V1-ndk_platform",
"libbase",
"libbinder_ndk",
"libcrypto",
@@ -110,6 +120,7 @@
"libkeymint_support",
"libkeystore2_crypto",
"libkm_compat",
+ "libkm_compat_service",
"libutils",
],
}
diff --git a/keystore2/src/km_compat/certificate_test.cpp b/keystore2/src/km_compat/certificate_test.cpp
index d6bece7..06cb0cb 100644
--- a/keystore2/src/km_compat/certificate_test.cpp
+++ b/keystore2/src/km_compat/certificate_test.cpp
@@ -23,6 +23,7 @@
#include <aidl/android/hardware/security/keymint/BlockMode.h>
#include <aidl/android/hardware/security/keymint/Digest.h>
#include <aidl/android/hardware/security/keymint/PaddingMode.h>
+#include <android/binder_manager.h>
#include <openssl/evp.h>
#include <openssl/x509.h>
@@ -39,18 +40,31 @@
using ::aidl::android::hardware::security::keymint::PaddingMode;
using ::aidl::android::hardware::security::keymint::SecurityLevel;
using ::aidl::android::hardware::security::keymint::Tag;
+using ::aidl::android::security::compat::IKeystoreCompatService;
namespace KMV1 = ::aidl::android::hardware::security::keymint;
-static std::variant<std::vector<Certificate>, ScopedAStatus>
-getCertificate(const std::vector<KeyParameter>& keyParams) {
- static std::shared_ptr<KeyMintDevice> device =
- KeyMintDevice::createKeyMintDevice(SecurityLevel::TRUSTED_ENVIRONMENT);
+extern "C" int32_t addKeyMintDeviceService();
+
+static std::variant<std::shared_ptr<IKeyMintDevice>, ScopedAStatus> getDevice() {
+ addKeyMintDeviceService();
+ std::shared_ptr<IKeyMintDevice> device;
+ auto service = IKeystoreCompatService::fromBinder(
+ ndk::SpAIBinder(AServiceManager_getService("android.security.compat")));
+ if (!service) {
+ return ScopedAStatus::fromStatus(STATUS_NAME_NOT_FOUND);
+ }
+ service->getKeyMintDevice(SecurityLevel::TRUSTED_ENVIRONMENT, &device);
if (!device) {
return ScopedAStatus::fromStatus(STATUS_NAME_NOT_FOUND);
}
+ return device;
+}
+
+static std::variant<std::vector<Certificate>, ScopedAStatus>
+getCertificate(std::shared_ptr<IKeyMintDevice> device, const std::vector<KeyParameter>& keyParams) {
KeyCreationResult creationResult;
- auto status = device->generateKey(keyParams, &creationResult);
+ auto status = device->generateKey(keyParams, std::nullopt /* attest_key */, &creationResult);
if (!status.isOk()) {
return status;
}
@@ -76,6 +90,8 @@
KMV1::makeKeyParameter(KMV1::TAG_ALGORITHM, Algorithm::RSA),
KMV1::makeKeyParameter(KMV1::TAG_KEY_SIZE, 2048),
KMV1::makeKeyParameter(KMV1::TAG_RSA_PUBLIC_EXPONENT, 65537),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_BEFORE, 0),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_AFTER, 253402300799000),
});
keyParams.insert(keyParams.end(), extraParams.begin(), extraParams.end());
return keyParams;
@@ -87,10 +103,12 @@
KMV1::makeKeyParameter(KMV1::TAG_PADDING, PaddingMode::RSA_PSS),
KMV1::makeKeyParameter(KMV1::TAG_NO_AUTH_REQUIRED),
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::SIGN),
- KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::ENCRYPT),
});
- auto result = getCertificate(keyParams);
- ensureCertChainSize(result, 1);
+ auto device = getDevice();
+ if (std::holds_alternative<std::shared_ptr<IKeyMintDevice>>(device)) {
+ auto result = getCertificate(std::get<std::shared_ptr<IKeyMintDevice>>(device), keyParams);
+ ensureCertChainSize(result, 1);
+ }
}
TEST(CertificateTest, TestAES) {
@@ -101,8 +119,11 @@
KMV1::makeKeyParameter(KMV1::TAG_PADDING, PaddingMode::NONE),
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::ENCRYPT),
};
- auto result = getCertificate(keyParams);
- ensureCertChainSize(result, 0);
+ auto device = getDevice();
+ if (std::holds_alternative<std::shared_ptr<IKeyMintDevice>>(device)) {
+ auto result = getCertificate(std::get<std::shared_ptr<IKeyMintDevice>>(device), keyParams);
+ ensureCertChainSize(result, 0);
+ }
}
TEST(CertificateTest, TestAttestation) {
@@ -111,9 +132,12 @@
KMV1::makeKeyParameter(KMV1::TAG_ATTESTATION_CHALLENGE, 42),
KMV1::makeKeyParameter(KMV1::TAG_ATTESTATION_APPLICATION_ID, 42),
});
- auto result = getCertificate(keyParams);
- ensureCertChainSize(result, 3);
- verify(std::get<std::vector<Certificate>>(result).back());
+ auto device = getDevice();
+ if (std::holds_alternative<std::shared_ptr<IKeyMintDevice>>(device)) {
+ auto result = getCertificate(std::get<std::shared_ptr<IKeyMintDevice>>(device), keyParams);
+ ensureCertChainSize(result, 3);
+ verify(std::get<std::vector<Certificate>>(result).back());
+ }
}
TEST(CertificateTest, TestRSAKeygenNoEncryptNoAuthRequired) {
@@ -123,9 +147,12 @@
KMV1::makeKeyParameter(KMV1::TAG_NO_AUTH_REQUIRED, true),
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::SIGN),
});
- auto result = getCertificate(keyParams);
- ensureCertChainSize(result, 1);
- verify(std::get<std::vector<Certificate>>(result)[0]);
+ auto device = getDevice();
+ if (std::holds_alternative<std::shared_ptr<IKeyMintDevice>>(device)) {
+ auto result = getCertificate(std::get<std::shared_ptr<IKeyMintDevice>>(device), keyParams);
+ ensureCertChainSize(result, 1);
+ verify(std::get<std::vector<Certificate>>(result)[0]);
+ }
}
TEST(CertificateTest, TestRSAKeygenNoEncryptAuthRequired) {
@@ -134,6 +161,9 @@
KMV1::makeKeyParameter(KMV1::TAG_PADDING, PaddingMode::RSA_PSS),
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::SIGN),
});
- auto result = getCertificate(keyParams);
- ensureCertChainSize(result, 1);
+ auto device = getDevice();
+ if (std::holds_alternative<std::shared_ptr<IKeyMintDevice>>(device)) {
+ auto result = getCertificate(std::get<std::shared_ptr<IKeyMintDevice>>(device), keyParams);
+ ensureCertChainSize(result, 1);
+ }
}
diff --git a/keystore2/src/km_compat/km_compat.cpp b/keystore2/src/km_compat/km_compat.cpp
index a27bfd1..b824aa8 100644
--- a/keystore2/src/km_compat/km_compat.cpp
+++ b/keystore2/src/km_compat/km_compat.cpp
@@ -17,9 +17,11 @@
#include "km_compat.h"
#include "km_compat_type_conversion.h"
+#include <AndroidKeyMintDevice.h>
#include <aidl/android/hardware/security/keymint/Algorithm.h>
#include <aidl/android/hardware/security/keymint/Digest.h>
#include <aidl/android/hardware/security/keymint/ErrorCode.h>
+#include <aidl/android/hardware/security/keymint/KeyParameterValue.h>
#include <aidl/android/hardware/security/keymint/PaddingMode.h>
#include <aidl/android/system/keystore2/ResponseCode.h>
#include <android-base/logging.h>
@@ -30,10 +32,14 @@
#include <keymasterV4_1/Keymaster3.h>
#include <keymasterV4_1/Keymaster4.h>
+#include <chrono>
+
#include "certificate_utils.h"
using ::aidl::android::hardware::security::keymint::Algorithm;
+using ::aidl::android::hardware::security::keymint::CreateKeyMintDevice;
using ::aidl::android::hardware::security::keymint::Digest;
+using ::aidl::android::hardware::security::keymint::KeyParameterValue;
using ::aidl::android::hardware::security::keymint::PaddingMode;
using ::aidl::android::hardware::security::keymint::Tag;
using ::aidl::android::system::keystore2::ResponseCode;
@@ -50,28 +56,227 @@
namespace V4_1 = ::android::hardware::keymaster::V4_1;
namespace KMV1 = ::aidl::android::hardware::security::keymint;
+using namespace std::chrono_literals;
+using std::chrono::duration_cast;
+
// Utility functions
-// Converts a V4 error code into a ScopedAStatus
-ScopedAStatus convertErrorCode(V4_0_ErrorCode result) {
- if (result == V4_0_ErrorCode::OK) {
+// Returns true if this parameter may be passed to attestKey.
+bool isAttestationParameter(const KMV1::KeyParameter& param) {
+ switch (param.tag) {
+ case Tag::APPLICATION_ID:
+ case Tag::APPLICATION_DATA:
+ case Tag::ATTESTATION_CHALLENGE:
+ case Tag::ATTESTATION_APPLICATION_ID:
+ case Tag::ATTESTATION_ID_BRAND:
+ case Tag::ATTESTATION_ID_DEVICE:
+ case Tag::ATTESTATION_ID_PRODUCT:
+ case Tag::ATTESTATION_ID_SERIAL:
+ case Tag::ATTESTATION_ID_IMEI:
+ case Tag::ATTESTATION_ID_MEID:
+ case Tag::ATTESTATION_ID_MANUFACTURER:
+ case Tag::ATTESTATION_ID_MODEL:
+ case Tag::CERTIFICATE_SERIAL:
+ case Tag::CERTIFICATE_SUBJECT:
+ case Tag::CERTIFICATE_NOT_BEFORE:
+ case Tag::CERTIFICATE_NOT_AFTER:
+ case Tag::INCLUDE_UNIQUE_ID:
+ case Tag::DEVICE_UNIQUE_ATTESTATION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns true if this parameter may be passed to generate/importKey.
+bool isKeyCreationParameter(const KMV1::KeyParameter& param) {
+ switch (param.tag) {
+ case Tag::APPLICATION_ID:
+ case Tag::APPLICATION_DATA:
+ case Tag::CERTIFICATE_SERIAL:
+ case Tag::CERTIFICATE_SUBJECT:
+ case Tag::CERTIFICATE_NOT_BEFORE:
+ case Tag::CERTIFICATE_NOT_AFTER:
+ case Tag::PURPOSE:
+ case Tag::ALGORITHM:
+ case Tag::KEY_SIZE:
+ case Tag::BLOCK_MODE:
+ case Tag::DIGEST:
+ case Tag::PADDING:
+ case Tag::CALLER_NONCE:
+ case Tag::MIN_MAC_LENGTH:
+ case Tag::EC_CURVE:
+ case Tag::RSA_PUBLIC_EXPONENT:
+ case Tag::RSA_OAEP_MGF_DIGEST:
+ case Tag::BLOB_USAGE_REQUIREMENTS:
+ case Tag::BOOTLOADER_ONLY:
+ case Tag::ROLLBACK_RESISTANCE:
+ case Tag::EARLY_BOOT_ONLY:
+ case Tag::ACTIVE_DATETIME:
+ case Tag::ORIGINATION_EXPIRE_DATETIME:
+ case Tag::USAGE_EXPIRE_DATETIME:
+ case Tag::MIN_SECONDS_BETWEEN_OPS:
+ case Tag::MAX_USES_PER_BOOT:
+ case Tag::USAGE_COUNT_LIMIT:
+ case Tag::USER_ID:
+ case Tag::USER_SECURE_ID:
+ case Tag::NO_AUTH_REQUIRED:
+ case Tag::USER_AUTH_TYPE:
+ case Tag::AUTH_TIMEOUT:
+ case Tag::ALLOW_WHILE_ON_BODY:
+ case Tag::TRUSTED_USER_PRESENCE_REQUIRED:
+ case Tag::TRUSTED_CONFIRMATION_REQUIRED:
+ case Tag::UNLOCKED_DEVICE_REQUIRED:
+ case Tag::CREATION_DATETIME:
+ case Tag::UNIQUE_ID:
+ case Tag::IDENTITY_CREDENTIAL_KEY:
+ case Tag::STORAGE_KEY:
+ case Tag::MAC_LENGTH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Size of prefix for blobs, see keyBlobPrefix().
+//
+const size_t kKeyBlobPrefixSize = 8;
+
+// Magic used in blob prefix, see keyBlobPrefix().
+//
+const uint8_t kKeyBlobMagic[7] = {'p', 'K', 'M', 'b', 'l', 'o', 'b'};
+
+// Prefixes a keyblob returned by e.g. generateKey() with information on whether it
+// originated from the real underlying KeyMaster HAL or from soft-KeyMint.
+//
+// When dealing with a keyblob, use prefixedKeyBlobRemovePrefix() to remove the
+// prefix and prefixedKeyBlobIsSoftKeyMint() to determine its origin.
+//
+// Note how the prefix itself has a magic marker ("pKMblob") which can be used
+// to identify if a blob has a prefix at all (it's assumed that any valid blob
+// from KeyMint or KeyMaster HALs never starts with the magic). This is needed
+// because blobs persisted to disk prior to using this code will not have the
+// prefix and in that case we want prefixedKeyBlobRemovePrefix() to still work.
+//
+std::vector<uint8_t> keyBlobPrefix(const std::vector<uint8_t>& blob, bool isSoftKeyMint) {
+ std::vector<uint8_t> result;
+ result.reserve(blob.size() + kKeyBlobPrefixSize);
+ result.insert(result.begin(), kKeyBlobMagic, kKeyBlobMagic + sizeof kKeyBlobMagic);
+ result.push_back(isSoftKeyMint ? 1 : 0);
+ std::copy(blob.begin(), blob.end(), std::back_inserter(result));
+ return result;
+}
+
+// Helper for prefixedKeyBlobRemovePrefix() and prefixedKeyBlobIsSoftKeyMint().
+//
+// The first bool is whether there's a valid prefix. If there is, the second bool is
+// the |isSoftKeyMint| value of the prefix.
+//
+std::pair<bool, bool> prefixedKeyBlobParsePrefix(const std::vector<uint8_t>& prefixedBlob) {
+    // Having an unprefixed blob is not that uncommon; for example, all devices
+    // upgrading to keystore2 (so e.g. upgrading to Android 12) will have
+    // unprefixed blobs. So don't spew warnings/errors in this case...
+ if (prefixedBlob.size() < kKeyBlobPrefixSize) {
+ return std::make_pair(false, false);
+ }
+ if (std::memcmp(prefixedBlob.data(), kKeyBlobMagic, sizeof kKeyBlobMagic) != 0) {
+ return std::make_pair(false, false);
+ }
+ if (prefixedBlob[kKeyBlobPrefixSize - 1] != 0 && prefixedBlob[kKeyBlobPrefixSize - 1] != 1) {
+ return std::make_pair(false, false);
+ }
+ bool isSoftKeyMint = (prefixedBlob[kKeyBlobPrefixSize - 1] == 1);
+ return std::make_pair(true, isSoftKeyMint);
+}
+
+// Removes the prefix from a blob, returning the blob payload. If there's no prefix,
+// returns the passed-in blob.
+//
+std::vector<uint8_t> prefixedKeyBlobRemovePrefix(const std::vector<uint8_t>& prefixedBlob) {
+ auto parsed = prefixedKeyBlobParsePrefix(prefixedBlob);
+ if (!parsed.first) {
+ // Not actually prefixed, blob was probably persisted to disk prior to the
+ // prefixing code being introduced.
+ return prefixedBlob;
+ }
+ return std::vector<uint8_t>(prefixedBlob.begin() + kKeyBlobPrefixSize, prefixedBlob.end());
+}
+
+// Returns true if the blob's origin is soft-KeyMint, false otherwise or if there
+// is no prefix on the passed-in blob.
+//
+bool prefixedKeyBlobIsSoftKeyMint(const std::vector<uint8_t>& prefixedBlob) {
+ auto parsed = prefixedKeyBlobParsePrefix(prefixedBlob);
+ return parsed.second;
+}
+
+/*
+ * Returns true if the parameter is not understood by KM 4.1 and older but can be enforced by
+ * Keystore. These parameters need to be included in the returned KeyCharacteristics, but will not
+ * be passed to the legacy backend.
+ */
+bool isNewAndKeystoreEnforceable(const KMV1::KeyParameter& param) {
+ switch (param.tag) {
+ case KMV1::Tag::MAX_BOOT_LEVEL:
+ return true;
+ case KMV1::Tag::USAGE_COUNT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+std::vector<KMV1::KeyParameter>
+extractGenerationParams(const std::vector<KMV1::KeyParameter>& params) {
+ std::vector<KMV1::KeyParameter> result;
+ std::copy_if(params.begin(), params.end(), std::back_inserter(result), isKeyCreationParameter);
+ return result;
+}
+
+std::vector<KMV1::KeyParameter>
+extractAttestationParams(const std::vector<KMV1::KeyParameter>& params) {
+ std::vector<KMV1::KeyParameter> result;
+ std::copy_if(params.begin(), params.end(), std::back_inserter(result), isAttestationParameter);
+ return result;
+}
+
+std::vector<KMV1::KeyParameter>
+extractNewAndKeystoreEnforceableParams(const std::vector<KMV1::KeyParameter>& params) {
+ std::vector<KMV1::KeyParameter> result;
+ std::copy_if(params.begin(), params.end(), std::back_inserter(result),
+ isNewAndKeystoreEnforceable);
+ return result;
+}
+
+ScopedAStatus convertErrorCode(KMV1::ErrorCode result) {
+ if (result == KMV1::ErrorCode::OK) {
return ScopedAStatus::ok();
}
return ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(result));
}
-static V4_0_ErrorCode toErrorCode(const ScopedAStatus& status) {
+// Converts a V4 error code into a ScopedAStatus
+ScopedAStatus convertErrorCode(V4_0_ErrorCode result) {
+ return convertErrorCode(convert(result));
+}
+
+static KMV1::ErrorCode toErrorCode(const ScopedAStatus& status) {
if (status.getExceptionCode() == EX_SERVICE_SPECIFIC) {
- return static_cast<V4_0_ErrorCode>(status.getServiceSpecificError());
+ return static_cast<KMV1::ErrorCode>(status.getServiceSpecificError());
} else {
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
}
static std::vector<V4_0::KeyParameter>
convertKeyParametersToLegacy(const std::vector<KeyParameter>& kps) {
- std::vector<V4_0::KeyParameter> legacyKps(kps.size());
- std::transform(kps.begin(), kps.end(), legacyKps.begin(), convertKeyParameterToLegacy);
+ std::vector<V4_0::KeyParameter> legacyKps;
+ legacyKps.reserve(kps.size());
+ for (const auto& kp : kps) {
+ auto p = convertKeyParameterToLegacy(kp);
+ if (p.tag != V4_0::Tag::INVALID) {
+ legacyKps.push_back(std::move(p));
+ }
+ }
return legacyKps;
}
@@ -83,38 +288,61 @@
}
static std::vector<KeyCharacteristics>
-convertKeyCharacteristicsFromLegacy(KeyMintSecurityLevel securityLevel,
- const V4_0_KeyCharacteristics& legacyKc) {
- KeyCharacteristics kc;
- kc.securityLevel = securityLevel;
- kc.authorizations = convertKeyParametersFromLegacy(legacyKc.hardwareEnforced);
- return {kc};
+processLegacyCharacteristics(KeyMintSecurityLevel securityLevel,
+ const std::vector<KeyParameter>& genParams,
+ const V4_0_KeyCharacteristics& legacyKc) {
+
+ KeyCharacteristics keystoreEnforced{KeyMintSecurityLevel::KEYSTORE,
+ convertKeyParametersFromLegacy(legacyKc.softwareEnforced)};
+
+ // Add all parameters that we know can be enforced by keystore but not by the legacy backend.
+ auto unsupported_requested = extractNewAndKeystoreEnforceableParams(genParams);
+ std::copy(unsupported_requested.begin(), unsupported_requested.end(),
+ std::back_insert_iterator(keystoreEnforced.authorizations));
+
+ if (securityLevel == KeyMintSecurityLevel::SOFTWARE) {
+ // If the security level of the backend is `software` we expect the hardware enforced list
+ // to be empty. Log a warning otherwise.
+ if (legacyKc.hardwareEnforced.size() != 0) {
+ LOG(WARNING) << "Unexpected hardware enforced parameters.";
+ }
+ return {keystoreEnforced};
+ }
+
+ KeyCharacteristics hwEnforced{securityLevel,
+ convertKeyParametersFromLegacy(legacyKc.hardwareEnforced)};
+ return {hwEnforced, keystoreEnforced};
}
static V4_0_KeyFormat convertKeyFormatToLegacy(const KeyFormat& kf) {
return static_cast<V4_0_KeyFormat>(kf);
}
-static V4_0_HardwareAuthToken convertAuthTokenToLegacy(const HardwareAuthToken& at) {
+static V4_0_HardwareAuthToken convertAuthTokenToLegacy(const std::optional<HardwareAuthToken>& at) {
+ if (!at) return {};
+
V4_0_HardwareAuthToken legacyAt;
- legacyAt.challenge = at.challenge;
- legacyAt.userId = at.userId;
- legacyAt.authenticatorId = at.authenticatorId;
+ legacyAt.challenge = at->challenge;
+ legacyAt.userId = at->userId;
+ legacyAt.authenticatorId = at->authenticatorId;
legacyAt.authenticatorType =
static_cast<::android::hardware::keymaster::V4_0::HardwareAuthenticatorType>(
- at.authenticatorType);
- legacyAt.timestamp = at.timestamp.milliSeconds;
- legacyAt.mac = at.mac;
+ at->authenticatorType);
+ legacyAt.timestamp = at->timestamp.milliSeconds;
+ legacyAt.mac = at->mac;
return legacyAt;
}
-static V4_0_VerificationToken convertTimestampTokenToLegacy(const TimeStampToken& tst) {
+static V4_0_VerificationToken
+convertTimestampTokenToLegacy(const std::optional<TimeStampToken>& tst) {
+ if (!tst) return {};
+
V4_0_VerificationToken legacyVt;
- legacyVt.challenge = tst.challenge;
- legacyVt.timestamp = tst.timestamp.milliSeconds;
+ legacyVt.challenge = tst->challenge;
+ legacyVt.timestamp = tst->timestamp.milliSeconds;
// Legacy verification tokens were always minted by TEE.
legacyVt.securityLevel = V4_0::SecurityLevel::TRUSTED_ENVIRONMENT;
- legacyVt.mac = tst.mac;
+ legacyVt.mac = tst->mac;
return legacyVt;
}
@@ -163,50 +391,71 @@
// KeyMintDevice implementation
ScopedAStatus KeyMintDevice::getHardwareInfo(KeyMintHardwareInfo* _aidl_return) {
- // TODO: What do I do about the version number? Is it the version of the device I get?
- auto result = mDevice->getHardwareInfo([&](auto securityLevel, const auto& keymasterName,
- const auto& keymasterAuthorName) {
- securityLevel_ =
- static_cast<::aidl::android::hardware::security::keymint::SecurityLevel>(securityLevel);
-
- _aidl_return->securityLevel = securityLevel_;
- _aidl_return->keyMintName = keymasterName;
- _aidl_return->keyMintAuthorName = keymasterAuthorName;
- });
- if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
- }
+ auto result = mDevice->halVersion();
+ _aidl_return->versionNumber = result.majorVersion * 10 + result.minorVersion;
+ securityLevel_ = convert(result.securityLevel);
+ _aidl_return->securityLevel = securityLevel_;
+ _aidl_return->keyMintName = result.keymasterName;
+ _aidl_return->keyMintAuthorName = result.authorName;
+ _aidl_return->timestampTokenRequired = securityLevel_ == KMV1::SecurityLevel::STRONGBOX;
return ScopedAStatus::ok();
}
ScopedAStatus KeyMintDevice::addRngEntropy(const std::vector<uint8_t>& in_data) {
- V4_0_ErrorCode errorCode = mDevice->addRngEntropy(in_data);
- return convertErrorCode(errorCode);
+ auto result = mDevice->addRngEntropy(in_data);
+ if (!result.isOk()) {
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ }
+ return convertErrorCode(result);
}
-ScopedAStatus KeyMintDevice::generateKey(const std::vector<KeyParameter>& in_keyParams,
+ScopedAStatus KeyMintDevice::generateKey(const std::vector<KeyParameter>& inKeyParams,
+ const std::optional<AttestationKey>& in_attestationKey,
KeyCreationResult* out_creationResult) {
- auto legacyKeyParams = convertKeyParametersToLegacy(in_keyParams);
- V4_0_ErrorCode errorCode;
+
+ // Since KeyMaster doesn't support ECDH, route all key creation requests to
+ // soft-KeyMint if and only an ECDH key is requested.
+ //
+ // For this to work we'll need to also route begin() and deleteKey() calls to
+    // soft-KM. In order to do that, we'll prefix each keyblob with whether it was
+    // created by the real underlying KeyMaster HAL or by soft-KeyMint.
+ //
+ // See keyBlobPrefix() for more discussion.
+ //
+ for (const auto& keyParam : inKeyParams) {
+ if (keyParam.tag == Tag::PURPOSE &&
+ keyParam.value.get<KeyParameterValue::Tag::keyPurpose>() == KeyPurpose::AGREE_KEY) {
+ auto ret =
+ softKeyMintDevice_->generateKey(inKeyParams, in_attestationKey, out_creationResult);
+ if (ret.isOk()) {
+ out_creationResult->keyBlob = keyBlobPrefix(out_creationResult->keyBlob, true);
+ }
+ return ret;
+ }
+ }
+
+ auto legacyKeyGenParams = convertKeyParametersToLegacy(extractGenerationParams(inKeyParams));
+ KMV1::ErrorCode errorCode;
auto result = mDevice->generateKey(
- legacyKeyParams, [&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyBlob,
- const V4_0_KeyCharacteristics& keyCharacteristics) {
- errorCode = error;
- out_creationResult->keyBlob = keyBlob;
+ legacyKeyGenParams, [&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyBlob,
+ const V4_0_KeyCharacteristics& keyCharacteristics) {
+ errorCode = convert(error);
+ out_creationResult->keyBlob = keyBlobPrefix(keyBlob, false);
out_creationResult->keyCharacteristics =
- convertKeyCharacteristicsFromLegacy(securityLevel_, keyCharacteristics);
+ processLegacyCharacteristics(securityLevel_, inKeyParams, keyCharacteristics);
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
}
- if (errorCode == V4_0_ErrorCode::OK) {
- auto cert = getCertificate(in_keyParams, out_creationResult->keyBlob);
- if (std::holds_alternative<V4_0_ErrorCode>(cert)) {
- auto code = std::get<V4_0_ErrorCode>(cert);
+ if (errorCode == KMV1::ErrorCode::OK) {
+ auto cert = getCertificate(inKeyParams, out_creationResult->keyBlob);
+ if (std::holds_alternative<KMV1::ErrorCode>(cert)) {
+ auto code = std::get<KMV1::ErrorCode>(cert);
// We return OK in successful cases that do not generate a certificate.
- if (code != V4_0_ErrorCode::OK) {
+ if (code != KMV1::ErrorCode::OK) {
errorCode = code;
deleteKey(out_creationResult->keyBlob);
}
@@ -217,32 +466,34 @@
return convertErrorCode(errorCode);
}
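
The prefixing helpers used above (keyBlobPrefix, prefixedKeyBlobIsSoftKeyMint, prefixedKeyBlobRemovePrefix) are defined elsewhere in km_compat.cpp and are not part of this hunk. A minimal sketch of the idea, assuming one fixed magic prefix per backend (the real prefix bytes and return types may differ):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Hypothetical prefixes; the real constants live elsewhere in this file.
    static const std::vector<uint8_t> kSoftKeyMintPrefix = {'s', 'o', 'f', 't'};
    static const std::vector<uint8_t> kHardwarePrefix = {'h', 'a', 'r', 'd'};

    std::vector<uint8_t> keyBlobPrefix(const std::vector<uint8_t>& blob, bool isSoftKeyMint) {
        std::vector<uint8_t> result(isSoftKeyMint ? kSoftKeyMintPrefix : kHardwarePrefix);
        result.insert(result.end(), blob.begin(), blob.end());
        return result;
    }

    bool prefixedKeyBlobIsSoftKeyMint(const std::vector<uint8_t>& prefixedBlob) {
        return prefixedBlob.size() >= kSoftKeyMintPrefix.size() &&
               std::equal(kSoftKeyMintPrefix.begin(), kSoftKeyMintPrefix.end(),
                          prefixedBlob.begin());
    }

    std::vector<uint8_t> prefixedKeyBlobRemovePrefix(const std::vector<uint8_t>& prefixedBlob) {
        // A real implementation must also handle blobs that were never prefixed.
        return {prefixedBlob.begin() + kSoftKeyMintPrefix.size(), prefixedBlob.end()};
    }
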
-ScopedAStatus KeyMintDevice::importKey(const std::vector<KeyParameter>& in_inKeyParams,
+ScopedAStatus KeyMintDevice::importKey(const std::vector<KeyParameter>& inKeyParams,
KeyFormat in_inKeyFormat,
const std::vector<uint8_t>& in_inKeyData,
+ const std::optional<AttestationKey>& /* in_attestationKey */,
KeyCreationResult* out_creationResult) {
- auto legacyKeyParams = convertKeyParametersToLegacy(in_inKeyParams);
+    auto legacyKeyGenParams = convertKeyParametersToLegacy(extractGenerationParams(inKeyParams));
auto legacyKeyFormat = convertKeyFormatToLegacy(in_inKeyFormat);
- V4_0_ErrorCode errorCode;
- auto result = mDevice->importKey(legacyKeyParams, legacyKeyFormat, in_inKeyData,
+ KMV1::ErrorCode errorCode;
+    auto result = mDevice->importKey(legacyKeyGenParams, legacyKeyFormat, in_inKeyData,
[&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyBlob,
const V4_0_KeyCharacteristics& keyCharacteristics) {
- errorCode = error;
- out_creationResult->keyBlob = keyBlob;
+ errorCode = convert(error);
+ out_creationResult->keyBlob =
+ keyBlobPrefix(keyBlob, false);
out_creationResult->keyCharacteristics =
- convertKeyCharacteristicsFromLegacy(
- securityLevel_, keyCharacteristics);
+ processLegacyCharacteristics(
+ securityLevel_, inKeyParams, keyCharacteristics);
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
}
- if (errorCode == V4_0_ErrorCode::OK) {
- auto cert = getCertificate(in_inKeyParams, out_creationResult->keyBlob);
- if (std::holds_alternative<V4_0_ErrorCode>(cert)) {
- auto code = std::get<V4_0_ErrorCode>(cert);
+ if (errorCode == KMV1::ErrorCode::OK) {
+ auto cert = getCertificate(inKeyParams, out_creationResult->keyBlob);
+ if (std::holds_alternative<KMV1::ErrorCode>(cert)) {
+ auto code = std::get<KMV1::ErrorCode>(cert);
// We return OK in successful cases that do not generate a certificate.
- if (code != V4_0_ErrorCode::OK) {
+ if (code != KMV1::ErrorCode::OK) {
errorCode = code;
deleteKey(out_creationResult->keyBlob);
}
@@ -253,26 +504,36 @@
return convertErrorCode(errorCode);
}
-ScopedAStatus KeyMintDevice::importWrappedKey(
- const std::vector<uint8_t>& in_inWrappedKeyData,
- const std::vector<uint8_t>& in_inWrappingKeyBlob, const std::vector<uint8_t>& in_inMaskingKey,
- const std::vector<KeyParameter>& in_inUnwrappingParams, int64_t in_inPasswordSid,
- int64_t in_inBiometricSid, KeyCreationResult* out_creationResult) {
+ScopedAStatus
+KeyMintDevice::importWrappedKey(const std::vector<uint8_t>& in_inWrappedKeyData,
+ const std::vector<uint8_t>& in_inPrefixedWrappingKeyBlob,
+ const std::vector<uint8_t>& in_inMaskingKey,
+ const std::vector<KeyParameter>& in_inUnwrappingParams,
+ int64_t in_inPasswordSid, int64_t in_inBiometricSid,
+ KeyCreationResult* out_creationResult) {
+ const std::vector<uint8_t>& wrappingKeyBlob =
+ prefixedKeyBlobRemovePrefix(in_inPrefixedWrappingKeyBlob);
+ if (prefixedKeyBlobIsSoftKeyMint(in_inPrefixedWrappingKeyBlob)) {
+ return softKeyMintDevice_->importWrappedKey(
+ in_inWrappedKeyData, wrappingKeyBlob, in_inMaskingKey, in_inUnwrappingParams,
+ in_inPasswordSid, in_inBiometricSid, out_creationResult);
+ }
+
auto legacyUnwrappingParams = convertKeyParametersToLegacy(in_inUnwrappingParams);
- V4_0_ErrorCode errorCode;
+ KMV1::ErrorCode errorCode;
auto result = mDevice->importWrappedKey(
- in_inWrappedKeyData, in_inWrappingKeyBlob, in_inMaskingKey, legacyUnwrappingParams,
+ in_inWrappedKeyData, wrappingKeyBlob, in_inMaskingKey, legacyUnwrappingParams,
in_inPasswordSid, in_inBiometricSid,
[&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyBlob,
const V4_0_KeyCharacteristics& keyCharacteristics) {
- errorCode = error;
- out_creationResult->keyBlob = keyBlob;
+ errorCode = convert(error);
+ out_creationResult->keyBlob = keyBlobPrefix(keyBlob, false);
out_creationResult->keyCharacteristics =
- convertKeyCharacteristicsFromLegacy(securityLevel_, keyCharacteristics);
+ processLegacyCharacteristics(securityLevel_, {}, keyCharacteristics);
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
}
return convertErrorCode(errorCode);
}
@@ -282,27 +543,41 @@
std::vector<uint8_t>* _aidl_return) {
auto legacyUpgradeParams = convertKeyParametersToLegacy(in_inUpgradeParams);
V4_0_ErrorCode errorCode;
+
auto result =
- mDevice->upgradeKey(in_inKeyBlobToUpgrade, legacyUpgradeParams,
+ mDevice->upgradeKey(prefixedKeyBlobRemovePrefix(in_inKeyBlobToUpgrade), legacyUpgradeParams,
[&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& upgradedKeyBlob) {
errorCode = error;
- *_aidl_return = upgradedKeyBlob;
+ *_aidl_return = keyBlobPrefix(upgradedKeyBlob, false);
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
}
return convertErrorCode(errorCode);
}
-ScopedAStatus KeyMintDevice::deleteKey(const std::vector<uint8_t>& in_inKeyBlob) {
- V4_0_ErrorCode errorCode = mDevice->deleteKey(in_inKeyBlob);
- return convertErrorCode(errorCode);
+ScopedAStatus KeyMintDevice::deleteKey(const std::vector<uint8_t>& prefixedKeyBlob) {
+ const std::vector<uint8_t>& keyBlob = prefixedKeyBlobRemovePrefix(prefixedKeyBlob);
+ if (prefixedKeyBlobIsSoftKeyMint(prefixedKeyBlob)) {
+ return softKeyMintDevice_->deleteKey(keyBlob);
+ }
+
+ auto result = mDevice->deleteKey(keyBlob);
+ if (!result.isOk()) {
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ }
+ return convertErrorCode(result);
}
ScopedAStatus KeyMintDevice::deleteAllKeys() {
- V4_0_ErrorCode errorCode = mDevice->deleteAllKeys();
- return convertErrorCode(errorCode);
+ auto result = mDevice->deleteAllKeys();
+ if (!result.isOk()) {
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ }
+ return convertErrorCode(result);
}
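
Every wrapper above follows the same shape: a failed HIDL transaction is logged and reported as KMV1::ErrorCode::UNKNOWN_ERROR, otherwise the HAL's own result is converted. A hypothetical helper capturing that pattern (not part of this change; it relies on the convertErrorCode overloads already used above):

    template <typename HidlResult>
    static ScopedAStatus logAndConvert(const char* func, HidlResult&& result) {
        if (!result.isOk()) {
            LOG(ERROR) << func << " transaction failed. " << result.description();
            return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
        }
        return convertErrorCode(result);
    }

    // e.g. deleteAllKeys() could then be written as:
    //     return logAndConvert(__func__, mDevice->deleteAllKeys());
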
// We're not implementing this.
@@ -312,126 +587,207 @@
}
ScopedAStatus KeyMintDevice::begin(KeyPurpose in_inPurpose,
- const std::vector<uint8_t>& in_inKeyBlob,
+ const std::vector<uint8_t>& prefixedKeyBlob,
const std::vector<KeyParameter>& in_inParams,
const HardwareAuthToken& in_inAuthToken,
BeginResult* _aidl_return) {
if (!mOperationSlots.claimSlot()) {
return convertErrorCode(V4_0_ErrorCode::TOO_MANY_OPERATIONS);
}
+
+ const std::vector<uint8_t>& in_inKeyBlob = prefixedKeyBlobRemovePrefix(prefixedKeyBlob);
+ if (prefixedKeyBlobIsSoftKeyMint(prefixedKeyBlob)) {
+ return softKeyMintDevice_->begin(in_inPurpose, in_inKeyBlob, in_inParams, in_inAuthToken,
+ _aidl_return);
+ }
+
auto legacyPurpose =
static_cast<::android::hardware::keymaster::V4_0::KeyPurpose>(in_inPurpose);
auto legacyParams = convertKeyParametersToLegacy(in_inParams);
auto legacyAuthToken = convertAuthTokenToLegacy(in_inAuthToken);
- V4_0_ErrorCode errorCode;
+ KMV1::ErrorCode errorCode;
auto result = mDevice->begin(
legacyPurpose, in_inKeyBlob, legacyParams, legacyAuthToken,
[&](V4_0_ErrorCode error, const hidl_vec<V4_0_KeyParameter>& outParams,
uint64_t operationHandle) {
- errorCode = error;
- _aidl_return->challenge = operationHandle; // TODO: Is this right?
+ errorCode = convert(error);
+ _aidl_return->challenge = operationHandle;
_aidl_return->params = convertKeyParametersFromLegacy(outParams);
_aidl_return->operation = ndk::SharedRefBase::make<KeyMintOperation>(
mDevice, operationHandle, &mOperationSlots, error == V4_0_ErrorCode::OK);
});
if (!result.isOk()) {
- // TODO: In this case we're guaranteed that _aidl_return was not initialized, right?
- mOperationSlots.freeSlot();
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != V4_0_ErrorCode::OK) {
+ if (errorCode != KMV1::ErrorCode::OK) {
mOperationSlots.freeSlot();
}
return convertErrorCode(errorCode);
}
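
begin() claims an operation slot up front; on success the slot is owned by the returned KeyMintOperation (the isActive flag passed to its constructor), and it is released again on any error path and when the operation finishes, aborts, or is destroyed. The slot counter itself is declared in km_compat.h; a minimal sketch of what setNumFreeSlots()/claimSlot()/freeSlot() are assumed to do:

    #include <cstdint>
    #include <mutex>

    // Assumed shape of the slot accounting; the real class lives in km_compat.h.
    class OperationSlots {
        std::mutex lock_;
        uint8_t numFreeSlots_ = 0;

      public:
        void setNumFreeSlots(uint8_t numFreeSlots) {
            std::lock_guard<std::mutex> lock(lock_);
            numFreeSlots_ = numFreeSlots;
        }
        bool claimSlot() {
            std::lock_guard<std::mutex> lock(lock_);
            if (numFreeSlots_ == 0) return false;
            --numFreeSlots_;
            return true;
        }
        void freeSlot() {
            std::lock_guard<std::mutex> lock(lock_);
            ++numFreeSlots_;
        }
    };
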
-ScopedAStatus KeyMintOperation::update(const std::optional<KeyParameterArray>& in_inParams,
- const std::optional<std::vector<uint8_t>>& in_input,
- const std::optional<HardwareAuthToken>& in_inAuthToken,
- const std::optional<TimeStampToken>& in_inTimeStampToken,
- std::optional<KeyParameterArray>* out_outParams,
- std::optional<ByteArray>* out_output,
- int32_t* _aidl_return) {
- std::vector<V4_0_KeyParameter> legacyParams;
- if (in_inParams.has_value()) {
- legacyParams = convertKeyParametersToLegacy(in_inParams.value().params);
+ScopedAStatus KeyMintDevice::deviceLocked(bool passwordOnly,
+ const std::optional<TimeStampToken>& timestampToken) {
+ V4_0_VerificationToken token;
+ if (timestampToken.has_value()) {
+ token = convertTimestampTokenToLegacy(timestampToken.value());
}
- auto input = in_input.value_or(std::vector<uint8_t>());
- V4_0_HardwareAuthToken authToken;
- if (in_inAuthToken.has_value()) {
- authToken = convertAuthTokenToLegacy(in_inAuthToken.value());
+ auto ret = mDevice->deviceLocked(passwordOnly, token);
+ if (!ret.isOk()) {
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ } else {
+ return convertErrorCode(KMV1::ErrorCode::OK);
}
- V4_0_VerificationToken verificationToken;
- if (in_inTimeStampToken.has_value()) {
- verificationToken = convertTimestampTokenToLegacy(in_inTimeStampToken.value());
+}
+
+ScopedAStatus KeyMintDevice::earlyBootEnded() {
+ auto ret = mDevice->earlyBootEnded();
+ if (!ret.isOk()) {
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ } else {
+ return convertErrorCode(KMV1::ErrorCode::OK);
}
- V4_0_ErrorCode errorCode;
+}
+
+ScopedAStatus
+KeyMintDevice::convertStorageKeyToEphemeral(const std::vector<uint8_t>& prefixedStorageKeyBlob,
+ std::vector<uint8_t>* ephemeralKeyBlob) {
+ KMV1::ErrorCode km_error;
+
+ /*
+     * Wrapped storage keys cannot be emulated (and they don't need to be: if a platform
+     * supports wrapped storage keys, then the legacy backend supports them too). So error
+     * out if the wrapped storage key given is a soft KeyMint key.
+ */
+ if (prefixedKeyBlobIsSoftKeyMint(prefixedStorageKeyBlob)) {
+ return convertErrorCode(KMV1::ErrorCode::UNIMPLEMENTED);
+ }
+
+ const std::vector<uint8_t>& storageKeyBlob =
+ prefixedKeyBlobRemovePrefix(prefixedStorageKeyBlob);
+
+ auto hidlCb = [&](V4_0_ErrorCode ret, const hidl_vec<uint8_t>& exportedKeyBlob) {
+ km_error = convert(ret);
+ if (km_error != KMV1::ErrorCode::OK) return;
+ /*
+ * This must return the blob without the prefix since it will be used directly
+ * as a storage encryption key. But this is alright, since this wrapped ephemeral
+ * key shouldn't/won't ever be used with keymint.
+ */
+ *ephemeralKeyBlob = exportedKeyBlob;
+ };
+
+ auto ret = mDevice->exportKey(V4_0_KeyFormat::RAW, storageKeyBlob, {}, {}, hidlCb);
+ if (!ret.isOk()) {
+ LOG(ERROR) << __func__ << " export_key failed: " << ret.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ }
+ if (km_error != KMV1::ErrorCode::OK)
+ LOG(ERROR) << __func__ << " export_key failed, code " << int32_t(km_error);
+
+ return convertErrorCode(km_error);
+}
+
+ScopedAStatus KeyMintDevice::performOperation(const std::vector<uint8_t>& /* request */,
+ std::vector<uint8_t>* /* response */) {
+ return convertErrorCode(KMV1::ErrorCode::UNIMPLEMENTED);
+}
+
+ScopedAStatus KeyMintOperation::updateAad(const std::vector<uint8_t>& input,
+ const std::optional<HardwareAuthToken>& optAuthToken,
+ const std::optional<TimeStampToken>& optTimeStampToken) {
+ V4_0_HardwareAuthToken authToken = convertAuthTokenToLegacy(optAuthToken);
+ V4_0_VerificationToken verificationToken = convertTimestampTokenToLegacy(optTimeStampToken);
+
+ KMV1::ErrorCode errorCode;
auto result = mDevice->update(
- mOperationHandle, legacyParams, input, authToken, verificationToken,
- [&](V4_0_ErrorCode error, uint32_t inputConsumed,
- const hidl_vec<V4_0_KeyParameter>& outParams, const hidl_vec<uint8_t>& output) {
- errorCode = error;
- out_outParams->emplace();
- out_outParams->value().params = convertKeyParametersFromLegacy(outParams);
- out_output->emplace();
- out_output->value().data = output;
- *_aidl_return = inputConsumed;
- });
+ mOperationHandle, {V4_0::makeKeyParameter(V4_0::TAG_ASSOCIATED_DATA, input)}, {}, authToken,
+ verificationToken,
+ [&](V4_0_ErrorCode error, auto, auto, auto) { errorCode = convert(error); });
+
if (!result.isOk()) {
- mOperationSlot.freeSlot();
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != V4_0_ErrorCode::OK) {
- mOperationSlot.freeSlot();
- }
+ if (errorCode != KMV1::ErrorCode::OK) mOperationSlot.freeSlot();
+
return convertErrorCode(errorCode);
}
-ScopedAStatus KeyMintOperation::finish(const std::optional<KeyParameterArray>& in_inParams,
- const std::optional<std::vector<uint8_t>>& in_input,
- const std::optional<std::vector<uint8_t>>& in_inSignature,
- const std::optional<HardwareAuthToken>& in_authToken,
- const std::optional<TimeStampToken>& in_inTimeStampToken,
- std::optional<KeyParameterArray>* out_outParams,
- std::vector<uint8_t>* _aidl_return) {
- V4_0_ErrorCode errorCode;
- std::vector<V4_0_KeyParameter> legacyParams;
- if (in_inParams.has_value()) {
- legacyParams = convertKeyParametersToLegacy(in_inParams.value().params);
+ScopedAStatus KeyMintOperation::update(const std::vector<uint8_t>& input,
+ const std::optional<HardwareAuthToken>& optAuthToken,
+ const std::optional<TimeStampToken>& optTimeStampToken,
+ std::vector<uint8_t>* out_output) {
+ V4_0_HardwareAuthToken authToken = convertAuthTokenToLegacy(optAuthToken);
+ V4_0_VerificationToken verificationToken = convertTimestampTokenToLegacy(optTimeStampToken);
+
+ size_t inputPos = 0;
+ *out_output = {};
+ KMV1::ErrorCode errorCode = KMV1::ErrorCode::OK;
+
+ while (inputPos < input.size() && errorCode == KMV1::ErrorCode::OK) {
+ auto result =
+ mDevice->update(mOperationHandle, {} /* inParams */,
+ {input.begin() + inputPos, input.end()}, authToken, verificationToken,
+ [&](V4_0_ErrorCode error, uint32_t inputConsumed, auto /* outParams */,
+ const hidl_vec<uint8_t>& output) {
+ errorCode = convert(error);
+ out_output->insert(out_output->end(), output.begin(), output.end());
+ inputPos += inputConsumed;
+ });
+
+ if (!result.isOk()) {
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
+ }
}
+
+ if (errorCode != KMV1::ErrorCode::OK) mOperationSlot.freeSlot();
+
+ return convertErrorCode(errorCode);
+}
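
The loop above exists because a Keymaster 4.x update() may consume only part of the data it is given: the callback reports inputConsumed, the wrapper advances inputPos by that amount and re-sends the remaining tail, concatenating every chunk of output. For example, if the HAL consumes 1024 bytes of a 1536-byte input on the first call, the second call passes the remaining 512 bytes starting at offset 1024; the loop ends once inputPos reaches input.size() or an error is returned.
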
+
+ScopedAStatus
+KeyMintOperation::finish(const std::optional<std::vector<uint8_t>>& in_input,
+ const std::optional<std::vector<uint8_t>>& in_signature,
+ const std::optional<HardwareAuthToken>& in_authToken,
+ const std::optional<TimeStampToken>& in_timeStampToken,
+ const std::optional<std::vector<uint8_t>>& in_confirmationToken,
+ std::vector<uint8_t>* out_output) {
auto input = in_input.value_or(std::vector<uint8_t>());
- auto signature = in_inSignature.value_or(std::vector<uint8_t>());
- V4_0_HardwareAuthToken authToken;
- if (in_authToken.has_value()) {
- authToken = convertAuthTokenToLegacy(in_authToken.value());
+ auto signature = in_signature.value_or(std::vector<uint8_t>());
+ V4_0_HardwareAuthToken authToken = convertAuthTokenToLegacy(in_authToken);
+ V4_0_VerificationToken verificationToken = convertTimestampTokenToLegacy(in_timeStampToken);
+
+ std::vector<V4_0_KeyParameter> inParams;
+ if (in_confirmationToken) {
+ inParams.push_back(makeKeyParameter(V4_0::TAG_CONFIRMATION_TOKEN, *in_confirmationToken));
}
- V4_0_VerificationToken verificationToken;
- if (in_inTimeStampToken.has_value()) {
- verificationToken = convertTimestampTokenToLegacy(in_inTimeStampToken.value());
- }
+
+ KMV1::ErrorCode errorCode;
auto result = mDevice->finish(
- mOperationHandle, legacyParams, input, signature, authToken, verificationToken,
- [&](V4_0_ErrorCode error, const hidl_vec<V4_0_KeyParameter>& outParams,
- const hidl_vec<uint8_t>& output) {
- errorCode = error;
- out_outParams->emplace();
- out_outParams->value().params = convertKeyParametersFromLegacy(outParams);
- *_aidl_return = output;
+ mOperationHandle, inParams, input, signature, authToken, verificationToken,
+ [&](V4_0_ErrorCode error, auto /* outParams */, const hidl_vec<uint8_t>& output) {
+ errorCode = convert(error);
+ *out_output = output;
});
+
mOperationSlot.freeSlot();
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
return convertErrorCode(errorCode);
}
ScopedAStatus KeyMintOperation::abort() {
- V4_0_ErrorCode errorCode = mDevice->abort(mOperationHandle);
+ auto result = mDevice->abort(mOperationHandle);
mOperationSlot.freeSlot();
- return convertErrorCode(errorCode);
+ if (!result.isOk()) {
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ return convertErrorCode(KMV1::ErrorCode::UNKNOWN_ERROR);
+ }
+ return convertErrorCode(result);
}
KeyMintOperation::~KeyMintOperation() {
@@ -446,18 +802,18 @@
// SecureClock implementation
ScopedAStatus SecureClock::generateTimeStamp(int64_t in_challenge, TimeStampToken* _aidl_return) {
- V4_0_ErrorCode errorCode;
+ KMV1::ErrorCode errorCode;
auto result = mDevice->verifyAuthorization(
in_challenge, {}, V4_0_HardwareAuthToken(),
[&](V4_0_ErrorCode error, const V4_0_VerificationToken& token) {
- errorCode = error;
+ errorCode = convert(error);
_aidl_return->challenge = token.challenge;
_aidl_return->timestamp.milliSeconds = token.timestamp;
_aidl_return->mac = token.mac;
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
return convertErrorCode(errorCode);
}
@@ -465,17 +821,17 @@
// SharedSecret implementation
ScopedAStatus SharedSecret::getSharedSecretParameters(SharedSecretParameters* _aidl_return) {
- V4_0_ErrorCode errorCode;
+ KMV1::ErrorCode errorCode;
auto result = mDevice->getHmacSharingParameters(
[&](V4_0_ErrorCode error, const V4_0_HmacSharingParameters& params) {
- errorCode = error;
+ errorCode = convert(error);
_aidl_return->seed = params.seed;
std::copy(params.nonce.data(), params.nonce.data() + params.nonce.elementCount(),
std::back_inserter(_aidl_return->nonce));
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
return convertErrorCode(errorCode);
}
@@ -483,16 +839,16 @@
ScopedAStatus
SharedSecret::computeSharedSecret(const std::vector<SharedSecretParameters>& in_params,
std::vector<uint8_t>* _aidl_return) {
- V4_0_ErrorCode errorCode;
+ KMV1::ErrorCode errorCode;
auto legacyParams = convertSharedSecretParametersToLegacy(in_params);
auto result = mDevice->computeSharedHmac(
legacyParams, [&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& sharingCheck) {
- errorCode = error;
+ errorCode = convert(error);
*_aidl_return = sharingCheck;
});
if (!result.isOk()) {
- return ScopedAStatus::fromServiceSpecificError(
- static_cast<int32_t>(ResponseCode::SYSTEM_ERROR));
+ LOG(ERROR) << __func__ << " transaction failed. " << result.description();
+ errorCode = KMV1::ErrorCode::UNKNOWN_ERROR;
}
return convertErrorCode(errorCode);
}
@@ -503,6 +859,7 @@
static auto getParam(const std::vector<KeyParameter>& keyParams, KMV1::TypedTag<type, tag> ttag)
-> decltype(authorizationValue(ttag, KeyParameter())) {
for (const auto& p : keyParams) {
+
if (auto v = authorizationValue(ttag, p)) {
return v;
}
@@ -524,9 +881,12 @@
auto bestSoFar = sortedOptions.end();
for (const KeyParameter& kp : keyParams) {
if (auto value = authorizationValue(tag, kp)) {
- auto it = std::find(sortedOptions.begin(), sortedOptions.end(), *value);
- if (std::distance(it, bestSoFar) < 0) {
- bestSoFar = it;
+ auto candidate = std::find(sortedOptions.begin(), sortedOptions.end(), *value);
+ // sortedOptions is sorted from best to worst. `std::distance(first, last)` counts the
+ // hops from `first` to `last`. So a better `candidate` yields a positive distance to
+ // `bestSoFar`.
+ if (std::distance(candidate, bestSoFar) > 0) {
+ bestSoFar = candidate;
}
}
}
@@ -536,12 +896,12 @@
return *bestSoFar;
}
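
A concrete example of the fixed comparison above, using the digest preference list passed by signCertificate() further down: with sortedOptions = {SHA_2_256, SHA_2_512, SHA_2_384, SHA_2_224, SHA1, NONE} (best first) and key parameters that only authorize SHA1 and SHA_2_384, the SHA1 hit sets bestSoFar near the end of the list; the later SHA_2_384 hit sits closer to the front, so std::distance(candidate, bestSoFar) is positive and bestSoFar moves to it:

    // Sketch: the earlier (better) of the authorized options wins.
    auto digest = getMaximum(keyParams, KMV1::TAG_DIGEST,
                             {Digest::SHA_2_256, Digest::SHA_2_512, Digest::SHA_2_384,
                              Digest::SHA_2_224, Digest::SHA1, Digest::NONE});
    // With {SHA1, SHA_2_384} in keyParams, digest == Digest::SHA_2_384.
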
-static std::variant<keystore::X509_Ptr, V4_0_ErrorCode>
+static std::variant<keystore::X509_Ptr, KMV1::ErrorCode>
makeCert(::android::sp<Keymaster> mDevice, const std::vector<KeyParameter>& keyParams,
const std::vector<uint8_t>& keyBlob) {
// Start generating the certificate.
// Get public key for makeCert.
- V4_0_ErrorCode errorCode;
+ KMV1::ErrorCode errorCode;
std::vector<uint8_t> key;
static std::vector<uint8_t> empty_vector;
auto unwrapBlob = [&](auto b) -> const std::vector<uint8_t>& {
@@ -554,43 +914,57 @@
V4_0_KeyFormat::X509, keyBlob, unwrapBlob(getParam(keyParams, KMV1::TAG_APPLICATION_ID)),
unwrapBlob(getParam(keyParams, KMV1::TAG_APPLICATION_DATA)),
[&](V4_0_ErrorCode error, const hidl_vec<uint8_t>& keyMaterial) {
- errorCode = error;
+ errorCode = convert(error);
key = keyMaterial;
});
if (!result.isOk()) {
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ LOG(ERROR) << __func__ << " exportKey transaction failed. " << result.description();
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != V4_0_ErrorCode::OK) {
+ if (errorCode != KMV1::ErrorCode::OK) {
return errorCode;
}
// Get pkey for makeCert.
CBS cbs;
CBS_init(&cbs, key.data(), key.size());
auto pkey = EVP_parse_public_key(&cbs);
- // makeCert
- // TODO: Get the serial and subject from key params once the tags are added. Also use new tags
- // for the two datetime parameters once we get those.
- uint64_t activation = 0;
- if (auto date = getParam(keyParams, KMV1::TAG_ACTIVE_DATETIME)) {
- activation = *date;
+ // makeCert
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> subject;
+ if (auto blob = getParam(keyParams, KMV1::TAG_CERTIFICATE_SUBJECT)) {
+ subject = *blob;
}
- uint64_t expiration = std::numeric_limits<uint64_t>::max();
- if (auto date = getParam(keyParams, KMV1::TAG_USAGE_EXPIRE_DATETIME)) {
- expiration = *date;
+
+ std::optional<std::reference_wrapper<const std::vector<uint8_t>>> serial;
+ if (auto blob = getParam(keyParams, KMV1::TAG_CERTIFICATE_SERIAL)) {
+ serial = *blob;
+ }
+
+ int64_t activation;
+ if (auto date = getParam(keyParams, KMV1::TAG_CERTIFICATE_NOT_BEFORE)) {
+ activation = static_cast<int64_t>(*date);
+ } else {
+ return KMV1::ErrorCode::MISSING_NOT_BEFORE;
+ }
+
+ int64_t expiration;
+ if (auto date = getParam(keyParams, KMV1::TAG_CERTIFICATE_NOT_AFTER)) {
+ expiration = static_cast<int64_t>(*date);
+ } else {
+ return KMV1::ErrorCode::MISSING_NOT_AFTER;
}
auto certOrError = keystore::makeCert(
- pkey, 42, "TODO", activation, expiration, false /* intentionally left blank */,
+ pkey, serial, subject, activation, expiration, false /* intentionally left blank */,
std::nullopt /* intentionally left blank */, std::nullopt /* intentionally left blank */);
if (std::holds_alternative<keystore::CertUtilsError>(certOrError)) {
LOG(ERROR) << __func__ << ": Failed to make certificate";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
return std::move(std::get<keystore::X509_Ptr>(certOrError));
}
-static std::variant<keystore::Algo, V4_0_ErrorCode> getKeystoreAlgorithm(Algorithm algorithm) {
+static std::variant<keystore::Algo, KMV1::ErrorCode> getKeystoreAlgorithm(Algorithm algorithm) {
switch (algorithm) {
case Algorithm::RSA:
return keystore::Algo::RSA;
@@ -598,11 +972,11 @@
return keystore::Algo::ECDSA;
default:
LOG(ERROR) << __func__ << ": This should not be called with symmetric algorithm.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
}
-static std::variant<keystore::Padding, V4_0_ErrorCode> getKeystorePadding(PaddingMode padding) {
+static std::variant<keystore::Padding, KMV1::ErrorCode> getKeystorePadding(PaddingMode padding) {
switch (padding) {
case PaddingMode::RSA_PKCS1_1_5_SIGN:
return keystore::Padding::PKCS1_5;
@@ -613,7 +987,7 @@
}
}
-static std::variant<keystore::Digest, V4_0_ErrorCode> getKeystoreDigest(Digest digest) {
+static std::variant<keystore::Digest, KMV1::ErrorCode> getKeystoreDigest(Digest digest) {
switch (digest) {
case Digest::SHA1:
return keystore::Digest::SHA1;
@@ -628,62 +1002,62 @@
return keystore::Digest::SHA512;
default:
LOG(ERROR) << __func__ << ": Unknown digest.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
}
-std::optional<V4_0_ErrorCode>
+std::optional<KMV1::ErrorCode>
KeyMintDevice::signCertificate(const std::vector<KeyParameter>& keyParams,
- const std::vector<uint8_t>& keyBlob, X509* cert) {
+ const std::vector<uint8_t>& prefixedKeyBlob, X509* cert) {
+
auto algorithm = getParam(keyParams, KMV1::TAG_ALGORITHM);
auto algoOrError = getKeystoreAlgorithm(*algorithm);
- if (std::holds_alternative<V4_0_ErrorCode>(algoOrError)) {
- return std::get<V4_0_ErrorCode>(algoOrError);
+ if (std::holds_alternative<KMV1::ErrorCode>(algoOrError)) {
+ return std::get<KMV1::ErrorCode>(algoOrError);
}
auto algo = std::get<keystore::Algo>(algoOrError);
auto origPadding = getMaximum(keyParams, KMV1::TAG_PADDING,
{PaddingMode::RSA_PSS, PaddingMode::RSA_PKCS1_1_5_SIGN});
auto paddingOrError = getKeystorePadding(origPadding);
- if (std::holds_alternative<V4_0_ErrorCode>(paddingOrError)) {
- return std::get<V4_0_ErrorCode>(paddingOrError);
+ if (std::holds_alternative<KMV1::ErrorCode>(paddingOrError)) {
+ return std::get<KMV1::ErrorCode>(paddingOrError);
}
auto padding = std::get<keystore::Padding>(paddingOrError);
- auto origDigest = getMaximum(
- keyParams, KMV1::TAG_DIGEST,
- {Digest::SHA_2_256, Digest::SHA_2_512, Digest::SHA_2_384, Digest::SHA_2_224, Digest::SHA1});
+ auto origDigest = getMaximum(keyParams, KMV1::TAG_DIGEST,
+ {Digest::SHA_2_256, Digest::SHA_2_512, Digest::SHA_2_384,
+ Digest::SHA_2_224, Digest::SHA1, Digest::NONE});
auto digestOrError = getKeystoreDigest(origDigest);
- if (std::holds_alternative<V4_0_ErrorCode>(digestOrError)) {
- return std::get<V4_0_ErrorCode>(digestOrError);
+ if (std::holds_alternative<KMV1::ErrorCode>(digestOrError)) {
+ return std::get<KMV1::ErrorCode>(digestOrError);
}
auto digest = std::get<keystore::Digest>(digestOrError);
- V4_0_ErrorCode errorCode = V4_0_ErrorCode::OK;
+ KMV1::ErrorCode errorCode = KMV1::ErrorCode::OK;
auto error = keystore::signCertWith(
&*cert,
[&](const uint8_t* data, size_t len) {
std::vector<uint8_t> dataVec(data, data + len);
std::vector<KeyParameter> kps = {
- KMV1::makeKeyParameter(KMV1::TAG_PADDING, origPadding),
KMV1::makeKeyParameter(KMV1::TAG_DIGEST, origDigest),
};
+ if (algorithm == KMV1::Algorithm::RSA) {
+ kps.push_back(KMV1::makeKeyParameter(KMV1::TAG_PADDING, origPadding));
+ }
BeginResult beginResult;
- auto error = begin(KeyPurpose::SIGN, keyBlob, kps, HardwareAuthToken(), &beginResult);
+ auto error =
+ begin(KeyPurpose::SIGN, prefixedKeyBlob, kps, HardwareAuthToken(), &beginResult);
if (!error.isOk()) {
errorCode = toErrorCode(error);
return std::vector<uint8_t>();
}
- std::optional<KeyParameterArray> outParams;
- std::optional<ByteArray> outByte;
- int32_t status;
- error = beginResult.operation->update(std::nullopt, dataVec, std::nullopt, std::nullopt,
- &outParams, &outByte, &status);
- if (!error.isOk()) {
- errorCode = toErrorCode(error);
- return std::vector<uint8_t>();
- }
+
std::vector<uint8_t> result;
- error = beginResult.operation->finish(std::nullopt, std::nullopt, std::nullopt,
- std::nullopt, std::nullopt, &outParams, &result);
+ error = beginResult.operation->finish(dataVec, //
+ {} /* signature */, //
+ {} /* authToken */, //
+ {} /* timestampToken */, //
+ {} /* confirmationToken */, //
+ &result);
if (!error.isOk()) {
errorCode = toErrorCode(error);
return std::vector<uint8_t>();
@@ -694,40 +1068,42 @@
if (error) {
LOG(ERROR) << __func__
<< ": signCertWith failed. (Callback diagnosed: " << toString(errorCode) << ")";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != V4_0_ErrorCode::OK) {
+ if (errorCode != KMV1::ErrorCode::OK) {
return errorCode;
}
return std::nullopt;
}
-std::variant<std::vector<Certificate>, V4_0_ErrorCode>
+std::variant<std::vector<Certificate>, KMV1::ErrorCode>
KeyMintDevice::getCertificate(const std::vector<KeyParameter>& keyParams,
- const std::vector<uint8_t>& keyBlob) {
+ const std::vector<uint8_t>& prefixedKeyBlob) {
+ const std::vector<uint8_t>& keyBlob = prefixedKeyBlobRemovePrefix(prefixedKeyBlob);
+
// There are no certificates for symmetric keys.
auto algorithm = getParam(keyParams, KMV1::TAG_ALGORITHM);
if (!algorithm) {
LOG(ERROR) << __func__ << ": Unable to determine key algorithm.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
switch (*algorithm) {
case Algorithm::RSA:
case Algorithm::EC:
break;
default:
- return V4_0_ErrorCode::OK;
+ return KMV1::ErrorCode::OK;
}
// If attestation was requested, call and use attestKey.
if (containsParam(keyParams, KMV1::TAG_ATTESTATION_CHALLENGE)) {
- auto legacyParams = convertKeyParametersToLegacy(keyParams);
+ auto legacyParams = convertKeyParametersToLegacy(extractAttestationParams(keyParams));
std::vector<Certificate> certs;
- V4_0_ErrorCode errorCode = V4_0_ErrorCode::OK;
+ KMV1::ErrorCode errorCode = KMV1::ErrorCode::OK;
auto result = mDevice->attestKey(
keyBlob, legacyParams,
- [&](V4_0_ErrorCode error, const hidl_vec<hidl_vec<uint8_t>>& certChain) {
- errorCode = error;
+ [&](V4_0::ErrorCode error, const hidl_vec<hidl_vec<uint8_t>>& certChain) {
+ errorCode = convert(error);
for (const auto& cert : certChain) {
Certificate certificate;
certificate.encodedCertificate = cert;
@@ -736,9 +1112,9 @@
});
if (!result.isOk()) {
LOG(ERROR) << __func__ << ": Call to attestKey failed.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
- if (errorCode != V4_0_ErrorCode::OK) {
+ if (errorCode != KMV1::ErrorCode::OK) {
return errorCode;
}
return certs;
@@ -746,8 +1122,8 @@
// makeCert
auto certOrError = makeCert(mDevice, keyParams, keyBlob);
- if (std::holds_alternative<V4_0_ErrorCode>(certOrError)) {
- return std::get<V4_0_ErrorCode>(certOrError);
+ if (std::holds_alternative<KMV1::ErrorCode>(certOrError)) {
+ return std::get<KMV1::ErrorCode>(certOrError);
}
auto cert = std::move(std::get<keystore::X509_Ptr>(certOrError));
@@ -755,7 +1131,7 @@
auto error = keystore::setIssuer(&*cert, &*cert, false);
if (error) {
LOG(ERROR) << __func__ << ": Set issuer failed.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
// Signing
@@ -780,7 +1156,7 @@
error = keystore::signCert(&*cert, pkey_ptr);
if (error) {
LOG(ERROR) << __func__ << ": signCert failed.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
}
@@ -788,7 +1164,7 @@
auto encodedCertOrError = keystore::encodeCert(&*cert);
if (std::holds_alternative<keystore::CertUtilsError>(encodedCertOrError)) {
LOG(ERROR) << __func__ << ": encodeCert failed.";
- return V4_0_ErrorCode::UNKNOWN_ERROR;
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
}
Certificate certificate{.encodedCertificate =
@@ -802,14 +1178,14 @@
// Copied from system/security/keystore/include/keystore/keymaster_types.h.
// Changing this namespace alias will change the keymaster version.
-namespace keymaster = ::android::hardware::keymaster::V4_1;
+namespace keymasterNs = ::android::hardware::keymaster::V4_1;
-using keymaster::SecurityLevel;
+using keymasterNs::SecurityLevel;
// Copied from system/security/keystore/KeyStore.h.
using ::android::sp;
-using keymaster::support::Keymaster;
+using keymasterNs::support::Keymaster;
template <typename T, size_t count> class Devices : public std::array<T, count> {
public:
@@ -834,8 +1210,8 @@
// Copied from system/security/keystore/keystore_main.cpp.
using ::android::hardware::hidl_string;
-using keymaster::support::Keymaster3;
-using keymaster::support::Keymaster4;
+using keymasterNs::support::Keymaster3;
+using keymasterNs::support::Keymaster4;
template <typename Wrapper>
KeymasterDevices enumerateKeymasterDevices(IServiceManager* serviceManager) {
@@ -911,12 +1287,14 @@
// Constructors and helpers.
KeyMintDevice::KeyMintDevice(sp<Keymaster> device, KeyMintSecurityLevel securityLevel)
- : mDevice(device) {
+ : mDevice(device), securityLevel_(securityLevel) {
if (securityLevel == KeyMintSecurityLevel::STRONGBOX) {
- mOperationSlots.setNumFreeSlots(3);
+ setNumFreeSlots(3);
} else {
- mOperationSlots.setNumFreeSlots(15);
+ setNumFreeSlots(15);
}
+
+ softKeyMintDevice_.reset(CreateKeyMintDevice(KeyMintSecurityLevel::SOFTWARE));
}
sp<Keymaster> getDevice(KeyMintSecurityLevel securityLevel) {
@@ -993,7 +1371,7 @@
}
ScopedAStatus KeystoreCompatService::getSecureClock(std::shared_ptr<ISecureClock>* _aidl_return) {
- if (!mSharedSecret) {
+ if (!mSecureClock) {
// The legacy verification service was always provided by the TEE variant.
auto clock = SecureClock::createSecureClock(KeyMintSecurityLevel::TRUSTED_ENVIRONMENT);
if (!clock) {
diff --git a/keystore2/src/km_compat/km_compat.h b/keystore2/src/km_compat/km_compat.h
index 481481a..69c24b4 100644
--- a/keystore2/src/km_compat/km_compat.h
+++ b/keystore2/src/km_compat/km_compat.h
@@ -18,6 +18,7 @@
#include <aidl/android/hardware/security/keymint/BnKeyMintDevice.h>
#include <aidl/android/hardware/security/keymint/BnKeyMintOperation.h>
+#include <aidl/android/hardware/security/keymint/ErrorCode.h>
#include <aidl/android/hardware/security/secureclock/BnSecureClock.h>
#include <aidl/android/hardware/security/sharedsecret/BnSharedSecret.h>
#include <aidl/android/security/compat/BnKeystoreCompatService.h>
@@ -27,8 +28,8 @@
#include "certificate_utils.h"
+using ::aidl::android::hardware::security::keymint::AttestationKey;
using ::aidl::android::hardware::security::keymint::BeginResult;
-using ::aidl::android::hardware::security::keymint::ByteArray;
using ::aidl::android::hardware::security::keymint::Certificate;
using ::aidl::android::hardware::security::keymint::HardwareAuthToken;
using ::aidl::android::hardware::security::keymint::KeyCharacteristics;
@@ -36,11 +37,11 @@
using ::aidl::android::hardware::security::keymint::KeyFormat;
using ::aidl::android::hardware::security::keymint::KeyMintHardwareInfo;
using ::aidl::android::hardware::security::keymint::KeyParameter;
-using ::aidl::android::hardware::security::keymint::KeyParameterArray;
using ::aidl::android::hardware::security::keymint::KeyPurpose;
using KeyMintSecurityLevel = ::aidl::android::hardware::security::keymint::SecurityLevel;
using V4_0_ErrorCode = ::android::hardware::keymaster::V4_0::ErrorCode;
using ::aidl::android::hardware::security::keymint::IKeyMintDevice;
+using KMV1_ErrorCode = ::aidl::android::hardware::security::keymint::ErrorCode;
using ::aidl::android::hardware::security::secureclock::ISecureClock;
using ::aidl::android::hardware::security::secureclock::TimeStampToken;
using ::aidl::android::hardware::security::sharedsecret::ISharedSecret;
@@ -88,9 +89,11 @@
ScopedAStatus getHardwareInfo(KeyMintHardwareInfo* _aidl_return) override;
ScopedAStatus addRngEntropy(const std::vector<uint8_t>& in_data) override;
ScopedAStatus generateKey(const std::vector<KeyParameter>& in_keyParams,
+ const std::optional<AttestationKey>& in_attestationKey,
KeyCreationResult* out_creationResult) override;
ScopedAStatus importKey(const std::vector<KeyParameter>& in_inKeyParams,
KeyFormat in_inKeyFormat, const std::vector<uint8_t>& in_inKeyData,
+ const std::optional<AttestationKey>& in_attestationKey,
KeyCreationResult* out_creationResult) override;
ScopedAStatus importWrappedKey(const std::vector<uint8_t>& in_inWrappedKeyData,
const std::vector<uint8_t>& in_inWrappingKeyBlob,
@@ -108,19 +111,30 @@
const std::vector<KeyParameter>& in_inParams,
const HardwareAuthToken& in_inAuthToken,
BeginResult* _aidl_return) override;
+ ScopedAStatus deviceLocked(bool passwordOnly,
+ const std::optional<TimeStampToken>& timestampToken) override;
+ ScopedAStatus earlyBootEnded() override;
+
+ ScopedAStatus convertStorageKeyToEphemeral(const std::vector<uint8_t>& storageKeyBlob,
+ std::vector<uint8_t>* ephemeralKeyBlob) override;
+
+ ScopedAStatus performOperation(const std::vector<uint8_t>& request,
+ std::vector<uint8_t>* response) override;
// These are public to allow testing code to use them directly.
// This class should not be used publicly anyway.
-
- std::variant<std::vector<Certificate>, V4_0_ErrorCode>
+ std::variant<std::vector<Certificate>, KMV1_ErrorCode>
getCertificate(const std::vector<KeyParameter>& keyParams, const std::vector<uint8_t>& keyBlob);
void setNumFreeSlots(uint8_t numFreeSlots);
private:
- std::optional<V4_0_ErrorCode> signCertificate(const std::vector<KeyParameter>& keyParams,
+ std::optional<KMV1_ErrorCode> signCertificate(const std::vector<KeyParameter>& keyParams,
const std::vector<uint8_t>& keyBlob, X509* cert);
KeyMintSecurityLevel securityLevel_;
+
+ // Software-based KeyMint device used to implement ECDH.
+ std::shared_ptr<IKeyMintDevice> softKeyMintDevice_;
};
class KeyMintOperation : public aidl::android::hardware::security::keymint::BnKeyMintOperation {
@@ -135,19 +149,22 @@
: mDevice(device), mOperationHandle(operationHandle), mOperationSlot(slots, isActive) {}
~KeyMintOperation();
- ScopedAStatus update(const std::optional<KeyParameterArray>& in_inParams,
- const std::optional<std::vector<uint8_t>>& in_input,
- const std::optional<HardwareAuthToken>& in_inAuthToken,
- const std::optional<TimeStampToken>& in_inTimestampToken,
- std::optional<KeyParameterArray>* out_outParams,
- std::optional<ByteArray>* out_output, int32_t* _aidl_return);
- ScopedAStatus finish(const std::optional<KeyParameterArray>& in_inParams,
- const std::optional<std::vector<uint8_t>>& in_input,
- const std::optional<std::vector<uint8_t>>& in_inSignature,
- const std::optional<HardwareAuthToken>& in_authToken,
- const std::optional<TimeStampToken>& in_inTimestampToken,
- std::optional<KeyParameterArray>* out_outParams,
- std::vector<uint8_t>* _aidl_return);
+ ScopedAStatus updateAad(const std::vector<uint8_t>& input,
+ const std::optional<HardwareAuthToken>& authToken,
+ const std::optional<TimeStampToken>& timestampToken) override;
+
+ ScopedAStatus update(const std::vector<uint8_t>& input,
+ const std::optional<HardwareAuthToken>& authToken,
+ const std::optional<TimeStampToken>& timestampToken,
+ std::vector<uint8_t>* output) override;
+
+ ScopedAStatus finish(const std::optional<std::vector<uint8_t>>& input,
+ const std::optional<std::vector<uint8_t>>& signature,
+ const std::optional<HardwareAuthToken>& authToken,
+ const std::optional<TimeStampToken>& timeStampToken,
+ const std::optional<std::vector<uint8_t>>& confirmationToken,
+ std::vector<uint8_t>* output) override;
+
ScopedAStatus abort();
};
diff --git a/keystore2/src/km_compat/km_compat_type_conversion.h b/keystore2/src/km_compat/km_compat_type_conversion.h
index db9d2a4..c2b4669 100644
--- a/keystore2/src/km_compat/km_compat_type_conversion.h
+++ b/keystore2/src/km_compat/km_compat_type_conversion.h
@@ -16,6 +16,7 @@
#pragma once
+#include <aidl/android/hardware/security/keymint/ErrorCode.h>
#include <keymasterV4_1/keymaster_tags.h>
#include <keymint_support/keymint_tags.h>
@@ -23,6 +24,159 @@
namespace V4_1 = ::android::hardware::keymaster::V4_1;
namespace KMV1 = ::aidl::android::hardware::security::keymint;
+static KMV1::ErrorCode convert(V4_0::ErrorCode error) {
+ switch (error) {
+ case V4_0::ErrorCode::OK:
+ return KMV1::ErrorCode::OK;
+ case V4_0::ErrorCode::ROOT_OF_TRUST_ALREADY_SET:
+ return KMV1::ErrorCode::ROOT_OF_TRUST_ALREADY_SET;
+ case V4_0::ErrorCode::UNSUPPORTED_PURPOSE:
+ return KMV1::ErrorCode::UNSUPPORTED_PURPOSE;
+ case V4_0::ErrorCode::INCOMPATIBLE_PURPOSE:
+ return KMV1::ErrorCode::INCOMPATIBLE_PURPOSE;
+ case V4_0::ErrorCode::UNSUPPORTED_ALGORITHM:
+ return KMV1::ErrorCode::UNSUPPORTED_ALGORITHM;
+ case V4_0::ErrorCode::INCOMPATIBLE_ALGORITHM:
+ return KMV1::ErrorCode::INCOMPATIBLE_ALGORITHM;
+ case V4_0::ErrorCode::UNSUPPORTED_KEY_SIZE:
+ return KMV1::ErrorCode::UNSUPPORTED_KEY_SIZE;
+ case V4_0::ErrorCode::UNSUPPORTED_BLOCK_MODE:
+ return KMV1::ErrorCode::UNSUPPORTED_BLOCK_MODE;
+ case V4_0::ErrorCode::INCOMPATIBLE_BLOCK_MODE:
+ return KMV1::ErrorCode::INCOMPATIBLE_BLOCK_MODE;
+ case V4_0::ErrorCode::UNSUPPORTED_MAC_LENGTH:
+ return KMV1::ErrorCode::UNSUPPORTED_MAC_LENGTH;
+ case V4_0::ErrorCode::UNSUPPORTED_PADDING_MODE:
+ return KMV1::ErrorCode::UNSUPPORTED_PADDING_MODE;
+ case V4_0::ErrorCode::INCOMPATIBLE_PADDING_MODE:
+ return KMV1::ErrorCode::INCOMPATIBLE_PADDING_MODE;
+ case V4_0::ErrorCode::UNSUPPORTED_DIGEST:
+ return KMV1::ErrorCode::UNSUPPORTED_DIGEST;
+ case V4_0::ErrorCode::INCOMPATIBLE_DIGEST:
+ return KMV1::ErrorCode::INCOMPATIBLE_DIGEST;
+ case V4_0::ErrorCode::INVALID_EXPIRATION_TIME:
+ return KMV1::ErrorCode::INVALID_EXPIRATION_TIME;
+ case V4_0::ErrorCode::INVALID_USER_ID:
+ return KMV1::ErrorCode::INVALID_USER_ID;
+ case V4_0::ErrorCode::INVALID_AUTHORIZATION_TIMEOUT:
+ return KMV1::ErrorCode::INVALID_AUTHORIZATION_TIMEOUT;
+ case V4_0::ErrorCode::UNSUPPORTED_KEY_FORMAT:
+ return KMV1::ErrorCode::UNSUPPORTED_KEY_FORMAT;
+ case V4_0::ErrorCode::INCOMPATIBLE_KEY_FORMAT:
+ return KMV1::ErrorCode::INCOMPATIBLE_KEY_FORMAT;
+ case V4_0::ErrorCode::UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM:
+ return KMV1::ErrorCode::UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM;
+ case V4_0::ErrorCode::UNSUPPORTED_KEY_VERIFICATION_ALGORITHM:
+ return KMV1::ErrorCode::UNSUPPORTED_KEY_VERIFICATION_ALGORITHM;
+ case V4_0::ErrorCode::INVALID_INPUT_LENGTH:
+ return KMV1::ErrorCode::INVALID_INPUT_LENGTH;
+ case V4_0::ErrorCode::KEY_EXPORT_OPTIONS_INVALID:
+ return KMV1::ErrorCode::KEY_EXPORT_OPTIONS_INVALID;
+ case V4_0::ErrorCode::DELEGATION_NOT_ALLOWED:
+ return KMV1::ErrorCode::DELEGATION_NOT_ALLOWED;
+ case V4_0::ErrorCode::KEY_NOT_YET_VALID:
+ return KMV1::ErrorCode::KEY_NOT_YET_VALID;
+ case V4_0::ErrorCode::KEY_EXPIRED:
+ return KMV1::ErrorCode::KEY_EXPIRED;
+ case V4_0::ErrorCode::KEY_USER_NOT_AUTHENTICATED:
+ return KMV1::ErrorCode::KEY_USER_NOT_AUTHENTICATED;
+ case V4_0::ErrorCode::OUTPUT_PARAMETER_NULL:
+ return KMV1::ErrorCode::OUTPUT_PARAMETER_NULL;
+ case V4_0::ErrorCode::INVALID_OPERATION_HANDLE:
+ return KMV1::ErrorCode::INVALID_OPERATION_HANDLE;
+ case V4_0::ErrorCode::INSUFFICIENT_BUFFER_SPACE:
+ return KMV1::ErrorCode::INSUFFICIENT_BUFFER_SPACE;
+ case V4_0::ErrorCode::VERIFICATION_FAILED:
+ return KMV1::ErrorCode::VERIFICATION_FAILED;
+ case V4_0::ErrorCode::TOO_MANY_OPERATIONS:
+ return KMV1::ErrorCode::TOO_MANY_OPERATIONS;
+ case V4_0::ErrorCode::UNEXPECTED_NULL_POINTER:
+ return KMV1::ErrorCode::UNEXPECTED_NULL_POINTER;
+ case V4_0::ErrorCode::INVALID_KEY_BLOB:
+ return KMV1::ErrorCode::INVALID_KEY_BLOB;
+ case V4_0::ErrorCode::IMPORTED_KEY_NOT_ENCRYPTED:
+ return KMV1::ErrorCode::IMPORTED_KEY_NOT_ENCRYPTED;
+ case V4_0::ErrorCode::IMPORTED_KEY_DECRYPTION_FAILED:
+ return KMV1::ErrorCode::IMPORTED_KEY_DECRYPTION_FAILED;
+ case V4_0::ErrorCode::IMPORTED_KEY_NOT_SIGNED:
+ return KMV1::ErrorCode::IMPORTED_KEY_NOT_SIGNED;
+ case V4_0::ErrorCode::IMPORTED_KEY_VERIFICATION_FAILED:
+ return KMV1::ErrorCode::IMPORTED_KEY_VERIFICATION_FAILED;
+ case V4_0::ErrorCode::INVALID_ARGUMENT:
+ return KMV1::ErrorCode::INVALID_ARGUMENT;
+ case V4_0::ErrorCode::UNSUPPORTED_TAG:
+ return KMV1::ErrorCode::UNSUPPORTED_TAG;
+ case V4_0::ErrorCode::INVALID_TAG:
+ return KMV1::ErrorCode::INVALID_TAG;
+ case V4_0::ErrorCode::MEMORY_ALLOCATION_FAILED:
+ return KMV1::ErrorCode::MEMORY_ALLOCATION_FAILED;
+ case V4_0::ErrorCode::IMPORT_PARAMETER_MISMATCH:
+ return KMV1::ErrorCode::IMPORT_PARAMETER_MISMATCH;
+ case V4_0::ErrorCode::SECURE_HW_ACCESS_DENIED:
+ return KMV1::ErrorCode::SECURE_HW_ACCESS_DENIED;
+ case V4_0::ErrorCode::OPERATION_CANCELLED:
+ return KMV1::ErrorCode::OPERATION_CANCELLED;
+ case V4_0::ErrorCode::CONCURRENT_ACCESS_CONFLICT:
+ return KMV1::ErrorCode::CONCURRENT_ACCESS_CONFLICT;
+ case V4_0::ErrorCode::SECURE_HW_BUSY:
+ return KMV1::ErrorCode::SECURE_HW_BUSY;
+ case V4_0::ErrorCode::SECURE_HW_COMMUNICATION_FAILED:
+ return KMV1::ErrorCode::SECURE_HW_COMMUNICATION_FAILED;
+ case V4_0::ErrorCode::UNSUPPORTED_EC_FIELD:
+ return KMV1::ErrorCode::UNSUPPORTED_EC_FIELD;
+ case V4_0::ErrorCode::MISSING_NONCE:
+ return KMV1::ErrorCode::MISSING_NONCE;
+ case V4_0::ErrorCode::INVALID_NONCE:
+ return KMV1::ErrorCode::INVALID_NONCE;
+ case V4_0::ErrorCode::MISSING_MAC_LENGTH:
+ return KMV1::ErrorCode::MISSING_MAC_LENGTH;
+ case V4_0::ErrorCode::KEY_RATE_LIMIT_EXCEEDED:
+ return KMV1::ErrorCode::KEY_RATE_LIMIT_EXCEEDED;
+ case V4_0::ErrorCode::CALLER_NONCE_PROHIBITED:
+ return KMV1::ErrorCode::CALLER_NONCE_PROHIBITED;
+ case V4_0::ErrorCode::KEY_MAX_OPS_EXCEEDED:
+ return KMV1::ErrorCode::KEY_MAX_OPS_EXCEEDED;
+ case V4_0::ErrorCode::INVALID_MAC_LENGTH:
+ return KMV1::ErrorCode::INVALID_MAC_LENGTH;
+ case V4_0::ErrorCode::MISSING_MIN_MAC_LENGTH:
+ return KMV1::ErrorCode::MISSING_MIN_MAC_LENGTH;
+ case V4_0::ErrorCode::UNSUPPORTED_MIN_MAC_LENGTH:
+ return KMV1::ErrorCode::UNSUPPORTED_MIN_MAC_LENGTH;
+ case V4_0::ErrorCode::UNSUPPORTED_KDF:
+ return KMV1::ErrorCode::UNSUPPORTED_KDF;
+ case V4_0::ErrorCode::UNSUPPORTED_EC_CURVE:
+ return KMV1::ErrorCode::UNSUPPORTED_EC_CURVE;
+ case V4_0::ErrorCode::KEY_REQUIRES_UPGRADE:
+ return KMV1::ErrorCode::KEY_REQUIRES_UPGRADE;
+ case V4_0::ErrorCode::ATTESTATION_CHALLENGE_MISSING:
+ return KMV1::ErrorCode::ATTESTATION_CHALLENGE_MISSING;
+ case V4_0::ErrorCode::KEYMASTER_NOT_CONFIGURED:
+ return KMV1::ErrorCode::KEYMINT_NOT_CONFIGURED;
+ case V4_0::ErrorCode::ATTESTATION_APPLICATION_ID_MISSING:
+ return KMV1::ErrorCode::ATTESTATION_APPLICATION_ID_MISSING;
+ case V4_0::ErrorCode::CANNOT_ATTEST_IDS:
+ return KMV1::ErrorCode::CANNOT_ATTEST_IDS;
+ case V4_0::ErrorCode::ROLLBACK_RESISTANCE_UNAVAILABLE:
+ return KMV1::ErrorCode::ROLLBACK_RESISTANCE_UNAVAILABLE;
+ case V4_0::ErrorCode::HARDWARE_TYPE_UNAVAILABLE:
+ return KMV1::ErrorCode::HARDWARE_TYPE_UNAVAILABLE;
+ case V4_0::ErrorCode::PROOF_OF_PRESENCE_REQUIRED:
+ return KMV1::ErrorCode::PROOF_OF_PRESENCE_REQUIRED;
+ case V4_0::ErrorCode::CONCURRENT_PROOF_OF_PRESENCE_REQUESTED:
+ return KMV1::ErrorCode::CONCURRENT_PROOF_OF_PRESENCE_REQUESTED;
+ case V4_0::ErrorCode::NO_USER_CONFIRMATION:
+ return KMV1::ErrorCode::NO_USER_CONFIRMATION;
+ case V4_0::ErrorCode::DEVICE_LOCKED:
+ return KMV1::ErrorCode::DEVICE_LOCKED;
+ case V4_0::ErrorCode::UNIMPLEMENTED:
+ return KMV1::ErrorCode::UNIMPLEMENTED;
+ case V4_0::ErrorCode::VERSION_MISMATCH:
+ return KMV1::ErrorCode::VERSION_MISMATCH;
+ case V4_0::ErrorCode::UNKNOWN_ERROR:
+ return KMV1::ErrorCode::UNKNOWN_ERROR;
+ }
+}
+
static std::optional<V4_0::KeyPurpose> convert(KMV1::KeyPurpose p) {
switch (p) {
case KMV1::KeyPurpose::ENCRYPT:
@@ -241,6 +395,8 @@
return V4_0::SecurityLevel::TRUSTED_ENVIRONMENT;
case KMV1::SecurityLevel::STRONGBOX:
return V4_0::SecurityLevel::STRONGBOX;
+ case KMV1::SecurityLevel::KEYSTORE:
+ return V4_0::SecurityLevel::SOFTWARE;
}
}
@@ -578,7 +734,14 @@
}
break;
case KMV1::Tag::RSA_OAEP_MGF_DIGEST:
- // Does not exist in KM < KeyMint 1.0.
+ case KMV1::Tag::CERTIFICATE_SERIAL:
+ case KMV1::Tag::CERTIFICATE_SUBJECT:
+ case KMV1::Tag::CERTIFICATE_NOT_BEFORE:
+ case KMV1::Tag::CERTIFICATE_NOT_AFTER:
+ // These tags do not exist in KM < KeyMint 1.0.
+ break;
+ case KMV1::Tag::MAX_BOOT_LEVEL:
+ // Does not exist in API level 30 or below.
break;
}
return V4_0::KeyParameter{.tag = V4_0::Tag::INVALID};
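
The new CERTIFICATE_* tags and MAX_BOOT_LEVEL therefore have no Keymaster 4.x representation at all: converting one of them yields a parameter tagged V4_0::Tag::INVALID, which carries no information for the legacy HAL. That is fine for the certificate tags, because the shim consumes them itself: makeCert in km_compat.cpp reads TAG_CERTIFICATE_SUBJECT, TAG_CERTIFICATE_SERIAL, TAG_CERTIFICATE_NOT_BEFORE and TAG_CERTIFICATE_NOT_AFTER directly, and extractGenerationParams is presumably what keeps them out of the parameters forwarded to the HAL.
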
diff --git a/keystore2/src/km_compat/lib.rs b/keystore2/src/km_compat/lib.rs
index 7814364..5ece8a7 100644
--- a/keystore2/src/km_compat/lib.rs
+++ b/keystore2/src/km_compat/lib.rs
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// TODO: Once this is stable, remove this and document everything public.
-#![allow(missing_docs)]
+//! Export into Rust a function to create a KeyMintDevice and add it as a service.
+#[allow(missing_docs)] // TODO remove this
extern "C" {
fn addKeyMintDeviceService() -> i32;
}
+#[allow(missing_docs)] // TODO remove this
pub fn add_keymint_device_service() -> i32 {
unsafe { addKeyMintDeviceService() }
}
@@ -31,42 +32,56 @@
Algorithm::Algorithm, BeginResult::BeginResult, BlockMode::BlockMode, Digest::Digest,
ErrorCode::ErrorCode, HardwareAuthToken::HardwareAuthToken, IKeyMintDevice::IKeyMintDevice,
KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat, KeyParameter::KeyParameter,
- KeyParameterArray::KeyParameterArray, KeyParameterValue::KeyParameterValue,
- KeyPurpose::KeyPurpose, PaddingMode::PaddingMode, SecurityLevel::SecurityLevel, Tag::Tag,
+ KeyParameterValue::KeyParameterValue, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
+ SecurityLevel::SecurityLevel, Tag::Tag,
};
- use android_hardware_security_keymint::binder;
+ use android_hardware_security_keymint::binder::{self, Strong};
use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
static COMPAT_NAME: &str = "android.security.compat";
- fn get_device() -> Box<dyn IKeyMintDevice> {
+ fn get_device() -> Option<Strong<dyn IKeyMintDevice>> {
add_keymint_device_service();
- let compat_service: Box<dyn IKeystoreCompatService> =
- binder::get_interface(COMPAT_NAME).unwrap();
- compat_service.getKeyMintDevice(SecurityLevel::TRUSTED_ENVIRONMENT).unwrap()
+ let compat_service: Strong<dyn IKeystoreCompatService> =
+ binder::get_interface(COMPAT_NAME).ok()?;
+ compat_service.getKeyMintDevice(SecurityLevel::TRUSTED_ENVIRONMENT).ok()
+ }
+
+ macro_rules! get_device_or_skip_test {
+ () => {
+ match get_device() {
+ Some(dev) => dev,
+ None => return,
+ }
+ };
}
#[test]
fn test_get_hardware_info() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let hinfo = legacy.getHardwareInfo();
assert!(hinfo.is_ok());
}
#[test]
fn test_add_rng_entropy() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let result = legacy.addRngEntropy(&[42; 16]);
assert!(result.is_ok(), "{:?}", result);
}
// TODO: If I only need the key itself, don't return the other things.
fn generate_key(legacy: &dyn IKeyMintDevice, kps: Vec<KeyParameter>) -> KeyCreationResult {
- let creation_result = legacy.generateKey(&kps).expect("Failed to generate key");
+ let creation_result =
+ legacy.generateKey(&kps, None /* attest_key */).expect("Failed to generate key");
assert_ne!(creation_result.keyBlob.len(), 0);
creation_result
}
+    // Per RFC 5280 4.1.2.5, an undefined expiration (notAfter) field should be set to the
+    // GeneralizedTime 99991231235959Z, i.e. 9999-12-31T23:59:59Z, which is 253402300799000 ms
+    // since Jan 1, 1970.
+ const UNDEFINED_NOT_AFTER: i64 = 253402300799000i64;
+
fn generate_rsa_key(legacy: &dyn IKeyMintDevice, encrypt: bool, attest: bool) -> Vec<u8> {
let mut kps = vec![
KeyParameter {
@@ -88,6 +103,14 @@
tag: Tag::PURPOSE,
value: KeyParameterValue::KeyPurpose(KeyPurpose::SIGN),
},
+ KeyParameter {
+ tag: Tag::CERTIFICATE_NOT_BEFORE,
+ value: KeyParameterValue::DateTime(0),
+ },
+ KeyParameter {
+ tag: Tag::CERTIFICATE_NOT_AFTER,
+ value: KeyParameterValue::DateTime(UNDEFINED_NOT_AFTER),
+ },
];
if encrypt {
kps.push(KeyParameter {
@@ -117,39 +140,40 @@
#[test]
fn test_generate_key_no_encrypt() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
generate_rsa_key(legacy.as_ref(), false, false);
}
#[test]
fn test_generate_key_encrypt() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
generate_rsa_key(legacy.as_ref(), true, false);
}
#[test]
fn test_generate_key_attested() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
generate_rsa_key(legacy.as_ref(), false, true);
}
#[test]
fn test_import_key() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let kps = [KeyParameter {
tag: Tag::ALGORITHM,
value: KeyParameterValue::Algorithm(Algorithm::AES),
}];
let kf = KeyFormat::RAW;
let kd = [0; 16];
- let creation_result = legacy.importKey(&kps, kf, &kd).expect("Failed to import key");
+ let creation_result =
+ legacy.importKey(&kps, kf, &kd, None /* attest_key */).expect("Failed to import key");
assert_ne!(creation_result.keyBlob.len(), 0);
assert_eq!(creation_result.certificateChain.len(), 0);
}
#[test]
fn test_import_wrapped_key() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let result = legacy.importWrappedKey(&[], &[], &[], &[], 0, 0);
// For this test we only care that there was no crash.
assert!(result.is_ok() || result.is_err());
@@ -157,7 +181,7 @@
#[test]
fn test_upgrade_key() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let blob = generate_rsa_key(legacy.as_ref(), false, false);
let result = legacy.upgradeKey(&blob, &[]);
// For this test we only care that there was no crash.
@@ -166,7 +190,7 @@
#[test]
fn test_delete_key() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let blob = generate_rsa_key(legacy.as_ref(), false, false);
let result = legacy.deleteKey(&blob);
assert!(result.is_ok(), "{:?}", result);
@@ -174,14 +198,14 @@
#[test]
fn test_delete_all_keys() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let result = legacy.deleteAllKeys();
assert!(result.is_ok(), "{:?}", result);
}
#[test]
fn test_destroy_attestation_ids() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let result = legacy.destroyAttestationIds();
assert!(result.is_err());
assert_eq!(result.unwrap_err().service_specific_error(), ErrorCode::UNIMPLEMENTED.0,);
@@ -243,7 +267,7 @@
#[test]
fn test_begin_abort() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let blob = generate_aes_key(legacy.as_ref());
let begin_result = begin(legacy.as_ref(), &blob, KeyPurpose::ENCRYPT, None);
let operation = begin_result.operation.unwrap();
@@ -255,54 +279,72 @@
#[test]
fn test_begin_update_finish() {
- let legacy = get_device();
+ let legacy = get_device_or_skip_test!();
let blob = generate_aes_key(legacy.as_ref());
let begin_result = begin(legacy.as_ref(), &blob, KeyPurpose::ENCRYPT, None);
let operation = begin_result.operation.unwrap();
- let params = KeyParameterArray {
- params: vec![KeyParameter {
- tag: Tag::ASSOCIATED_DATA,
- value: KeyParameterValue::Blob(b"foobar".to_vec()),
- }],
- };
+
+ let update_aad_result = operation.updateAad(
+ &b"foobar".to_vec(),
+ None, /* authToken */
+ None, /* timestampToken */
+ );
+ assert!(update_aad_result.is_ok(), "{:?}", update_aad_result);
+
let message = [42; 128];
- let mut out_params = None;
- let result =
- operation.finish(Some(¶ms), Some(&message), None, None, None, &mut out_params);
+ let result = operation.finish(
+ Some(&message),
+ None, /* signature */
+ None, /* authToken */
+ None, /* timestampToken */
+ None, /* confirmationToken */
+ );
assert!(result.is_ok(), "{:?}", result);
let ciphertext = result.unwrap();
assert!(!ciphertext.is_empty());
- assert!(out_params.is_some());
let begin_result =
begin(legacy.as_ref(), &blob, KeyPurpose::DECRYPT, Some(begin_result.params));
+
let operation = begin_result.operation.unwrap();
- let mut out_params = None;
- let mut output = None;
+
+ let update_aad_result = operation.updateAad(
+ &b"foobar".to_vec(),
+ None, /* authToken */
+ None, /* timestampToken */
+ );
+ assert!(update_aad_result.is_ok(), "{:?}", update_aad_result);
+
let result = operation.update(
- Some(¶ms),
- Some(&ciphertext),
- None,
- None,
- &mut out_params,
- &mut output,
+ &ciphertext,
+ None, /* authToken */
+ None, /* timestampToken */
);
assert!(result.is_ok(), "{:?}", result);
- assert_eq!(result.unwrap(), message.len() as i32);
- assert!(output.is_some());
- assert_eq!(output.unwrap().data, message.to_vec());
- let result = operation.finish(Some(¶ms), None, None, None, None, &mut out_params);
+ assert_eq!(result.unwrap(), message);
+ let result = operation.finish(
+ None, /* input */
+ None, /* signature */
+ None, /* authToken */
+ None, /* timestampToken */
+ None, /* confirmationToken */
+ );
assert!(result.is_ok(), "{:?}", result);
- assert!(out_params.is_some());
}
#[test]
fn test_secure_clock() {
add_keymint_device_service();
- let compat_service: Box<dyn IKeystoreCompatService> =
- binder::get_interface(COMPAT_NAME).unwrap();
- let secure_clock = compat_service.getSecureClock().unwrap();
+ let compat_service: binder::Strong<dyn IKeystoreCompatService> =
+ match binder::get_interface(COMPAT_NAME) {
+ Ok(cs) => cs,
+ _ => return,
+ };
+ let secure_clock = match compat_service.getSecureClock() {
+ Ok(sc) => sc,
+ _ => return,
+ };
let challenge = 42;
let result = secure_clock.generateTimeStamp(challenge);
@@ -315,10 +357,16 @@
#[test]
fn test_shared_secret() {
add_keymint_device_service();
- let compat_service: Box<dyn IKeystoreCompatService> =
- binder::get_interface(COMPAT_NAME).unwrap();
- let shared_secret =
- compat_service.getSharedSecret(SecurityLevel::TRUSTED_ENVIRONMENT).unwrap();
+ let compat_service: binder::Strong<dyn IKeystoreCompatService> =
+ match binder::get_interface(COMPAT_NAME) {
+ Ok(cs) => cs,
+ _ => return,
+ };
+ let shared_secret = match compat_service.getSharedSecret(SecurityLevel::TRUSTED_ENVIRONMENT)
+ {
+ Ok(ss) => ss,
+ _ => return,
+ };
let result = shared_secret.getSharedSecretParameters();
assert!(result.is_ok(), "{:?}", result);
diff --git a/keystore2/src/km_compat/parameter_conversion_test.cpp b/keystore2/src/km_compat/parameter_conversion_test.cpp
index 41be067..48af20c 100644
--- a/keystore2/src/km_compat/parameter_conversion_test.cpp
+++ b/keystore2/src/km_compat/parameter_conversion_test.cpp
@@ -150,3 +150,84 @@
TEST_KEY_PARAMETER_CONVERSION_V4_0(TAG_USER_SECURE_ID);
TEST_KEY_PARAMETER_CONVERSION_V4_0(TAG_VENDOR_PATCHLEVEL);
}
+
+#define TEST_ERROR_CODE_CONVERSION(variant) \
+ ASSERT_EQ(KMV1::ErrorCode::variant, convert(V4_0::ErrorCode::variant))
+
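+// Each check below asserts that converting a Keymaster 4.0 error code yields the KeyMint
+// (KMV1) error code of the same name; only KEYMASTER_NOT_CONFIGURED is renamed, to
+// KEYMINT_NOT_CONFIGURED.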
+TEST(KmCompatTypeConversionTest, testErrorCodeConversion) {
+ TEST_ERROR_CODE_CONVERSION(OK);
+ TEST_ERROR_CODE_CONVERSION(ROOT_OF_TRUST_ALREADY_SET);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_PURPOSE);
+ TEST_ERROR_CODE_CONVERSION(INCOMPATIBLE_PURPOSE);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_ALGORITHM);
+ TEST_ERROR_CODE_CONVERSION(INCOMPATIBLE_ALGORITHM);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_KEY_SIZE);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_BLOCK_MODE);
+ TEST_ERROR_CODE_CONVERSION(INCOMPATIBLE_BLOCK_MODE);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_MAC_LENGTH);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_PADDING_MODE);
+ TEST_ERROR_CODE_CONVERSION(INCOMPATIBLE_PADDING_MODE);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_DIGEST);
+ TEST_ERROR_CODE_CONVERSION(INCOMPATIBLE_DIGEST);
+ TEST_ERROR_CODE_CONVERSION(INVALID_EXPIRATION_TIME);
+ TEST_ERROR_CODE_CONVERSION(INVALID_USER_ID);
+ TEST_ERROR_CODE_CONVERSION(INVALID_AUTHORIZATION_TIMEOUT);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_KEY_FORMAT);
+ TEST_ERROR_CODE_CONVERSION(INCOMPATIBLE_KEY_FORMAT);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_KEY_VERIFICATION_ALGORITHM);
+ TEST_ERROR_CODE_CONVERSION(INVALID_INPUT_LENGTH);
+ TEST_ERROR_CODE_CONVERSION(KEY_EXPORT_OPTIONS_INVALID);
+ TEST_ERROR_CODE_CONVERSION(DELEGATION_NOT_ALLOWED);
+ TEST_ERROR_CODE_CONVERSION(KEY_NOT_YET_VALID);
+ TEST_ERROR_CODE_CONVERSION(KEY_EXPIRED);
+ TEST_ERROR_CODE_CONVERSION(KEY_USER_NOT_AUTHENTICATED);
+ TEST_ERROR_CODE_CONVERSION(OUTPUT_PARAMETER_NULL);
+ TEST_ERROR_CODE_CONVERSION(INVALID_OPERATION_HANDLE);
+ TEST_ERROR_CODE_CONVERSION(INSUFFICIENT_BUFFER_SPACE);
+ TEST_ERROR_CODE_CONVERSION(VERIFICATION_FAILED);
+ TEST_ERROR_CODE_CONVERSION(TOO_MANY_OPERATIONS);
+ TEST_ERROR_CODE_CONVERSION(UNEXPECTED_NULL_POINTER);
+ TEST_ERROR_CODE_CONVERSION(INVALID_KEY_BLOB);
+ TEST_ERROR_CODE_CONVERSION(IMPORTED_KEY_NOT_ENCRYPTED);
+ TEST_ERROR_CODE_CONVERSION(IMPORTED_KEY_DECRYPTION_FAILED);
+ TEST_ERROR_CODE_CONVERSION(IMPORTED_KEY_NOT_SIGNED);
+ TEST_ERROR_CODE_CONVERSION(IMPORTED_KEY_VERIFICATION_FAILED);
+ TEST_ERROR_CODE_CONVERSION(INVALID_ARGUMENT);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_TAG);
+ TEST_ERROR_CODE_CONVERSION(INVALID_TAG);
+ TEST_ERROR_CODE_CONVERSION(MEMORY_ALLOCATION_FAILED);
+ TEST_ERROR_CODE_CONVERSION(IMPORT_PARAMETER_MISMATCH);
+ TEST_ERROR_CODE_CONVERSION(SECURE_HW_ACCESS_DENIED);
+ TEST_ERROR_CODE_CONVERSION(OPERATION_CANCELLED);
+ TEST_ERROR_CODE_CONVERSION(CONCURRENT_ACCESS_CONFLICT);
+ TEST_ERROR_CODE_CONVERSION(SECURE_HW_BUSY);
+ TEST_ERROR_CODE_CONVERSION(SECURE_HW_COMMUNICATION_FAILED);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_EC_FIELD);
+ TEST_ERROR_CODE_CONVERSION(MISSING_NONCE);
+ TEST_ERROR_CODE_CONVERSION(INVALID_NONCE);
+ TEST_ERROR_CODE_CONVERSION(MISSING_MAC_LENGTH);
+ TEST_ERROR_CODE_CONVERSION(KEY_RATE_LIMIT_EXCEEDED);
+ TEST_ERROR_CODE_CONVERSION(CALLER_NONCE_PROHIBITED);
+ TEST_ERROR_CODE_CONVERSION(KEY_MAX_OPS_EXCEEDED);
+ TEST_ERROR_CODE_CONVERSION(INVALID_MAC_LENGTH);
+ TEST_ERROR_CODE_CONVERSION(MISSING_MIN_MAC_LENGTH);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_MIN_MAC_LENGTH);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_KDF);
+ TEST_ERROR_CODE_CONVERSION(UNSUPPORTED_EC_CURVE);
+ TEST_ERROR_CODE_CONVERSION(KEY_REQUIRES_UPGRADE);
+ TEST_ERROR_CODE_CONVERSION(ATTESTATION_CHALLENGE_MISSING);
+ ASSERT_EQ(KMV1::ErrorCode::KEYMINT_NOT_CONFIGURED,
+ convert(V4_0::ErrorCode::KEYMASTER_NOT_CONFIGURED));
+ TEST_ERROR_CODE_CONVERSION(ATTESTATION_APPLICATION_ID_MISSING);
+ TEST_ERROR_CODE_CONVERSION(CANNOT_ATTEST_IDS);
+ TEST_ERROR_CODE_CONVERSION(ROLLBACK_RESISTANCE_UNAVAILABLE);
+ TEST_ERROR_CODE_CONVERSION(HARDWARE_TYPE_UNAVAILABLE);
+ TEST_ERROR_CODE_CONVERSION(PROOF_OF_PRESENCE_REQUIRED);
+ TEST_ERROR_CODE_CONVERSION(CONCURRENT_PROOF_OF_PRESENCE_REQUESTED);
+ TEST_ERROR_CODE_CONVERSION(NO_USER_CONFIRMATION);
+ TEST_ERROR_CODE_CONVERSION(DEVICE_LOCKED);
+ TEST_ERROR_CODE_CONVERSION(UNIMPLEMENTED);
+ TEST_ERROR_CODE_CONVERSION(VERSION_MISMATCH);
+ TEST_ERROR_CODE_CONVERSION(UNKNOWN_ERROR);
+}
diff --git a/keystore2/src/km_compat/slot_test.cpp b/keystore2/src/km_compat/slot_test.cpp
index 0859ddf..43f3bc6 100644
--- a/keystore2/src/km_compat/slot_test.cpp
+++ b/keystore2/src/km_compat/slot_test.cpp
@@ -24,7 +24,6 @@
using ::aidl::android::hardware::security::keymint::Algorithm;
using ::aidl::android::hardware::security::keymint::BlockMode;
-using ::aidl::android::hardware::security::keymint::ByteArray;
using ::aidl::android::hardware::security::keymint::Certificate;
using ::aidl::android::hardware::security::keymint::Digest;
using ::aidl::android::hardware::security::keymint::ErrorCode;
@@ -47,7 +46,7 @@
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::DECRYPT),
});
KeyCreationResult creationResult;
- auto status = device->generateKey(keyParams, &creationResult);
+ auto status = device->generateKey(keyParams, std::nullopt /* attest_key */, &creationResult);
if (!status.isOk()) {
return {};
}
@@ -100,18 +99,19 @@
// Calling finish should free up a slot.
auto last = operations.back();
operations.pop_back();
- std::optional<KeyParameterArray> kpa;
std::vector<uint8_t> byteVec;
- auto status = last->finish(std::nullopt, std::nullopt, std::nullopt, std::nullopt, std::nullopt,
- &kpa, &byteVec);
+ auto status = last->finish(std::nullopt /* input */, std::nullopt /* signature */,
+ std::nullopt /* authToken */, std::nullopt /* timestampToken */,
+ std::nullopt /* confirmationToken */, &byteVec);
ASSERT_TRUE(status.isOk());
result = begin(device, true);
ASSERT_TRUE(std::holds_alternative<BeginResult>(result));
operations.push_back(std::get<BeginResult>(result).operation);
// Calling finish and abort on an already-finished operation should not free up another slot.
- status = last->finish(std::nullopt, std::nullopt, std::nullopt, std::nullopt, std::nullopt,
- &kpa, &byteVec);
+ status = last->finish(std::nullopt /* input */, std::nullopt /* signature */,
+ std::nullopt /* authToken */, std::nullopt /* timestampToken */,
+ std::nullopt /* confirmationToken */, &byteVec);
ASSERT_TRUE(!status.isOk());
status = last->abort();
ASSERT_TRUE(!status.isOk());
@@ -130,8 +130,9 @@
operations.push_back(std::get<BeginResult>(result).operation);
// Calling finish and abort on an already-aborted operation should not free up another slot.
- status = last->finish(std::nullopt, std::nullopt, std::nullopt, std::nullopt, std::nullopt,
- &kpa, &byteVec);
+ status = last->finish(std::nullopt /* input */, std::nullopt /* signature */,
+ std::nullopt /* authToken */, std::nullopt /* timestampToken */,
+ std::nullopt /* confirmationToken */, &byteVec);
ASSERT_TRUE(!status.isOk());
status = last->abort();
ASSERT_TRUE(!status.isOk());
@@ -140,23 +141,23 @@
ASSERT_EQ(std::get<ScopedAStatus>(result).getServiceSpecificError(),
static_cast<int32_t>(ErrorCode::TOO_MANY_OPERATIONS));
- // Generating a certificate with signWith also uses a slot.
+ // Generating a certificate with signWith uses a slot, but falls back to not using one
+ // when no slot is available.
auto kps = std::vector<KeyParameter>({
KMV1::makeKeyParameter(KMV1::TAG_ALGORITHM, Algorithm::RSA),
KMV1::makeKeyParameter(KMV1::TAG_KEY_SIZE, 2048),
KMV1::makeKeyParameter(KMV1::TAG_RSA_PUBLIC_EXPONENT, 65537),
KMV1::makeKeyParameter(KMV1::TAG_DIGEST, Digest::SHA_2_256),
KMV1::makeKeyParameter(KMV1::TAG_PURPOSE, KeyPurpose::SIGN),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_BEFORE, 0),
+ KMV1::makeKeyParameter(KMV1::TAG_CERTIFICATE_NOT_AFTER, 253402300799000),
KMV1::makeKeyParameter(KMV1::TAG_NO_AUTH_REQUIRED, true),
});
KeyCreationResult creationResult;
- status = device->generateKey(kps, &creationResult);
- ASSERT_TRUE(!status.isOk());
- ASSERT_EQ(status.getServiceSpecificError(),
- static_cast<int32_t>(ErrorCode::TOO_MANY_OPERATIONS));
+ status = device->generateKey(kps, std::nullopt /* attest_key */, &creationResult);
+ ASSERT_TRUE(status.isOk());
// But generating a certificate with signCert does not use a slot.
kps.pop_back();
- status = device->generateKey(kps, &creationResult);
+ status = device->generateKey(kps, std::nullopt /* attest_key */, &creationResult);
ASSERT_TRUE(status.isOk());
// Destructing operations should free up their slots.
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 230a82c..7c8a909 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -12,12 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![allow(dead_code)]
-
//! This module implements methods to load legacy keystore key blob files.
use crate::{
- database::KeyMetaData,
error::{Error as KsError, ResponseCode},
key_parameter::{KeyParameter, KeyParameterValue},
super_key::SuperKeyManager,
@@ -27,9 +24,13 @@
SecurityLevel::SecurityLevel, Tag::Tag, TagType::TagType,
};
use anyhow::{Context, Result};
-use keystore2_crypto::{aes_gcm_decrypt, derive_key_from_password, ZVec};
-use std::io::{ErrorKind, Read};
+use keystore2_crypto::{aes_gcm_decrypt, Password, ZVec};
+use std::collections::{HashMap, HashSet};
use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
+use std::{
+ fs,
+ io::{ErrorKind, Read, Result as IoResult},
+};
const SUPPORTED_LEGACY_BLOB_VERSION: u8 = 3;
@@ -213,7 +214,7 @@
// flags (1 Byte)
// info (1 Byte)
// initialization_vector (16 Bytes)
- // integrity (MD5 digest or gcb tag) (16 Bytes)
+ // integrity (MD5 digest or gcm tag) (16 Bytes)
// length (4 Bytes)
const COMMON_HEADER_SIZE: usize = 4 + Self::IV_SIZE + Self::GCM_TAG_LENGTH + 4;
@@ -224,13 +225,14 @@
const LENGTH_OFFSET: usize = 4 + Self::IV_SIZE + Self::GCM_TAG_LENGTH;
const IV_OFFSET: usize = 4;
const AEAD_TAG_OFFSET: usize = Self::IV_OFFSET + Self::IV_SIZE;
- const DIGEST_OFFSET: usize = Self::IV_OFFSET + Self::IV_SIZE;
+ const _DIGEST_OFFSET: usize = Self::IV_OFFSET + Self::IV_SIZE;
/// Construct a new LegacyBlobLoader with a root path of `path` relative to which it will
/// expect legacy key blob files.
pub fn new(path: &Path) -> Self {
Self { path: path.to_owned() }
}
+
/// Encodes an alias string as ascii character sequence in the range
/// ['+' .. '.'] and ['0' .. '~'].
/// Bytes with values in the range ['0' .. '~'] are represented as they are.
@@ -587,7 +589,7 @@
let sw_list = Self::read_key_parameters(&mut stream)
.context("In read_characteristics_file.")?
.into_iter()
- .map(|value| KeyParameter::new(value, SecurityLevel::SOFTWARE));
+ .map(|value| KeyParameter::new(value, SecurityLevel::KEYSTORE));
Ok(hw_list.into_iter().flatten().chain(sw_list).collect())
}
@@ -600,7 +602,7 @@
// used this for user installed certificates without private key material.
fn read_km_blob_file(&self, uid: u32, alias: &str) -> Result<Option<(Blob, String)>> {
- let mut iter = ["USRPKEY", "USERSKEY"].iter();
+ let mut iter = ["USRPKEY", "USRSKEY"].iter();
let (blob, prefix) = loop {
if let Some(prefix) = iter.next() {
@@ -619,7 +621,7 @@
}
fn read_generic_blob(path: &Path) -> Result<Option<Blob>> {
- let mut file = match File::open(path) {
+ let mut file = match Self::with_retry_interrupted(|| File::open(path)) {
Ok(file) => file,
Err(e) => match e.kind() {
ErrorKind::NotFound => return Ok(None),
@@ -630,50 +632,338 @@
Ok(Some(Self::new_from_stream(&mut file).context("In read_generic_blob.")?))
}
- /// This function constructs the blob file name which has the form:
- /// user_<android user id>/<uid>_<alias>.
- fn make_blob_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
+ /// Read a legacy vpn profile blob.
+ pub fn read_vpn_profile(&self, uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
+ let path = match self.make_vpn_profile_filename(uid, alias) {
+ Some(path) => path,
+ None => return Ok(None),
+ };
+
+ let blob =
+ Self::read_generic_blob(&path).context("In read_vpn_profile: Failed to read blob.")?;
+
+ Ok(blob.and_then(|blob| match blob.value {
+ BlobValue::Generic(blob) => Some(blob),
+ _ => {
+ log::info!("Unexpected vpn profile blob type. Ignoring");
+ None
+ }
+ }))
+ }
+
+ /// Remove a vpn profile by the name alias with owner uid.
+ pub fn remove_vpn_profile(&self, uid: u32, alias: &str) -> Result<()> {
+ let path = match self.make_vpn_profile_filename(uid, alias) {
+ Some(path) => path,
+ None => return Ok(()),
+ };
+
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ ErrorKind::NotFound => return Ok(()),
+ _ => return Err(e).context("In remove_vpn_profile."),
+ }
+ }
+
+ let user_id = uid_to_android_user(uid);
+ self.remove_user_dir_if_empty(user_id)
+ .context("In remove_vpn_profile: Trying to remove empty user dir.")
+ }
+
+ fn is_vpn_profile(encoded_alias: &str) -> bool {
+ // We can check the encoded alias because the prefixes we are interested
+ // in consist only of printable characters that do not get mangled by the alias encoding.
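+ // For example, hypothetical aliases "VPN_myvpn", "PLATFORM_VPN_1_myvpn", and
+ // "LOCKDOWN_VPN" all match, while "USRPKEY_myvpn" does not.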
+ encoded_alias.starts_with("VPN_")
+ || encoded_alias.starts_with("PLATFORM_VPN_")
+ || encoded_alias == "LOCKDOWN_VPN"
+ }
+
+ /// List all profiles belonging to the given uid.
+ pub fn list_vpn_profiles(&self, uid: u32) -> Result<Vec<String>> {
let mut path = self.path.clone();
let user_id = uid_to_android_user(uid);
- let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
path.push(format!("user_{}", user_id));
+ let uid_str = uid.to_string();
+ let dir =
+ Self::with_retry_interrupted(|| fs::read_dir(path.as_path())).with_context(|| {
+ format!("In list_vpn_profiles: Failed to open legacy blob database. {:?}", path)
+ })?;
+ let mut result: Vec<String> = Vec::new();
+ for entry in dir {
+ let file_name =
+ entry.context("In list_vpn_profiles: Trying to access dir entry")?.file_name();
+ if let Some(f) = file_name.to_str() {
+ let encoded_alias = &f[uid_str.len() + 1..];
+ if f.starts_with(&uid_str) && Self::is_vpn_profile(encoded_alias) {
+ result.push(
+ Self::decode_alias(encoded_alias)
+ .context("In list_vpn_profiles: Trying to decode alias.")?,
+ )
+ }
+ }
+ }
+ Ok(result)
+ }
+
+ /// This function constructs the vpn_profile file name which has the form:
+ /// user_<android user id>/<uid>_<alias>.
+ fn make_vpn_profile_filename(&self, uid: u32, alias: &str) -> Option<PathBuf> {
+ // Legacy vpn entries must start with VPN_ or PLATFORM_VPN_ or be literally called
+ // LOCKDOWN_VPN.
+ if !Self::is_vpn_profile(alias) {
+ return None;
+ }
+
+ let mut path = self.path.clone();
+ let user_id = uid_to_android_user(uid);
+ let encoded_alias = Self::encode_alias(alias);
+ path.push(format!("user_{}", user_id));
+ path.push(format!("{}_{}", uid, encoded_alias));
+ Some(path)
+ }
+
+ /// This function constructs the blob file name which has the form:
+ /// user_<android user id>/<uid>_<prefix>_<alias>.
+ fn make_blob_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
+ let user_id = uid_to_android_user(uid);
+ let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
+ let mut path = self.make_user_path_name(user_id);
path.push(format!("{}_{}", uid, encoded_alias));
path
}
/// This function constructs the characteristics file name which has the form:
- /// user_<android user id>/.<uid>_chr_<alias>.
+ /// user_<android user id>/.<uid>_chr_<prefix>_<alias>.
fn make_chr_filename(&self, uid: u32, alias: &str, prefix: &str) -> PathBuf {
- let mut path = self.path.clone();
let user_id = uid_to_android_user(uid);
let encoded_alias = Self::encode_alias(&format!("{}_{}", prefix, alias));
- path.push(format!("user_{}", user_id));
+ let mut path = self.make_user_path_name(user_id);
path.push(format!(".{}_chr_{}", uid, encoded_alias));
path
}
- fn load_by_uid_alias(
+ fn make_super_key_filename(&self, user_id: u32) -> PathBuf {
+ let mut path = self.make_user_path_name(user_id);
+ path.push(".masterkey");
+ path
+ }
+
+ fn make_user_path_name(&self, user_id: u32) -> PathBuf {
+ let mut path = self.path.clone();
+ path.push(&format!("user_{}", user_id));
+ path
+ }
+
+ /// Returns true if the legacy blob database is empty, i.e., there are no entries matching
+ /// "user_*" in the database dir.
+ pub fn is_empty(&self) -> Result<bool> {
+ let dir = Self::with_retry_interrupted(|| fs::read_dir(self.path.as_path()))
+ .context("In is_empty: Failed to open legacy blob database.")?;
+ for entry in dir {
+ if (*entry.context("In is_empty: Trying to access dir entry")?.file_name())
+ .to_str()
+ .map_or(false, |f| f.starts_with("user_"))
+ {
+ return Ok(false);
+ }
+ }
+ Ok(true)
+ }
+
+ /// Returns true if the legacy blob database is empty for the given user, i.e., the
+ /// user_<user_id> directory does not exist or contains no entries.
+ pub fn is_empty_user(&self, user_id: u32) -> Result<bool> {
+ let mut user_path = self.path.clone();
+ user_path.push(format!("user_{}", user_id));
+ if !user_path.as_path().is_dir() {
+ return Ok(true);
+ }
+ Ok(Self::with_retry_interrupted(|| user_path.read_dir())
+ .context("In is_empty_user: Failed to open legacy user dir.")?
+ .next()
+ .is_none())
+ }
+
+ fn extract_alias(encoded_alias: &str) -> Option<String> {
+ // We can check the encoded alias because the prefixes we are interested
+ // in consist only of printable characters that do not get mangled by the alias encoding.
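+ // For example, a hypothetical entry "USRCERT_wifi" yields Some("wifi"), while an entry
+ // with an unknown prefix yields None.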
+ for prefix in &["USRPKEY_", "USRSKEY_", "USRCERT_", "CACERT_"] {
+ if let Some(alias) = encoded_alias.strip_prefix(prefix) {
+ return Self::decode_alias(&alias).ok();
+ }
+ }
+ None
+ }
+
+ /// List all entries for a given user. The strings are unchanged file names, i.e.,
+ /// encoded with UID prefix.
+ fn list_user(&self, user_id: u32) -> Result<Vec<String>> {
+ let path = self.make_user_path_name(user_id);
+ let dir =
+ Self::with_retry_interrupted(|| fs::read_dir(path.as_path())).with_context(|| {
+ format!("In list_user: Failed to open legacy blob database. {:?}", path)
+ })?;
+ let mut result: Vec<String> = Vec::new();
+ for entry in dir {
+ let file_name = entry.context("In list_user: Trying to access dir entry")?.file_name();
+ if let Some(f) = file_name.to_str() {
+ result.push(f.to_string())
+ }
+ }
+ Ok(result)
+ }
+
+ /// List all keystore entries belonging to the given user. Returns a map of UIDs
+ /// to sets of decoded aliases.
+ pub fn list_keystore_entries_for_user(
+ &self,
+ user_id: u32,
+ ) -> Result<HashMap<u32, HashSet<String>>> {
+ let user_entries = self
+ .list_user(user_id)
+ .context("In list_keystore_entries_for_user: Trying to list user.")?;
+
+ let result =
+ user_entries.into_iter().fold(HashMap::<u32, HashSet<String>>::new(), |mut acc, v| {
+ if let Some(sep_pos) = v.find('_') {
+ if let Ok(uid) = v[0..sep_pos].parse::<u32>() {
+ if let Some(alias) = Self::extract_alias(&v[sep_pos + 1..]) {
+ let entry = acc.entry(uid).or_default();
+ entry.insert(alias);
+ }
+ }
+ }
+ acc
+ });
+ Ok(result)
+ }
+
+ /// List all keystore entries belonging to the given uid.
+ pub fn list_keystore_entries_for_uid(&self, uid: u32) -> Result<Vec<String>> {
+ let user_id = uid_to_android_user(uid);
+
+ let user_entries = self
+ .list_user(user_id)
+ .context("In list_keystore_entries_for_uid: Trying to list user.")?;
+
+ let uid_str = format!("{}_", uid);
+
+ let mut result: Vec<String> = user_entries
+ .into_iter()
+ .filter_map(|v| {
+ if !v.starts_with(&uid_str) {
+ return None;
+ }
+ let encoded_alias = &v[uid_str.len()..];
+ Self::extract_alias(encoded_alias)
+ })
+ .collect();
+
+ result.sort_unstable();
+ result.dedup();
+ Ok(result)
+ }
+
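+ /// Runs the given file system operation, retrying as long as it fails with
+ /// ErrorKind::Interrupted (EINTR), because such failures are transient and the call can
+ /// simply be repeated.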
+ fn with_retry_interrupted<F, T>(f: F) -> IoResult<T>
+ where
+ F: Fn() -> IoResult<T>,
+ {
+ loop {
+ match f() {
+ Ok(v) => return Ok(v),
+ Err(e) => match e.kind() {
+ ErrorKind::Interrupted => continue,
+ _ => return Err(e),
+ },
+ }
+ }
+ }
+
+ /// Deletes a keystore entry. Also removes the owning user_<user_id> directory if it
+ /// becomes empty as a result.
+ pub fn remove_keystore_entry(&self, uid: u32, alias: &str) -> Result<bool> {
+ let mut something_was_deleted = false;
+ let prefixes = ["USRPKEY", "USRSKEY"];
+ for prefix in &prefixes {
+ let path = self.make_blob_filename(uid, alias, prefix);
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ // Only a subset of the possible key blob files is expected to exist,
+ // so a missing file is not an error.
+ ErrorKind::NotFound => continue,
+ // Log error but ignore.
+ _ => log::error!("Error while deleting key blob entries. {:?}", e),
+ }
+ }
+ let path = self.make_chr_filename(uid, alias, prefix);
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ ErrorKind::NotFound => {
+ log::info!("No characteristics file found for legacy key blob.")
+ }
+ // Log error but ignore.
+ _ => log::error!("Error while deleting key blob entries. {:?}", e),
+ }
+ }
+ something_was_deleted = true;
+ // Only one of USRPKEY and USRSKEY can be present. So we can end the loop
+ // if we reach this point.
+ break;
+ }
+
+ let prefixes = ["USRCERT", "CACERT"];
+ for prefix in &prefixes {
+ let path = self.make_blob_filename(uid, alias, prefix);
+ if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
+ match e.kind() {
+ // USRCERT and CACERT are optional; either, both, or neither may be present.
+ ErrorKind::NotFound => continue,
+ // Log error but ignore.
+ _ => log::error!("Error while deleting key blob entries. {:?}", e),
+ }
+ something_was_deleted = true;
+ }
+ }
+
+ if something_was_deleted {
+ let user_id = uid_to_android_user(uid);
+ self.remove_user_dir_if_empty(user_id)
+ .context("In remove_keystore_entry: Trying to remove empty user dir.")?;
+ }
+
+ Ok(something_was_deleted)
+ }
+
+ fn remove_user_dir_if_empty(&self, user_id: u32) -> Result<()> {
+ if self
+ .is_empty_user(user_id)
+ .context("In remove_user_dir_if_empty: Trying to check for empty user dir.")?
+ {
+ let user_path = self.make_user_path_name(user_id);
+ Self::with_retry_interrupted(|| fs::remove_dir(user_path.as_path())).ok();
+ }
+ Ok(())
+ }
+
+ /// Load a legacy key blob entry by uid and alias.
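+ /// If `key_manager` is `None`, encrypted key blobs are returned as is instead of being
+ /// decrypted with the user's per-boot key.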
+ pub fn load_by_uid_alias(
&self,
uid: u32,
alias: &str,
- key_manager: &SuperKeyManager,
- ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>, KeyMetaData)>
- {
- let metadata = KeyMetaData::new();
-
+ key_manager: Option<&SuperKeyManager>,
+ ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
let km_blob = self.read_km_blob_file(uid, alias).context("In load_by_uid_alias.")?;
let km_blob = match km_blob {
Some((km_blob, prefix)) => {
- let km_blob =
- match km_blob {
- Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
- // Unwrap the key blob if required.
- Blob { flags, value: BlobValue::Encrypted { iv, tag, data } } => {
+ let km_blob = match km_blob {
+ Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
+ // Unwrap the key blob if required and if we have key_manager.
+ Blob { flags, value: BlobValue::Encrypted { ref iv, ref tag, ref data } } => {
+ if let Some(key_manager) = key_manager {
let decrypted = match key_manager
.get_per_boot_key_by_user_id(uid_to_android_user(uid))
{
- Some(key) => aes_gcm_decrypt(&data, &iv, &tag, &key).context(
+ Some(key) => key.aes_gcm_decrypt(data, iv, tag).context(
"In load_by_uid_alias: while trying to decrypt legacy blob.",
)?,
None => {
@@ -687,11 +977,16 @@
}
};
Blob { flags, value: BlobValue::Decrypted(decrypted) }
+ } else {
+ km_blob
}
- _ => return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
+ }
+ _ => {
+ return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
"In load_by_uid_alias: Found wrong blob type in legacy key blob file.",
- ),
- };
+ )
+ }
+ };
let hw_sec_level = match km_blob.is_strongbox() {
true => SecurityLevel::STRONGBOX,
@@ -729,28 +1024,38 @@
}
};
- Ok((km_blob, user_cert, ca_cert, metadata))
+ Ok((km_blob, user_cert, ca_cert))
+ }
+
+ /// Returns true if the given user has a super key.
+ pub fn has_super_key(&self, user_id: u32) -> bool {
+ self.make_super_key_filename(user_id).is_file()
}
/// Load and decrypt legacy super key blob.
- pub fn load_super_key(&self, user_id: u32, pw: &[u8]) -> Result<Option<ZVec>> {
- let mut path = self.path.clone();
- path.push(&format!("user_{}", user_id));
- path.push(".masterkey");
+ pub fn load_super_key(&self, user_id: u32, pw: &Password) -> Result<Option<ZVec>> {
+ let path = self.make_super_key_filename(user_id);
let blob = Self::read_generic_blob(&path)
.context("In load_super_key: While loading super key.")?;
let blob = match blob {
Some(blob) => match blob {
- Blob {
- value: BlobValue::PwEncrypted { iv, tag, data, salt, key_size }, ..
- } => {
- let key = derive_key_from_password(pw, Some(&salt), key_size)
- .context("In load_super_key: Failed to derive key from password.")?;
- let blob = aes_gcm_decrypt(&data, &iv, &tag, &key).context(
- "In load_super_key: while trying to decrypt legacy super key blob.",
- )?;
- Some(blob)
+ Blob { flags, value: BlobValue::PwEncrypted { iv, tag, data, salt, key_size } } => {
+ if (flags & flags::ENCRYPTED) != 0 {
+ let key = pw
+ .derive_key(Some(&salt), key_size)
+ .context("In load_super_key: Failed to derive key from password.")?;
+ let blob = aes_gcm_decrypt(&data, &iv, &tag, &key).context(
+ "In load_super_key: while trying to decrypt legacy super key blob.",
+ )?;
+ Some(blob)
+ } else {
+ // In 2019 we had some unencrypted super keys due to b/141955555.
+ Some(
+ data.try_into()
+ .context("In load_super_key: Trying to convert key into ZVec")?,
+ )
+ }
}
_ => {
return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
@@ -763,6 +1068,18 @@
Ok(blob)
}
+
+ /// Removes the super key for the given user from the legacy database.
+ /// If this was the last entry in the user's database, this function removes
+ /// the user_<user_id> directory as well.
+ pub fn remove_super_key(&self, user_id: u32) {
+ let path = self.make_super_key_filename(user_id);
+ Self::with_retry_interrupted(|| fs::remove_file(path.as_path())).ok();
+ if self.is_empty_user(user_id).ok().unwrap_or(false) {
+ let path = self.make_user_path_name(user_id);
+ Self::with_retry_interrupted(|| fs::remove_dir(path.as_path())).ok();
+ }
+ }
}
#[cfg(test)]
@@ -897,6 +1214,37 @@
}
#[test]
+ fn test_is_empty() {
+ let temp_dir = TempDir::new("test_is_empty").expect("Failed to create temp dir.");
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert!(legacy_blob_loader.is_empty().expect("Should succeed and be empty."));
+
+ let _db = crate::database::KeystoreDB::new(temp_dir.path(), None)
+ .expect("Failed to open database.");
+
+ assert!(legacy_blob_loader.is_empty().expect("Should succeed and still be empty."));
+
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).expect("Failed to create user_0.");
+
+ assert!(!legacy_blob_loader.is_empty().expect("Should succeed but not be empty."));
+
+ std::fs::create_dir(&*temp_dir.build().push("user_10")).expect("Failed to create user_10.");
+
+ assert!(!legacy_blob_loader.is_empty().expect("Should succeed but still not be empty."));
+
+ std::fs::remove_dir_all(&*temp_dir.build().push("user_0"))
+ .expect("Failed to remove user_0.");
+
+ assert!(!legacy_blob_loader.is_empty().expect("Should succeed but still not be empty."));
+
+ std::fs::remove_dir_all(&*temp_dir.build().push("user_10"))
+ .expect("Failed to remove user_10.");
+
+ assert!(legacy_blob_loader.is_empty().expect("Should succeed and be empty again."));
+ }
+
+ #[test]
fn test_legacy_blobs() -> anyhow::Result<()> {
let temp_dir = TempDir::new("legacy_blob_test")?;
std::fs::create_dir(&*temp_dir.build().push("user_0"))?;
@@ -938,32 +1286,32 @@
)?;
let key_manager = crate::super_key::SuperKeyManager::new();
- let mut db = crate::database::KeystoreDB::new(temp_dir.path())?;
+ let mut db = crate::database::KeystoreDB::new(temp_dir.path(), None)?;
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
assert_eq!(
legacy_blob_loader
- .load_by_uid_alias(10223, "authbound", &key_manager)
+ .load_by_uid_alias(10223, "authbound", Some(&key_manager))
.unwrap_err()
.root_cause()
.downcast_ref::<error::Error>(),
Some(&error::Error::Rc(ResponseCode::LOCKED))
);
- key_manager.unlock_user_key(0, PASSWORD, &mut db, &legacy_blob_loader)?;
+ key_manager.unlock_user_key(&mut db, 0, &(PASSWORD.into()), &legacy_blob_loader)?;
- if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain), _kp) =
- legacy_blob_loader.load_by_uid_alias(10223, "authbound", &key_manager)?
+ if let (Some((Blob { flags, value: _ }, _params)), Some(cert), Some(chain)) =
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
{
assert_eq!(flags, 4);
- assert_eq!(value, BlobValue::Decrypted(DECRYPTED_USRPKEY_AUTHBOUND.try_into()?));
+ //assert_eq!(value, BlobValue::Encrypted(..));
assert_eq!(&cert[..], LOADED_CERT_AUTHBOUND);
assert_eq!(&chain[..], LOADED_CACERT_AUTHBOUND);
} else {
panic!("");
}
- if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain), _kp) =
- legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &key_manager)?
+ if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
{
assert_eq!(flags, 0);
assert_eq!(value, BlobValue::Decrypted(LOADED_USRPKEY_NON_AUTHBOUND.try_into()?));
@@ -973,6 +1321,33 @@
panic!("");
}
+ legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+ legacy_blob_loader
+ .remove_keystore_entry(10223, "non_authbound")
+ .expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+ );
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty()?);
+ assert!(!legacy_blob_loader.is_empty_user(0)?);
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1)?);
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0)?);
+ assert!(legacy_blob_loader.is_empty()?);
+
Ok(())
}
}
diff --git a/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs b/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
index aa99162..14bd40c 100644
--- a/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
+++ b/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
@@ -741,7 +741,7 @@
0xab, 0xae, 0x24, 0xe2, 0x44, 0x35, 0x16, 0x8d, 0x55, 0x3c, 0xe4,
];
-pub static DECRYPTED_USRPKEY_AUTHBOUND: &[u8] = &[
+pub static _DECRYPTED_USRPKEY_AUTHBOUND: &[u8] = &[
0x44, 0x4b, 0x4d, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
0xc6, 0x15, 0x3a, 0x08, 0x1e, 0x43, 0xba, 0x7a, 0x0f, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_migrator.rs
new file mode 100644
index 0000000..e5bcae4
--- /dev/null
+++ b/keystore2/src/legacy_migrator.rs
@@ -0,0 +1,720 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module acts as a bridge between the legacy key database and the keystore2 database.
+
+use crate::error::Error;
+use crate::key_parameter::KeyParameterValue;
+use crate::legacy_blob::BlobValue;
+use crate::utils::uid_to_android_user;
+use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use crate::{
+ database::{
+ BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
+ KeyMetaEntry, KeystoreDB, Uuid, KEYSTORE_UUID,
+ },
+ super_key::USER_SUPER_KEY,
+};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
+};
+use anyhow::{Context, Result};
+use core::ops::Deref;
+use keystore2_crypto::{Password, ZVec};
+use std::collections::{HashMap, HashSet};
+use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::{Arc, Mutex};
+
+/// Represents LegacyMigrator.
+pub struct LegacyMigrator {
+ async_task: Arc<AsyncTask>,
+ initializer: Mutex<
+ Option<
+ Box<
+ dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+ + Send
+ + 'static,
+ >,
+ >,
+ >,
+ /// This atomic is used for cheap interior mutability. It is intended to prevent
+ /// expensive calls into the legacy migrator when the legacy database is empty.
+ /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
+ /// of time. This is an acceptable trade-off that keeps the common case cheap.
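+ /// The only valid transitions are UNINITIALIZED -> READY, UNINITIALIZED -> EMPTY, and
+ /// READY -> EMPTY; the migrator never transitions back out of EMPTY.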
+ state: AtomicU8,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct RecentMigration {
+ uid: u32,
+ alias: String,
+}
+
+impl RecentMigration {
+ fn new(uid: u32, alias: String) -> Self {
+ Self { uid, alias }
+ }
+}
+
+enum BulkDeleteRequest {
+ Uid(u32),
+ User(u32),
+}
+
+struct LegacyMigratorState {
+ recently_migrated: HashSet<RecentMigration>,
+ recently_migrated_super_key: HashSet<u32>,
+ legacy_loader: Arc<LegacyBlobLoader>,
+ sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
+ db: KeystoreDB,
+}
+
+impl LegacyMigrator {
+ const WIFI_NAMESPACE: i64 = 102;
+ const AID_WIFI: u32 = 1010;
+
+ const STATE_UNINITIALIZED: u8 = 0;
+ const STATE_READY: u8 = 1;
+ const STATE_EMPTY: u8 = 2;
+
+ /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
+ /// worker.
+ pub fn new(async_task: Arc<AsyncTask>) -> Self {
+ Self {
+ async_task,
+ initializer: Default::default(),
+ state: AtomicU8::new(Self::STATE_UNINITIALIZED),
+ }
+ }
+
+ /// The legacy migrator must be initialized lazily, because keystore starts very early.
+ /// At this time the data partition may not be mounted. So we cannot open database connections
+ /// until we get actual key load requests. This sets the function that the legacy loader
+ /// uses to connect to the database.
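+ ///
+ /// A minimal usage sketch (the names in the closure are hypothetical):
+ /// `migrator.set_init(|| (keystore_db, km_uuid_map, legacy_blob_loader))`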
+ pub fn set_init<F>(&self, f_init: F) -> Result<()>
+ where
+ F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
+ + Send
+ + 'static,
+ {
+ let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");
+
+ // If we are not uninitialized we have no business setting the initializer.
+ if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
+ return Ok(());
+ }
+
+ // Only set the initializer if it hasn't been set before.
+ if initializer.is_none() {
+ *initializer = Some(Box::new(f_init))
+ }
+
+ Ok(())
+ }
+
+ /// This function is called by the migration requestor to check if it is worth
+ /// making a migration request. It also transitions the state from UNINITIALIZED
+ /// to READY or EMPTY on first use. The deferred initialization is necessary, because
+ /// Keystore 2.0 runs early during boot, when the data partition may not yet be mounted.
+ /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
+ /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
+ /// was not initialized and cannot be initialized.
+ fn check_state(&self) -> Result<u8> {
+ let mut first_try = true;
+ loop {
+ match (self.state.load(Ordering::Relaxed), first_try) {
+ (Self::STATE_EMPTY, _) => {
+ return Ok(Self::STATE_EMPTY);
+ }
+ (Self::STATE_UNINITIALIZED, true) => {
+ // If we find the legacy loader uninitialized, we grab the initializer lock,
+ // check if the legacy database is empty, and if not, schedule an initialization
+ // request. Coming out of the initializer lock, the state is either EMPTY or
+ // READY.
+ let mut initializer = self.initializer.lock().unwrap();
+
+ if let Some(initializer) = initializer.take() {
+ let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();
+
+ if legacy_loader.is_empty().context(
+ "In check_state: Trying to check if the legacy database is empty.",
+ )? {
+ self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
+ return Ok(Self::STATE_EMPTY);
+ }
+
+ self.async_task.queue_hi(move |shelf| {
+ shelf.get_or_put_with(|| LegacyMigratorState {
+ recently_migrated: Default::default(),
+ recently_migrated_super_key: Default::default(),
+ legacy_loader,
+ sec_level_to_km_uuid,
+ db,
+ });
+ });
+
+ // It is safe to set this here even though the async task may not yet have
+ // run because any thread observing this will not be able to schedule a
+ // task that can run before the initialization.
+ // Also we can only transition out of this state while having the
+ // initializer lock and having found an initializer.
+ self.state.store(Self::STATE_READY, Ordering::Relaxed);
+ return Ok(Self::STATE_READY);
+ } else {
+ // There is a chance that we just lost the race from state.load() to
+ // grabbing the initializer mutex. If that is the case the state must
+ // be EMPTY or READY after coming out of the lock. So we can give it
+ // one more try.
+ first_try = false;
+ continue;
+ }
+ }
+ (Self::STATE_UNINITIALIZED, false) => {
+ // Okay, tough luck. The legacy loader was really completely uninitialized.
+ return Err(Error::sys()).context(
+ "In check_state: Legacy loader should not be called uninitialized.",
+ );
+ }
+ (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
+ (s, _) => panic!("Unknown legacy migrator state. {} ", s),
+ }
+ }
+ }
+
+ /// List all aliases for uid in the legacy database.
+ pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
+ let uid = match (domain, namespace) {
+ (Domain::APP, namespace) => namespace as u32,
+ (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
+ _ => return Ok(Vec::new()),
+ };
+ self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
+ |v| {
+ v.into_iter()
+ .map(|alias| KeyDescriptor {
+ domain,
+ nspace: namespace,
+ alias: Some(alias),
+ blob: None,
+ })
+ .collect()
+ },
+ )
+ }
+
+ /// Sends the given closure to the migrator thread for execution after calling check_state.
+ /// Returns None if the database was empty and the request was not executed.
+ /// Otherwise returns Some with the result produced by the migration request.
+ /// The loader state may transition to STATE_EMPTY during the execution of this function.
+ fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
+ where
+ F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
+ {
+ // Short circuit if the database is empty or not initialized (error case).
+ match self.check_state().context("In do_serialized: Checking state.") {
+ Ok(LegacyMigrator::STATE_EMPTY) => return None,
+ Ok(LegacyMigrator::STATE_READY) => {}
+ Err(e) => return Some(Err(e)),
+ Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
+ }
+
+ // We have established that there may be a key in the legacy database.
+ // Now we schedule a migration request.
+ let (sender, receiver) = channel();
+ self.async_task.queue_hi(move |shelf| {
+ // Get the migrator state from the shelf.
+ // There may not be a state. This can happen if this migration request was scheduled
+ // before a previous request established that the legacy database was empty
+ // and removed the state from the shelf. Since we know now that the database
+ // is empty, we can return None here.
+ let (new_state, result) = if let Some(legacy_migrator_state) =
+ shelf.get_downcast_mut::<LegacyMigratorState>()
+ {
+ let result = f(legacy_migrator_state);
+ (legacy_migrator_state.check_empty(), Some(result))
+ } else {
+ (Self::STATE_EMPTY, None)
+ };
+
+ // If the migration request determined that the database is now empty, we discard
+ // the state from the shelf to free up the resources we won't need any longer.
+ if result.is_some() && new_state == Self::STATE_EMPTY {
+ shelf.remove_downcast_ref::<LegacyMigratorState>();
+ }
+
+ // Send the result to the requester.
+ if let Err(e) = sender.send((new_state, result)) {
+ log::error!("In do_serialized. Error in sending the result. {:?}", e);
+ }
+ });
+
+ let (new_state, result) = match receiver.recv() {
+ Err(e) => {
+ return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
+ }
+ Ok(r) => r,
+ };
+
+ // We can only transition to EMPTY but never back.
+ // The migrator never creates any legacy blobs.
+ if new_state == Self::STATE_EMPTY {
+ self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
+ }
+
+ result
+ }
+
+ /// Runs the key_accessor function and returns its result. If it returns an error and the
+ /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
+ /// the legacy database to the new database and runs the key_accessor function again if
+ /// the migration request was successful.
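+ ///
+ /// A minimal usage sketch (the accessor closure shown is hypothetical):
+ /// `migrator.with_try_migrate(&key, caller_uid, || load_key_entry_from_db(&key))`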
+ pub fn with_try_migrate<F, T>(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ key_accessor: F,
+ ) -> Result<T>
+ where
+ F: Fn() -> Result<T>,
+ {
+ // Access the key and return on success.
+ match key_accessor() {
+ Ok(result) => return Ok(result),
+ Err(e) => match e.root_cause().downcast_ref::<Error>() {
+ Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
+ _ => return Err(e),
+ },
+ }
+
+ // Filter inputs. We can only load legacy app domain keys, plus a few special cases
+ // (such as Wi-Fi) that we migrate transparently to an SELINUX domain.
+ let uid = match key {
+ KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
+ KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
+ match *nspace {
+ Self::WIFI_NAMESPACE => Self::AID_WIFI,
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context(format!("No legacy keys for namespace {}", nspace))
+ }
+ }
+ }
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("No legacy keys for key descriptor.")
+ }
+ };
+
+ let key_clone = key.clone();
+ let result = self
+ .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
+
+ if let Some(result) = result {
+ result?;
+ // After successful migration try again.
+ key_accessor()
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
+ }
+ }
+
+ /// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
+ /// this function makes a migration request and on success retries the key_accessor.
+ pub fn with_try_migrate_super_key<F, T>(
+ &self,
+ user_id: u32,
+ pw: &Password,
+ mut key_accessor: F,
+ ) -> Result<Option<T>>
+ where
+ F: FnMut() -> Result<Option<T>>,
+ {
+ match key_accessor() {
+ Ok(Some(result)) => return Ok(Some(result)),
+ Ok(None) => {}
+ Err(e) => return Err(e),
+ }
+ let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
+ let result = self.do_serialized(move |migrator_state| {
+ migrator_state.check_and_migrate_super_key(user_id, &pw)
+ });
+
+ if let Some(result) = result {
+ result?;
+ // After successful migration try again.
+ key_accessor()
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Deletes all keys belonging to the given namespace, migrating them into the database
+ /// for subsequent garbage collection if necessary.
+ pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
+ let uid = match (domain, nspace) {
+ (Domain::APP, nspace) => nspace as u32,
+ (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
+ // Nothing to do.
+ _ => return Ok(()),
+ };
+
+ let result = self.do_serialized(move |migrator_state| {
+ migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
+ });
+
+ result.unwrap_or(Ok(()))
+ }
+
+ /// Deletes all keys belonging to the given android user, migrating them into the database
+ /// for subsequent garbage collection if necessary.
+ pub fn bulk_delete_user(
+ &self,
+ user_id: u32,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ let result = self.do_serialized(move |migrator_state| {
+ migrator_state
+ .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
+ });
+
+ result.unwrap_or(Ok(()))
+ }
+
+ /// Queries the legacy database for the presence of a super key for the given user.
+ pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
+ let result =
+ self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
+ result.unwrap_or(Ok(false))
+ }
+}
+
+impl LegacyMigratorState {
+ fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
+ let sec_level = if is_strongbox {
+ SecurityLevel::STRONGBOX
+ } else {
+ SecurityLevel::TRUSTED_ENVIRONMENT
+ };
+
+ self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
+ anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
+ })
+ }
+
+ fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
+ self.legacy_loader
+ .list_keystore_entries_for_uid(uid)
+ .context("In list_uid: Trying to list legacy entries.")
+ }
+
+ /// This is a key migration request that can run in the migrator thread. This should
+ /// be passed to do_serialized.
+ fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
+ let alias = key.alias.clone().ok_or_else(|| {
+ anyhow::anyhow!(Error::sys()).context(concat!(
+ "In check_and_migrate: Must be Some because ",
+ "our caller must not have called us otherwise."
+ ))
+ })?;
+
+ if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
+ return Ok(());
+ }
+
+ if key.domain == Domain::APP {
+ key.nspace = uid as i64;
+ }
+
+ // If the key is not found in the cache, try to load from the legacy database.
+ let (km_blob_params, user_cert, ca_cert) = self
+ .legacy_loader
+ .load_by_uid_alias(uid, &alias, None)
+ .context("In check_and_migrate: Trying to load legacy blob.")?;
+ let result = match km_blob_params {
+ Some((km_blob, params)) => {
+ let is_strongbox = km_blob.is_strongbox();
+ let (blob, mut blob_metadata) = match km_blob.take_value() {
+ BlobValue::Encrypted { iv, tag, data } => {
+ // Get super key id for user id.
+ let user_id = uid_to_android_user(uid as u32);
+
+ let super_key_id = match self
+ .db
+ .load_super_key(&USER_SUPER_KEY, user_id)
+ .context("In check_and_migrate: Failed to load super key")?
+ {
+ Some((_, entry)) => entry.id(),
+ None => {
+ // This might be the first time we access the super key,
+ // and it may not have been migrated yet. We cannot import
+ // the legacy super key now, because we would have to
+ // re-encrypt it, which requires the user to be unlocked;
+ // if the user were unlocked, the key would already have
+ // been migrated. We can check whether the legacy super key
+ // exists, though. If it does, we return Locked. Otherwise,
+ // we delete the key and return NotFound, because the key
+ // can never be unlocked again.
+ if self.legacy_loader.has_super_key(user_id) {
+ return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
+ "In check_and_migrate: Cannot migrate super key of this ",
+ "key while user is locked."
+ ));
+ } else {
+ self.legacy_loader.remove_keystore_entry(uid, &alias).context(
+ concat!(
+ "In check_and_migrate: ",
+ "Trying to remove obsolete key."
+ ),
+ )?;
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate: Obsolete key.");
+ }
+ }
+ };
+
+ let mut blob_metadata = BlobMetaData::new();
+ blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ blob_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ (LegacyBlob::Vec(data), blob_metadata)
+ }
+ BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
+ _ => {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate: Legacy key has unexpected type.")
+ }
+ };
+
+ let km_uuid = self
+ .get_km_uuid(is_strongbox)
+ .context("In check_and_migrate: Trying to get KM UUID")?;
+ blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+
+ let mut metadata = KeyMetaData::new();
+ let creation_date = DateTime::now()
+ .context("In check_and_migrate: Trying to make creation time.")?;
+ metadata.add(KeyMetaEntry::CreationDate(creation_date));
+
+ // Store legacy key in the database.
+ self.db
+ .store_new_key(
+ &key,
+ ¶ms,
+ &(&blob, &blob_metadata),
+ &CertificateInfo::new(user_cert, ca_cert),
+ &metadata,
+ &km_uuid,
+ )
+ .context("In check_and_migrate.")?;
+ Ok(())
+ }
+ None => {
+ if let Some(ca_cert) = ca_cert {
+ self.db
+ .store_new_certificate(&key, &ca_cert, &KEYSTORE_UUID)
+ .context("In check_and_migrate: Failed to insert new certificate.")?;
+ Ok(())
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate: Legacy key not found.")
+ }
+ }
+ };
+
+ match result {
+ Ok(()) => {
+ // Add the key to the migrated_keys list.
+ self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
+ // Delete legacy key from the file system
+ self.legacy_loader
+ .remove_keystore_entry(uid, &alias)
+ .context("In check_and_migrate: Trying to remove migrated key.")?;
+ Ok(())
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
+ if self.recently_migrated_super_key.contains(&user_id) {
+ return Ok(());
+ }
+
+ if let Some(super_key) = self
+ .legacy_loader
+ .load_super_key(user_id, &pw)
+ .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
+ {
+ let (blob, blob_metadata) =
+ crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
+ .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
+
+ self.db
+ .store_super_key(
+ user_id,
+ &USER_SUPER_KEY,
+ &blob,
+ &blob_metadata,
+ &KeyMetaData::new(),
+ )
+ .context(concat!(
+ "In check_and_migrate_super_key: ",
+ "Trying to insert legacy super_key into the database."
+ ))?;
+ self.legacy_loader.remove_super_key(user_id);
+ self.recently_migrated_super_key.insert(user_id);
+ Ok(())
+ } else {
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In check_and_migrate_super_key: No key found do migrate.")
+ }
+ }
+
+ /// Key migrator request to be run by do_serialized.
+ /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
+ fn bulk_delete(
+ &mut self,
+ bulk_delete_request: BulkDeleteRequest,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ let (aliases, user_id) = match bulk_delete_request {
+ BulkDeleteRequest::Uid(uid) => (
+ self.legacy_loader
+ .list_keystore_entries_for_uid(uid)
+ .context("In bulk_delete: Trying to get aliases for uid.")
+ .map(|aliases| {
+ let mut h = HashMap::<u32, HashSet<String>>::new();
+ h.insert(uid, aliases.into_iter().collect());
+ h
+ })?,
+ uid_to_android_user(uid),
+ ),
+ BulkDeleteRequest::User(user_id) => (
+ self.legacy_loader
+ .list_keystore_entries_for_user(user_id)
+ .context("In bulk_delete: Trying to get aliases for user_id.")?,
+ user_id,
+ ),
+ };
+
+ let super_key_id = self
+ .db
+ .load_super_key(&USER_SUPER_KEY, user_id)
+ .context("In bulk_delete: Failed to load super key")?
+ .map(|(_, entry)| entry.id());
+
+ for (uid, alias) in aliases
+ .into_iter()
+ .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
+ .flatten()
+ {
+ let (km_blob_params, _, _) = self
+ .legacy_loader
+ .load_by_uid_alias(uid, &alias, None)
+ .context("In bulk_delete: Trying to load legacy blob.")?;
+
+ // Determine if the key needs special handling to be deleted.
+ let (need_gc, is_super_encrypted) = km_blob_params
+ .as_ref()
+ .map(|(blob, params)| {
+ (
+ params.iter().any(|kp| {
+ KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
+ }),
+ blob.is_encrypted(),
+ )
+ })
+ .unwrap_or((false, false));
+
+ if keep_non_super_encrypted_keys && !is_super_encrypted {
+ continue;
+ }
+
+ if need_gc {
+ let mark_deleted = match km_blob_params
+ .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
+ {
+ Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
+ let mut blob_metadata = BlobMetaData::new();
+ if let (Ok(km_uuid), Some(super_key_id)) =
+ (self.get_km_uuid(is_strongbox), super_key_id)
+ {
+ blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ blob_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ Some((LegacyBlob::Vec(data), blob_metadata))
+ } else {
+ // Oh well - we tried our best, but if we cannot determine which
+ // KeyMint instance we have to send this blob to, we cannot
+ // do more than delete the key from the file system.
+ // And if we don't know which key wraps this key we cannot
+ // unwrap it for KeyMint either.
+ None
+ }
+ }
+ Some((_, BlobValue::Decrypted(data))) => {
+ Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
+ }
+ _ => None,
+ };
+
+ if let Some((blob, blob_metadata)) = mark_deleted {
+ self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
+ "In bulk_delete: Trying to insert deleted ",
+ "blob into the database for garbage collection."
+ ))?;
+ }
+ }
+
+ self.legacy_loader
+ .remove_keystore_entry(uid, &alias)
+ .context("In bulk_delete: Trying to remove migrated key.")?;
+ }
+ Ok(())
+ }
+
+ fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
+ Ok(self.recently_migrated_super_key.contains(&user_id)
+ || self.legacy_loader.has_super_key(user_id))
+ }
+
+ fn check_empty(&self) -> u8 {
+ if self.legacy_loader.is_empty().unwrap_or(false) {
+ LegacyMigrator::STATE_EMPTY
+ } else {
+ LegacyMigrator::STATE_READY
+ }
+ }
+}
+
+enum LegacyBlob {
+ Vec(Vec<u8>),
+ ZVec(ZVec),
+}
+
+impl Deref for LegacyBlob {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ Self::Vec(v) => &v,
+ Self::ZVec(v) => &v,
+ }
+ }
+}
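
The LegacyBlob enum above exists so the migrator can hand either a plain Vec<u8> or a zeroizing ZVec to code that only needs a byte slice; the Deref impl makes both usable wherever &[u8] is expected. A minimal standalone sketch of the same pattern (not part of the patch; the Zeroizing type below is only a stand-in for keystore2_crypto::ZVec):

    use std::ops::Deref;

    /// Stand-in for ZVec: a buffer that is wiped when dropped.
    struct Zeroizing(Vec<u8>);

    impl Drop for Zeroizing {
        fn drop(&mut self) {
            self.0.iter_mut().for_each(|b| *b = 0);
        }
    }

    enum Blob {
        Plain(Vec<u8>),
        Sensitive(Zeroizing),
    }

    impl Deref for Blob {
        type Target = [u8];
        fn deref(&self) -> &Self::Target {
            match self {
                Blob::Plain(v) => v,
                Blob::Sensitive(z) => &z.0,
            }
        }
    }

    fn store(blob: &[u8]) {
        println!("storing {} bytes", blob.len());
    }

    fn main() {
        // Deref coercion lets both variants be passed where &[u8] is expected.
        store(&Blob::Plain(vec![1, 2, 3]));
        store(&Blob::Sensitive(Zeroizing(vec![4, 5, 6])));
    }
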
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 811db91..62dc16a 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -16,21 +16,30 @@
#![recursion_limit = "256"]
pub mod apc;
+pub mod async_task;
pub mod authorization;
pub mod database;
+pub mod ec_crypto;
pub mod enforcements;
+pub mod entropy;
pub mod error;
pub mod globals;
/// Internal Representation of Key Parameter and convenience functions.
pub mod key_parameter;
pub mod legacy_blob;
+pub mod legacy_migrator;
+pub mod maintenance;
+pub mod metrics;
pub mod operation;
pub mod permission;
+pub mod remote_provisioning;
pub mod security_level;
pub mod service;
+pub mod shared_secret_negotiation;
+pub mod try_insert;
pub mod utils;
-mod async_task;
+mod attestation_key_utils;
mod db_utils;
mod gc;
mod super_key;
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
new file mode 100644
index 0000000..e059a0b
--- /dev/null
+++ b/keystore2/src/maintenance.rs
@@ -0,0 +1,201 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements IKeystoreMaintenance AIDL interface.
+
+use crate::error::map_km_error;
+use crate::error::Error as KeystoreError;
+use crate::globals::get_keymint_device;
+use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::permission::KeystorePerm;
+use crate::super_key::UserState;
+use crate::utils::check_keystore_permission;
+use crate::{database::MonotonicRawTime, error::map_or_log_err};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::IKeyMintDevice::IKeyMintDevice;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_security_maintenance::aidl::android::security::maintenance::{
+ IKeystoreMaintenance::{BnKeystoreMaintenance, IKeystoreMaintenance},
+ UserState::UserState as AidlUserState,
+};
+use android_security_maintenance::binder::{Interface, Result as BinderResult};
+use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
+use android_system_keystore2::aidl::android::system::keystore2::ResponseCode::ResponseCode;
+use anyhow::{Context, Result};
+use binder::{IBinderInternal, Strong};
+use keystore2_crypto::Password;
+
+/// This struct is defined to implement the aforementioned AIDL interface.
+/// As of now, it is an empty struct.
+pub struct Maintenance;
+
+impl Maintenance {
+ /// Create a new instance of the Keystore Maintenance service.
+ pub fn new_native_binder() -> Result<Strong<dyn IKeystoreMaintenance>> {
+ let result = BnKeystoreMaintenance::new_binder(Self);
+ result.as_binder().set_requesting_sid(true);
+ Ok(result)
+ }
+
+ fn on_user_password_changed(user_id: i32, password: Option<Password>) -> Result<()> {
+ // Check permission. Function should return if this fails. Therefore having '?' at the end
+ // is very important.
+ check_keystore_permission(KeystorePerm::change_password())
+ .context("In on_user_password_changed.")?;
+
+ if let Some(pw) = password.as_ref() {
+ DB.with(|db| {
+ SUPER_KEY.unlock_screen_lock_bound_key(&mut db.borrow_mut(), user_id as u32, pw)
+ })
+ .context("In on_user_password_changed: unlock_screen_lock_bound_key failed")?;
+ }
+
+ match DB
+ .with(|db| {
+ UserState::get_with_password_changed(
+ &mut db.borrow_mut(),
+ &LEGACY_MIGRATOR,
+ &SUPER_KEY,
+ user_id as u32,
+ password.as_ref(),
+ )
+ })
+ .context("In on_user_password_changed.")?
+ {
+ UserState::LskfLocked => {
+ // Error - the password cannot be changed while the device is locked.
+ Err(KeystoreError::Rc(ResponseCode::LOCKED))
+ .context("In on_user_password_changed. Device is locked.")
+ }
+ _ => {
+ // LskfLocked is the only error case for password change
+ Ok(())
+ }
+ }
+ }
+
+ fn add_or_remove_user(user_id: i32) -> Result<()> {
+ // Check permission. Function should return if this fails. Therefore having '?' at the end
+ // is very important.
+ check_keystore_permission(KeystorePerm::change_user()).context("In add_or_remove_user.")?;
+ DB.with(|db| {
+ UserState::reset_user(
+ &mut db.borrow_mut(),
+ &SUPER_KEY,
+ &LEGACY_MIGRATOR,
+ user_id as u32,
+ false,
+ )
+ })
+ .context("In add_or_remove_user: Trying to delete keys from db.")
+ }
+
+ fn clear_namespace(domain: Domain, nspace: i64) -> Result<()> {
+ // Permission check. Must return on error. Do not touch the '?'.
+ check_keystore_permission(KeystorePerm::clear_uid()).context("In clear_namespace.")?;
+
+ LEGACY_MIGRATOR
+ .bulk_delete_uid(domain, nspace)
+ .context("In clear_namespace: Trying to delete legacy keys.")?;
+ DB.with(|db| db.borrow_mut().unbind_keys_for_namespace(domain, nspace))
+ .context("In clear_namespace: Trying to delete keys from db.")
+ }
+
+ fn get_state(user_id: i32) -> Result<AidlUserState> {
+ // Check permission. Function should return if this fails. Therefore having '?' at the end
+ // is very important.
+ check_keystore_permission(KeystorePerm::get_state()).context("In get_state.")?;
+ let state = DB
+ .with(|db| {
+ UserState::get(&mut db.borrow_mut(), &LEGACY_MIGRATOR, &SUPER_KEY, user_id as u32)
+ })
+ .context("In get_state. Trying to get UserState.")?;
+
+ match state {
+ UserState::Uninitialized => Ok(AidlUserState::UNINITIALIZED),
+ UserState::LskfUnlocked(_) => Ok(AidlUserState::LSKF_UNLOCKED),
+ UserState::LskfLocked => Ok(AidlUserState::LSKF_LOCKED),
+ }
+ }
+
+ fn early_boot_ended_help(sec_level: &SecurityLevel) -> Result<()> {
+ let (dev, _, _) =
+ get_keymint_device(sec_level).context("In early_boot_ended: getting keymint device")?;
+ let km_dev: Strong<dyn IKeyMintDevice> =
+ dev.get_interface().context("In early_boot_ended: getting keymint device interface")?;
+ map_km_error(km_dev.earlyBootEnded())
+ .context("In keymint device: calling earlyBootEnded")?;
+ Ok(())
+ }
+
+ fn early_boot_ended() -> Result<()> {
+ check_keystore_permission(KeystorePerm::early_boot_ended())
+ .context("In early_boot_ended. Checking permission")?;
+
+ let sec_levels = [
+ (SecurityLevel::TRUSTED_ENVIRONMENT, "TRUSTED_ENVIRONMENT"),
+ (SecurityLevel::STRONGBOX, "STRONGBOX"),
+ ];
+ sec_levels.iter().fold(Ok(()), |result, (sec_level, sec_level_string)| {
+ let curr_result = Maintenance::early_boot_ended_help(sec_level);
+ if curr_result.is_err() {
+ log::error!(
+ "Call to earlyBootEnded failed for security level {}.",
+ &sec_level_string
+ );
+ }
+ result.and(curr_result)
+ })
+ }
+
+ fn on_device_off_body() -> Result<()> {
+ // Security critical permission check. This statement must return on fail.
+ check_keystore_permission(KeystorePerm::report_off_body())
+ .context("In on_device_off_body.")?;
+
+ DB.with(|db| db.borrow_mut().update_last_off_body(MonotonicRawTime::now()))
+ .context("In on_device_off_body: Trying to update last off body time.")
+ }
+}
+
+impl Interface for Maintenance {}
+
+impl IKeystoreMaintenance for Maintenance {
+ fn onUserPasswordChanged(&self, user_id: i32, password: Option<&[u8]>) -> BinderResult<()> {
+ map_or_log_err(Self::on_user_password_changed(user_id, password.map(|pw| pw.into())), Ok)
+ }
+
+ fn onUserAdded(&self, user_id: i32) -> BinderResult<()> {
+ map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+ }
+
+ fn onUserRemoved(&self, user_id: i32) -> BinderResult<()> {
+ map_or_log_err(Self::add_or_remove_user(user_id), Ok)
+ }
+
+ fn clearNamespace(&self, domain: Domain, nspace: i64) -> BinderResult<()> {
+ map_or_log_err(Self::clear_namespace(domain, nspace), Ok)
+ }
+
+ fn getState(&self, user_id: i32) -> BinderResult<AidlUserState> {
+ map_or_log_err(Self::get_state(user_id), Ok)
+ }
+
+ fn earlyBootEnded(&self) -> BinderResult<()> {
+ map_or_log_err(Self::early_boot_ended(), Ok)
+ }
+
+ fn onDeviceOffBody(&self) -> BinderResult<()> {
+ map_or_log_err(Self::on_device_off_body(), Ok)
+ }
+}
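
Each binder entry point above is a thin shim: the permission check and the actual work live in a Result-returning helper, and map_or_log_err logs the anyhow error chain before converting it into a binder status. A rough self-contained sketch of that shape (illustrative only; ServiceError and the error mapping are made up, not the real keystore2 helpers):

    use anyhow::{Context, Result};

    /// Illustrative stand-in for a binder status code.
    #[derive(Debug)]
    struct ServiceError(i32);

    /// Hypothetical equivalent of map_or_log_err: log the full error chain,
    /// then hand the success value (or a generic error code) to the caller.
    fn map_or_log_err<T, U>(
        result: Result<T>,
        handle_ok: impl FnOnce(T) -> std::result::Result<U, ServiceError>,
    ) -> std::result::Result<U, ServiceError> {
        result
            .map_err(|e| {
                eprintln!("keystore2 error: {:?}", e); // log::error! in the real service
                ServiceError(4) // e.g. a SYSTEM_ERROR-like code
            })
            .and_then(handle_ok)
    }

    fn check_permission(allowed: bool) -> Result<()> {
        if allowed {
            Ok(())
        } else {
            Err(anyhow::anyhow!("permission denied"))
        }
    }

    fn on_device_off_body(allowed: bool) -> Result<()> {
        // Permission check first; '?' makes the function return early on failure.
        check_permission(allowed).context("In on_device_off_body.")?;
        Ok(())
    }

    fn main() {
        // Binder-facing shim, mirroring onDeviceOffBody above.
        println!("{:?}", map_or_log_err(on_device_off_body(false), Ok));
    }
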
diff --git a/keystore2/src/metrics.rs b/keystore2/src/metrics.rs
new file mode 100644
index 0000000..9524cb2
--- /dev/null
+++ b/keystore2/src/metrics.rs
@@ -0,0 +1,415 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module provides convenience functions for keystore2 logging.
+use crate::error::get_error_code;
+use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
+use crate::operation::Outcome;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, BlockMode::BlockMode, Digest::Digest, EcCurve::EcCurve,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, KeyOrigin::KeyOrigin,
+ KeyParameter::KeyParameter, KeyPurpose::KeyPurpose, PaddingMode::PaddingMode,
+};
+use statslog_rust::keystore2_key_creation_event_reported::{
+ Algorithm as StatsdAlgorithm, EcCurve as StatsdEcCurve, KeyOrigin as StatsdKeyOrigin,
+ Keystore2KeyCreationEventReported, UserAuthType as StatsdUserAuthType,
+};
+
+use statslog_rust::keystore2_key_operation_event_reported::{
+ Keystore2KeyOperationEventReported, Outcome as StatsdOutcome, Purpose as StatsdKeyPurpose,
+};
+
+fn create_default_key_creation_atom() -> Keystore2KeyCreationEventReported {
+ // If a value is not present, fields represented by bitmaps and i32 fields default to 0,
+ // except error_code, which defaults to 1 (indicating NO_ERROR), and key_size and
+ // auth_time_out, which default to -1.
+ // The boolean fields are set to false by default.
+ // Some keymint enums do have 0 as an enum variant value. In such cases, the corresponding
+ // enum variant value in atoms.proto is incremented by 1, in order to have 0 as the reserved
+ // value for unspecified fields.
+ Keystore2KeyCreationEventReported {
+ algorithm: StatsdAlgorithm::AlgorithmUnspecified,
+ key_size: -1,
+ key_origin: StatsdKeyOrigin::OriginUnspecified,
+ user_auth_type: StatsdUserAuthType::AuthTypeUnspecified,
+ user_auth_key_timeout_seconds: -1,
+ padding_mode_bitmap: 0,
+ digest_bitmap: 0,
+ block_mode_bitmap: 0,
+ purpose_bitmap: 0,
+ ec_curve: StatsdEcCurve::EcCurveUnspecified,
+ // as per keystore2/ResponseCode.aidl, 1 is reserved for NO_ERROR
+ error_code: 1,
+ attestation_requested: false,
+ }
+}
+
+fn create_default_key_operation_atom() -> Keystore2KeyOperationEventReported {
+ Keystore2KeyOperationEventReported {
+ purpose: StatsdKeyPurpose::KeyPurposeUnspecified,
+ padding_mode_bitmap: 0,
+ digest_bitmap: 0,
+ block_mode_bitmap: 0,
+ outcome: StatsdOutcome::OutcomeUnspecified,
+ error_code: 1,
+ key_upgraded: false,
+ }
+}
+
+/// Log key creation events via statsd API.
+pub fn log_key_creation_event_stats<U>(key_params: &[KeyParameter], result: &anyhow::Result<U>) {
+ let key_creation_event_stats = construct_key_creation_event_stats(key_params, result);
+
+ let logging_result = key_creation_event_stats.stats_write();
+
+ if let Err(e) = logging_result {
+ log::error!(
+ "In log_key_creation_event_stats. Error in logging key creation event. {:?}",
+ e
+ );
+ }
+}
+
+/// Log key operation events via statsd API.
+pub fn log_key_operation_event_stats(
+ key_purpose: KeyPurpose,
+ op_params: &[KeyParameter],
+ op_outcome: &Outcome,
+ key_upgraded: bool,
+) {
+ let key_operation_event_stats =
+ construct_key_operation_event_stats(key_purpose, op_params, op_outcome, key_upgraded);
+
+ let logging_result = key_operation_event_stats.stats_write();
+
+ if let Err(e) = logging_result {
+ log::error!(
+ "In log_key_operation_event_stats. Error in logging key operation event. {:?}",
+ e
+ );
+ }
+}
+
+fn construct_key_creation_event_stats<U>(
+ key_params: &[KeyParameter],
+ result: &anyhow::Result<U>,
+) -> Keystore2KeyCreationEventReported {
+ let mut key_creation_event_atom = create_default_key_creation_atom();
+
+ if let Err(ref e) = result {
+ key_creation_event_atom.error_code = get_error_code(e);
+ }
+
+ for key_param in key_params.iter().map(KsKeyParamValue::from) {
+ match key_param {
+ KsKeyParamValue::Algorithm(a) => {
+ key_creation_event_atom.algorithm = match a {
+ Algorithm::RSA => StatsdAlgorithm::Rsa,
+ Algorithm::EC => StatsdAlgorithm::Ec,
+ Algorithm::AES => StatsdAlgorithm::Aes,
+ Algorithm::TRIPLE_DES => StatsdAlgorithm::TripleDes,
+ Algorithm::HMAC => StatsdAlgorithm::Hmac,
+ _ => StatsdAlgorithm::AlgorithmUnspecified,
+ }
+ }
+ KsKeyParamValue::KeySize(s) => {
+ key_creation_event_atom.key_size = s;
+ }
+ KsKeyParamValue::KeyOrigin(o) => {
+ key_creation_event_atom.key_origin = match o {
+ KeyOrigin::GENERATED => StatsdKeyOrigin::Generated,
+ KeyOrigin::DERIVED => StatsdKeyOrigin::Derived,
+ KeyOrigin::IMPORTED => StatsdKeyOrigin::Imported,
+ KeyOrigin::RESERVED => StatsdKeyOrigin::Reserved,
+ KeyOrigin::SECURELY_IMPORTED => StatsdKeyOrigin::SecurelyImported,
+ _ => StatsdKeyOrigin::OriginUnspecified,
+ }
+ }
+ KsKeyParamValue::HardwareAuthenticatorType(a) => {
+ key_creation_event_atom.user_auth_type = match a {
+ HardwareAuthenticatorType::NONE => StatsdUserAuthType::None,
+ HardwareAuthenticatorType::PASSWORD => StatsdUserAuthType::Password,
+ HardwareAuthenticatorType::FINGERPRINT => StatsdUserAuthType::Fingerprint,
+ HardwareAuthenticatorType::ANY => StatsdUserAuthType::Any,
+ _ => StatsdUserAuthType::AuthTypeUnspecified,
+ }
+ }
+ KsKeyParamValue::AuthTimeout(t) => {
+ key_creation_event_atom.user_auth_key_timeout_seconds = t;
+ }
+ KsKeyParamValue::PaddingMode(p) => {
+ key_creation_event_atom.padding_mode_bitmap =
+ compute_padding_mode_bitmap(&key_creation_event_atom.padding_mode_bitmap, p);
+ }
+ KsKeyParamValue::Digest(d) => {
+ key_creation_event_atom.digest_bitmap =
+ compute_digest_bitmap(&key_creation_event_atom.digest_bitmap, d);
+ }
+ KsKeyParamValue::BlockMode(b) => {
+ key_creation_event_atom.block_mode_bitmap =
+ compute_block_mode_bitmap(&key_creation_event_atom.block_mode_bitmap, b);
+ }
+ KsKeyParamValue::KeyPurpose(k) => {
+ key_creation_event_atom.purpose_bitmap =
+ compute_purpose_bitmap(&key_creation_event_atom.purpose_bitmap, k);
+ }
+ KsKeyParamValue::EcCurve(e) => {
+ key_creation_event_atom.ec_curve = match e {
+ EcCurve::P_224 => StatsdEcCurve::P224,
+ EcCurve::P_256 => StatsdEcCurve::P256,
+ EcCurve::P_384 => StatsdEcCurve::P384,
+ EcCurve::P_521 => StatsdEcCurve::P521,
+ _ => StatsdEcCurve::EcCurveUnspecified,
+ }
+ }
+ KsKeyParamValue::AttestationChallenge(_) => {
+ key_creation_event_atom.attestation_requested = true;
+ }
+ _ => {}
+ }
+ }
+ key_creation_event_atom
+}
+
+fn construct_key_operation_event_stats(
+ key_purpose: KeyPurpose,
+ op_params: &[KeyParameter],
+ op_outcome: &Outcome,
+ key_upgraded: bool,
+) -> Keystore2KeyOperationEventReported {
+ let mut key_operation_event_atom = create_default_key_operation_atom();
+
+ key_operation_event_atom.key_upgraded = key_upgraded;
+
+ key_operation_event_atom.purpose = match key_purpose {
+ KeyPurpose::ENCRYPT => StatsdKeyPurpose::Encrypt,
+ KeyPurpose::DECRYPT => StatsdKeyPurpose::Decrypt,
+ KeyPurpose::SIGN => StatsdKeyPurpose::Sign,
+ KeyPurpose::VERIFY => StatsdKeyPurpose::Verify,
+ KeyPurpose::WRAP_KEY => StatsdKeyPurpose::WrapKey,
+ KeyPurpose::AGREE_KEY => StatsdKeyPurpose::AgreeKey,
+ KeyPurpose::ATTEST_KEY => StatsdKeyPurpose::AttestKey,
+ _ => StatsdKeyPurpose::KeyPurposeUnspecified,
+ };
+
+ key_operation_event_atom.outcome = match op_outcome {
+ Outcome::Unknown | Outcome::Dropped => StatsdOutcome::Dropped,
+ Outcome::Success => StatsdOutcome::Success,
+ Outcome::Abort => StatsdOutcome::Abort,
+ Outcome::Pruned => StatsdOutcome::Pruned,
+ Outcome::ErrorCode(e) => {
+ key_operation_event_atom.error_code = e.0;
+ StatsdOutcome::Error
+ }
+ };
+
+ for key_param in op_params.iter().map(KsKeyParamValue::from) {
+ match key_param {
+ KsKeyParamValue::PaddingMode(p) => {
+ key_operation_event_atom.padding_mode_bitmap =
+ compute_padding_mode_bitmap(&key_operation_event_atom.padding_mode_bitmap, p);
+ }
+ KsKeyParamValue::Digest(d) => {
+ key_operation_event_atom.digest_bitmap =
+ compute_digest_bitmap(&key_operation_event_atom.digest_bitmap, d);
+ }
+ KsKeyParamValue::BlockMode(b) => {
+ key_operation_event_atom.block_mode_bitmap =
+ compute_block_mode_bitmap(&key_operation_event_atom.block_mode_bitmap, b);
+ }
+ _ => {}
+ }
+ }
+
+ key_operation_event_atom
+}
+
+fn compute_purpose_bitmap(purpose_bitmap: &i32, purpose: KeyPurpose) -> i32 {
+ let mut bitmap = *purpose_bitmap;
+ match purpose {
+ KeyPurpose::ENCRYPT => {
+ bitmap |= 1 << KeyPurposeBitPosition::ENCRYPT_BIT_POS as i32;
+ }
+ KeyPurpose::DECRYPT => {
+ bitmap |= 1 << KeyPurposeBitPosition::DECRYPT_BIT_POS as i32;
+ }
+ KeyPurpose::SIGN => {
+ bitmap |= 1 << KeyPurposeBitPosition::SIGN_BIT_POS as i32;
+ }
+ KeyPurpose::VERIFY => {
+ bitmap |= 1 << KeyPurposeBitPosition::VERIFY_BIT_POS as i32;
+ }
+ KeyPurpose::WRAP_KEY => {
+ bitmap |= 1 << KeyPurposeBitPosition::WRAP_KEY_BIT_POS as i32;
+ }
+ KeyPurpose::AGREE_KEY => {
+ bitmap |= 1 << KeyPurposeBitPosition::AGREE_KEY_BIT_POS as i32;
+ }
+ KeyPurpose::ATTEST_KEY => {
+ bitmap |= 1 << KeyPurposeBitPosition::ATTEST_KEY_BIT_POS as i32;
+ }
+ _ => {}
+ }
+ bitmap
+}
+
+fn compute_padding_mode_bitmap(padding_mode_bitmap: &i32, padding_mode: PaddingMode) -> i32 {
+ let mut bitmap = *padding_mode_bitmap;
+ match padding_mode {
+ PaddingMode::NONE => {
+ bitmap |= 1 << PaddingModeBitPosition::NONE_BIT_POSITION as i32;
+ }
+ PaddingMode::RSA_OAEP => {
+ bitmap |= 1 << PaddingModeBitPosition::RSA_OAEP_BIT_POS as i32;
+ }
+ PaddingMode::RSA_PSS => {
+ bitmap |= 1 << PaddingModeBitPosition::RSA_PSS_BIT_POS as i32;
+ }
+ PaddingMode::RSA_PKCS1_1_5_ENCRYPT => {
+ bitmap |= 1 << PaddingModeBitPosition::RSA_PKCS1_1_5_ENCRYPT_BIT_POS as i32;
+ }
+ PaddingMode::RSA_PKCS1_1_5_SIGN => {
+ bitmap |= 1 << PaddingModeBitPosition::RSA_PKCS1_1_5_SIGN_BIT_POS as i32;
+ }
+ PaddingMode::PKCS7 => {
+ bitmap |= 1 << PaddingModeBitPosition::PKCS7_BIT_POS as i32;
+ }
+ _ => {}
+ }
+ bitmap
+}
+
+fn compute_digest_bitmap(digest_bitmap: &i32, digest: Digest) -> i32 {
+ let mut bitmap = *digest_bitmap;
+ match digest {
+ Digest::NONE => {
+ bitmap |= 1 << DigestBitPosition::NONE_BIT_POSITION as i32;
+ }
+ Digest::MD5 => {
+ bitmap |= 1 << DigestBitPosition::MD5_BIT_POS as i32;
+ }
+ Digest::SHA1 => {
+ bitmap |= 1 << DigestBitPosition::SHA_1_BIT_POS as i32;
+ }
+ Digest::SHA_2_224 => {
+ bitmap |= 1 << DigestBitPosition::SHA_2_224_BIT_POS as i32;
+ }
+ Digest::SHA_2_256 => {
+ bitmap |= 1 << DigestBitPosition::SHA_2_256_BIT_POS as i32;
+ }
+ Digest::SHA_2_384 => {
+ bitmap |= 1 << DigestBitPosition::SHA_2_384_BIT_POS as i32;
+ }
+ Digest::SHA_2_512 => {
+ bitmap |= 1 << DigestBitPosition::SHA_2_512_BIT_POS as i32;
+ }
+ _ => {}
+ }
+ bitmap
+}
+
+fn compute_block_mode_bitmap(block_mode_bitmap: &i32, block_mode: BlockMode) -> i32 {
+ let mut bitmap = *block_mode_bitmap;
+ match block_mode {
+ BlockMode::ECB => {
+ bitmap |= 1 << BlockModeBitPosition::ECB_BIT_POS as i32;
+ }
+ BlockMode::CBC => {
+ bitmap |= 1 << BlockModeBitPosition::CBC_BIT_POS as i32;
+ }
+ BlockMode::CTR => {
+ bitmap |= 1 << BlockModeBitPosition::CTR_BIT_POS as i32;
+ }
+ BlockMode::GCM => {
+ bitmap |= 1 << BlockModeBitPosition::GCM_BIT_POS as i32;
+ }
+ _ => {}
+ }
+ bitmap
+}
+/// Enum defining the bit position for each padding mode. Since padding mode can be repeatable, it
+/// is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+pub enum PaddingModeBitPosition {
+ ///Bit position in the PaddingMode bitmap for NONE.
+ NONE_BIT_POSITION = 0,
+ ///Bit position in the PaddingMode bitmap for RSA_OAEP.
+ RSA_OAEP_BIT_POS = 1,
+ ///Bit position in the PaddingMode bitmap for RSA_PSS.
+ RSA_PSS_BIT_POS = 2,
+ ///Bit position in the PaddingMode bitmap for RSA_PKCS1_1_5_ENCRYPT.
+ RSA_PKCS1_1_5_ENCRYPT_BIT_POS = 3,
+ ///Bit position in the PaddingMode bitmap for RSA_PKCS1_1_5_SIGN.
+ RSA_PKCS1_1_5_SIGN_BIT_POS = 4,
+ ///Bit position in the PaddingMode bitmap for PKCS7.
+ PKCS7_BIT_POS = 5,
+}
+
+/// Enum defining the bit position for each digest type. Since digest can be repeatable in
+/// key parameters, it is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+pub enum DigestBitPosition {
+ ///Bit position in the Digest bitmap for NONE.
+ NONE_BIT_POSITION = 0,
+ ///Bit position in the Digest bitmap for MD5.
+ MD5_BIT_POS = 1,
+ ///Bit position in the Digest bitmap for SHA1.
+ SHA_1_BIT_POS = 2,
+ ///Bit position in the Digest bitmap for SHA_2_224.
+ SHA_2_224_BIT_POS = 3,
+ ///Bit position in the Digest bitmap for SHA_2_256.
+ SHA_2_256_BIT_POS = 4,
+ ///Bit position in the Digest bitmap for SHA_2_384.
+ SHA_2_384_BIT_POS = 5,
+ ///Bit position in the Digest bitmap for SHA_2_512.
+ SHA_2_512_BIT_POS = 6,
+}
+
+/// Enum defining the bit position for each block mode type. Since block mode can be repeatable in
+/// key parameters, it is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+enum BlockModeBitPosition {
+ ///Bit position in the BlockMode bitmap for ECB.
+ ECB_BIT_POS = 1,
+ ///Bit position in the BlockMode bitmap for CBC.
+ CBC_BIT_POS = 2,
+ ///Bit position in the BlockMode bitmap for CTR.
+ CTR_BIT_POS = 3,
+ ///Bit position in the BlockMode bitmap for GCM.
+ GCM_BIT_POS = 4,
+}
+
+/// Enum defining the bit position for each key purpose. Since key purpose can be repeatable in
+/// key parameters, it is represented using a bitmap.
+#[allow(non_camel_case_types)]
+#[repr(i32)]
+enum KeyPurposeBitPosition {
+ ///Bit position in the KeyPurpose bitmap for Encrypt.
+ ENCRYPT_BIT_POS = 1,
+ ///Bit position in the KeyPurpose bitmap for Decrypt.
+ DECRYPT_BIT_POS = 2,
+ ///Bit position in the KeyPurpose bitmap for Sign.
+ SIGN_BIT_POS = 3,
+ ///Bit position in the KeyPurpose bitmap for Verify.
+ VERIFY_BIT_POS = 4,
+ ///Bit position in the KeyPurpose bitmap for Wrap Key.
+ WRAP_KEY_BIT_POS = 5,
+ ///Bit position in the KeyPurpose bitmap for Agree Key.
+ AGREE_KEY_BIT_POS = 6,
+ ///Bit position in the KeyPurpose bitmap for Attest Key.
+ ATTEST_KEY_BIT_POS = 7,
+}
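
The *_BitPosition enums above exist because repeatable key parameters (padding modes, digests, block modes, purposes) are reported to statsd as a single i32 bitmap rather than as a list. A small standalone sketch of the same accumulation (toy types only; the real code uses the KeyMint AIDL enums and the bit positions defined above):

    #[derive(Clone, Copy)]
    enum Padding {
        None,
        RsaOaep,
        RsaPss,
    }

    #[repr(i32)]
    enum PaddingBit {
        None = 0,
        RsaOaep = 1,
        RsaPss = 2,
    }

    fn accumulate(bitmap: i32, p: Padding) -> i32 {
        // Each repeatable parameter sets its own bit; repeated values are idempotent.
        bitmap
            | match p {
                Padding::None => 1 << PaddingBit::None as i32,
                Padding::RsaOaep => 1 << PaddingBit::RsaOaep as i32,
                Padding::RsaPss => 1 << PaddingBit::RsaPss as i32,
            }
    }

    fn main() {
        let params = [Padding::RsaOaep, Padding::RsaPss, Padding::RsaOaep];
        let bitmap = params.iter().fold(0i32, |acc, p| accumulate(acc, *p));
        assert_eq!(bitmap, 0b110); // RSA_OAEP (bit 1) and RSA_PSS (bit 2) are set
        println!("padding bitmap = {:#b}", bitmap);
    }
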
diff --git a/keystore2/src/operation.rs b/keystore2/src/operation.rs
index 829987d..f71577b 100644
--- a/keystore2/src/operation.rs
+++ b/keystore2/src/operation.rs
@@ -126,18 +126,17 @@
//! Either way, we have to re-evaluate the pruning scores.
use crate::enforcements::AuthInfo;
-use crate::error::{map_km_error, map_or_log_err, Error, ErrorCode, ResponseCode};
+use crate::error::{map_err_with, map_km_error, map_or_log_err, Error, ErrorCode, ResponseCode};
+use crate::metrics::log_key_operation_event_stats;
use crate::utils::Asp;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- ByteArray::ByteArray, IKeyMintOperation::IKeyMintOperation,
- KeyParameter::KeyParameter as KmParam, KeyParameterArray::KeyParameterArray,
- KeyParameterValue::KeyParameterValue as KmParamValue, Tag::Tag,
+ IKeyMintOperation::IKeyMintOperation, KeyParameter::KeyParameter, KeyPurpose::KeyPurpose,
};
use android_system_keystore2::aidl::android::system::keystore2::{
IKeystoreOperation::BnKeystoreOperation, IKeystoreOperation::IKeystoreOperation,
};
use anyhow::{anyhow, Context, Result};
-use binder::{IBinder, Interface};
+use binder::IBinderInternal;
use std::{
collections::HashMap,
sync::{Arc, Mutex, MutexGuard, Weak},
@@ -149,12 +148,18 @@
/// to one of the other variants exactly once. The distinction in outcome is mainly
/// for statistics.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
-enum Outcome {
+pub enum Outcome {
+ /// Operations have `Outcome::Unknown` as long as they are active.
Unknown,
+ /// Operation is successful.
Success,
+ /// Operation is aborted.
Abort,
+ /// Operation is dropped.
Dropped,
+ /// Operation is pruned.
Pruned,
+ /// Operation failed with the given error code.
ErrorCode(ErrorCode),
}
@@ -169,12 +174,34 @@
outcome: Mutex<Outcome>,
owner: u32, // Uid of the operation's owner.
auth_info: Mutex<AuthInfo>,
+ forced: bool,
+ logging_info: LoggingInfo,
+}
+
+/// Keeps track of the information required for logging operations.
+#[derive(Debug)]
+pub struct LoggingInfo {
+ purpose: KeyPurpose,
+ op_params: Vec<KeyParameter>,
+ key_upgraded: bool,
+}
+
+impl LoggingInfo {
+ /// Constructor
+ pub fn new(
+ purpose: KeyPurpose,
+ op_params: Vec<KeyParameter>,
+ key_upgraded: bool,
+ ) -> LoggingInfo {
+ Self { purpose, op_params, key_upgraded }
+ }
}
struct PruningInfo {
last_usage: Instant,
owner: u32,
index: usize,
+ forced: bool,
}
// We don't expect more than 32KiB of data in `update`, `updateAad`, and `finish`.
@@ -184,9 +211,11 @@
/// Constructor
pub fn new(
index: usize,
- km_op: Box<dyn IKeyMintOperation>,
+ km_op: binder::Strong<dyn IKeyMintOperation>,
owner: u32,
auth_info: AuthInfo,
+ forced: bool,
+ logging_info: LoggingInfo,
) -> Self {
Self {
index,
@@ -195,6 +224,8 @@
outcome: Mutex::new(Outcome::Unknown),
owner,
auth_info: Mutex::new(auth_info),
+ forced,
+ logging_info,
}
}
@@ -220,6 +251,7 @@
last_usage: *self.last_usage.lock().expect("In get_pruning_info."),
owner: self.owner,
index: self.index,
+ forced: self.forced,
})
}
@@ -247,13 +279,14 @@
}
*locked_outcome = Outcome::Pruned;
- let km_op: Box<dyn IKeyMintOperation> = match self.km_op.get_interface() {
- Ok(km_op) => km_op,
- Err(e) => {
- log::error!("In prune: Failed to get KeyMintOperation interface.\n {:?}", e);
- return Err(Error::sys());
- }
- };
+ let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
+ match self.km_op.get_interface() {
+ Ok(km_op) => km_op,
+ Err(e) => {
+ log::error!("In prune: Failed to get KeyMintOperation interface.\n {:?}", e);
+ return Err(Error::sys());
+ }
+ };
// We abort the operation. If there was an error we log it but ignore it.
if let Err(e) = map_km_error(km_op.abort()) {
@@ -324,17 +357,7 @@
Self::check_input_length(aad_input).context("In update_aad")?;
self.touch();
- let params = KeyParameterArray {
- params: vec![KmParam {
- tag: Tag::ASSOCIATED_DATA,
- value: KmParamValue::Blob(aad_input.into()),
- }],
- };
-
- let mut out_params: Option<KeyParameterArray> = None;
- let mut output: Option<ByteArray> = None;
-
- let km_op: Box<dyn IKeyMintOperation> =
+ let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
self.km_op.get_interface().context("In update: Failed to get KeyMintOperation.")?;
let (hat, tst) = self
@@ -346,14 +369,7 @@
self.update_outcome(
&mut *outcome,
- map_km_error(km_op.update(
- Some(¶ms),
- None,
- hat.as_ref(),
- tst.as_ref(),
- &mut out_params,
- &mut output,
- )),
+ map_km_error(km_op.updateAad(aad_input, hat.as_ref(), tst.as_ref())),
)
.context("In update_aad: KeyMint::update failed.")?;
@@ -367,10 +383,7 @@
Self::check_input_length(input).context("In update")?;
self.touch();
- let mut out_params: Option<KeyParameterArray> = None;
- let mut output: Option<ByteArray> = None;
-
- let km_op: Box<dyn IKeyMintOperation> =
+ let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
self.km_op.get_interface().context("In update: Failed to get KeyMintOperation.")?;
let (hat, tst) = self
@@ -380,28 +393,17 @@
.before_update()
.context("In update: Trying to get auth tokens.")?;
- self.update_outcome(
- &mut *outcome,
- map_km_error(km_op.update(
- None,
- Some(input),
- hat.as_ref(),
- tst.as_ref(),
- &mut out_params,
- &mut output,
- )),
- )
- .context("In update: KeyMint::update failed.")?;
+ let output = self
+ .update_outcome(
+ &mut *outcome,
+ map_km_error(km_op.update(input, hat.as_ref(), tst.as_ref())),
+ )
+ .context("In update: KeyMint::update failed.")?;
- match output {
- Some(blob) => {
- if blob.data.is_empty() {
- Ok(None)
- } else {
- Ok(Some(blob.data))
- }
- }
- None => Ok(None),
+ if output.is_empty() {
+ Ok(None)
+ } else {
+ Ok(Some(output))
}
}
@@ -414,12 +416,10 @@
}
self.touch();
- let mut out_params: Option<KeyParameterArray> = None;
-
- let km_op: Box<dyn IKeyMintOperation> =
+ let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
self.km_op.get_interface().context("In finish: Failed to get KeyMintOperation.")?;
- let (hat, tst) = self
+ let (hat, tst, confirmation_token) = self
.auth_info
.lock()
.unwrap()
@@ -430,12 +430,11 @@
.update_outcome(
&mut *outcome,
map_km_error(km_op.finish(
- None,
input,
signature,
hat.as_ref(),
tst.as_ref(),
- &mut out_params,
+ confirmation_token.as_deref(),
)),
)
.context("In finish: KeyMint::finish failed.")?;
@@ -458,7 +457,7 @@
fn abort(&self, outcome: Outcome) -> Result<()> {
let mut locked_outcome = self.check_active().context("In abort")?;
*locked_outcome = outcome;
- let km_op: Box<dyn IKeyMintOperation> =
+ let km_op: binder::public_api::Strong<dyn IKeyMintOperation> =
self.km_op.get_interface().context("In abort: Failed to get KeyMintOperation.")?;
map_km_error(km_op.abort()).context("In abort: KeyMint::abort failed.")
@@ -467,7 +466,15 @@
impl Drop for Operation {
fn drop(&mut self) {
- if let Ok(Outcome::Unknown) = self.outcome.get_mut() {
+ let guard = self.outcome.lock().expect("In drop.");
+ log_key_operation_event_stats(
+ self.logging_info.purpose,
+ &(self.logging_info.op_params),
+ &guard,
+ self.logging_info.key_upgraded,
+ );
+ if let Outcome::Unknown = *guard {
+ drop(guard);
// If the operation was still active we call abort, setting
// the outcome to `Outcome::Dropped`
if let Err(e) = self.abort(Outcome::Dropped) {
@@ -497,9 +504,11 @@
/// owner uid and returns a new Operation wrapped in a `std::sync::Arc`.
pub fn create_operation(
&self,
- km_op: Box<dyn IKeyMintOperation>,
+ km_op: binder::public_api::Strong<dyn IKeyMintOperation>,
owner: u32,
auth_info: AuthInfo,
+ forced: bool,
+ logging_info: LoggingInfo,
) -> Arc<Operation> {
// We use unwrap because we don't allow code that can panic while locked.
let mut operations = self.operations.lock().expect("In create_operation.");
@@ -512,12 +521,26 @@
s.upgrade().is_none()
}) {
Some(free_slot) => {
- let new_op = Arc::new(Operation::new(index - 1, km_op, owner, auth_info));
+ let new_op = Arc::new(Operation::new(
+ index - 1,
+ km_op,
+ owner,
+ auth_info,
+ forced,
+ logging_info,
+ ));
*free_slot = Arc::downgrade(&new_op);
new_op
}
None => {
- let new_op = Arc::new(Operation::new(operations.len(), km_op, owner, auth_info));
+ let new_op = Arc::new(Operation::new(
+ operations.len(),
+ km_op,
+ owner,
+ auth_info,
+ forced,
+ logging_info,
+ ));
operations.push(Arc::downgrade(&new_op));
new_op
}
@@ -600,7 +623,7 @@
/// ## Update
/// We also allow callers to cannibalize their own sibling operations if no other
/// slot can be found. In this case the least recently used sibling is pruned.
- pub fn prune(&self, caller: u32) -> Result<(), Error> {
+ pub fn prune(&self, caller: u32, forced: bool) -> Result<(), Error> {
loop {
// Maps the uid of the owner to the number of operations that owner has
// (running_siblings). More operations per owner lowers the pruning
@@ -625,7 +648,8 @@
}
});
- let caller_malus = 1u64 + *owners.entry(caller).or_default();
+ // If the operation is forced, the caller has a malus of 0.
+ let caller_malus = if forced { 0 } else { 1u64 + *owners.entry(caller).or_default() };
// We iterate through all operations computing the malus and finding
// the candidate with the highest malus which must also be higher
@@ -639,7 +663,7 @@
let mut oldest_caller_op: Option<CandidateInfo> = None;
let candidate = pruning_info.iter().fold(
None,
- |acc: Option<CandidateInfo>, &PruningInfo { last_usage, owner, index }| {
+ |acc: Option<CandidateInfo>, &PruningInfo { last_usage, owner, index, forced }| {
// Compute the age of the current operation.
let age = now
.checked_duration_since(last_usage)
@@ -659,12 +683,17 @@
}
// Compute the malus of the current operation.
- // Expect safety: Every owner in pruning_info was counted in
- // the owners map. So this unwrap cannot panic.
- let malus = *owners
- .get(&owner)
- .expect("This is odd. We should have counted every owner in pruning_info.")
- + ((age.as_secs() + 1) as f64).log(6.0).floor() as u64;
+ let malus = if forced {
+ // Forced operations have a malus of 0 and cannot be pruned, not even
+ // by other forced operations.
+ 0
+ } else {
+ // Expect safety: Every owner in pruning_info was counted in
+ // the owners map. So this unwrap cannot panic.
+ *owners.get(&owner).expect(
+ "This is odd. We should have counted every owner in pruning_info.",
+ ) + ((age.as_secs() + 1) as f64).log(6.0).floor() as u64
+ };
// Now check if the current operation is a viable/better candidate
// than the one currently stored in the accumulator.
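
To make the malus formula above concrete: a non-forced operation whose owner currently has 3 running operations and that has been idle for 35 seconds scores 3 + floor(log_6(35 + 1)) = 3 + 2 = 5, so idle time raises the score only slowly while the sibling count dominates. A forced operation always scores 0 and therefore never exceeds any caller's malus, which is why it is effectively never pruned.
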
@@ -751,9 +780,11 @@
impl KeystoreOperation {
/// Creates a new operation instance wrapped in a
/// BnKeystoreOperation proxy object. It also
- /// calls `IBinder::set_requesting_sid` on the new interface, because
+ /// calls `IBinderInternal::set_requesting_sid` on the new interface, because
/// we need it for checking Keystore permissions.
- pub fn new_native_binder(operation: Arc<Operation>) -> impl IKeystoreOperation + Send {
+ pub fn new_native_binder(
+ operation: Arc<Operation>,
+ ) -> binder::public_api::Strong<dyn IKeystoreOperation> {
let result =
BnKeystoreOperation::new_binder(Self { operation: Mutex::new(Some(operation)) });
result.as_binder().set_requesting_sid(true);
@@ -835,11 +866,21 @@
}
fn abort(&self) -> binder::public_api::Result<()> {
- map_or_log_err(
+ map_err_with(
self.with_locked_operation(
|op| op.abort(Outcome::Abort).context("In KeystoreOperation::abort"),
true,
),
+ |e| {
+ match e.root_cause().downcast_ref::<Error>() {
+ // Calling abort on expired operations is something very common.
+ // There is no reason to clutter the log with it. It is never the cause
+ // of a real problem.
+ Some(Error::Km(ErrorCode::INVALID_OPERATION_HANDLE)) => {}
+ _ => log::error!("{:?}", e),
+ };
+ e
+ },
Ok,
)
}
diff --git a/keystore2/src/permission.rs b/keystore2/src/permission.rs
index a81954f..45c4dc1 100644
--- a/keystore2/src/permission.rs
+++ b/keystore2/src/permission.rs
@@ -193,6 +193,7 @@
/// ```
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
KeyPerm from KeyPermission with default (NONE, none) {
+ CONVERT_STORAGE_KEY_TO_EPHEMERAL, selinux name: convert_storage_key_to_ephemeral;
DELETE, selinux name: delete;
GEN_UNIQUE_ID, selinux name: gen_unique_id;
GET_INFO, selinux name: get_info;
@@ -291,7 +292,7 @@
AddAuth = 1, selinux name: add_auth;
/// Checked when an app is uninstalled or wiped.
ClearNs = 2, selinux name: clear_ns;
- /// Checked when Keystore 2.0 gets locked.
+ /// Checked when the user state is queried from Keystore 2.0.
GetState = 4, selinux name: get_state;
/// Checked when Keystore 2.0 is asked to list a namespace that the caller
/// does not have the get_info permission for.
@@ -299,9 +300,21 @@
/// Checked when Keystore 2.0 gets locked.
Lock = 0x10, selinux name: lock;
/// Checked when Keystore 2.0 shall be reset.
- Reset = 0x20, selinux name: reset;
+ Reset = 0x20, selinux name: reset;
/// Checked when Keystore 2.0 shall be unlocked.
- Unlock = 0x40, selinux name: unlock;
+ Unlock = 0x40, selinux name: unlock;
+ /// Checked when user is added or removed.
+ ChangeUser = 0x80, selinux name: change_user;
+ /// Checked when password of the user is changed.
+ ChangePassword = 0x100, selinux name: change_password;
+ /// Checked when a UID is cleared.
+ ClearUID = 0x200, selinux name: clear_uid;
+ /// Checked when Credstore calls IKeystoreAuthorization to obtain auth tokens.
+ GetAuthToken = 0x400, selinux name: get_auth_token;
+ /// Checked when earlyBootEnded() is called.
+ EarlyBootEnded = 0x800, selinux name: early_boot_ended;
+ /// Checked when IKeystoreMaintenance::onDeviceOffBody is called.
+ ReportOffBody = 0x1000, selinux name: report_off_body;
}
);
@@ -454,9 +467,12 @@
for p in access_vec.into_iter() {
selinux::check_access(caller_ctx, &target_context, "keystore2_key", p.to_selinux())
- .context(concat!(
- "check_grant_permission: check_access failed. ",
- "The caller may have tried to grant a permission that they don't possess."
+ .context(format!(
+ concat!(
+ "check_grant_permission: check_access failed. ",
+ "The caller may have tried to grant a permission that they don't possess. {:?}"
+ ),
+ p
))?
}
Ok(())
@@ -573,6 +589,17 @@
KeyPerm::rebind(),
KeyPerm::update(),
KeyPerm::use_(),
+ KeyPerm::convert_storage_key_to_ephemeral(),
+ ];
+
+ const SYSTEM_SERVER_PERMISSIONS_NO_GRANT: KeyPermSet = key_perm_set![
+ KeyPerm::delete(),
+ KeyPerm::use_dev_id(),
+ // No KeyPerm::grant()
+ KeyPerm::get_info(),
+ KeyPerm::rebind(),
+ KeyPerm::update(),
+ KeyPerm::use_(),
];
const NOT_GRANT_PERMS: KeyPermSet = key_perm_set![
@@ -586,6 +613,7 @@
KeyPerm::rebind(),
KeyPerm::update(),
KeyPerm::use_(),
+ KeyPerm::convert_storage_key_to_ephemeral(),
];
const UNPRIV_PERMS: KeyPermSet = key_perm_set![
@@ -643,18 +671,25 @@
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::add_auth()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::clear_ns()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::get_state()).is_ok());
- assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::list()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::lock()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::reset()).is_ok());
assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::unlock()).is_ok());
+ assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::change_user()).is_ok());
+ assert!(
+ check_keystore_permission(&system_server_ctx, KeystorePerm::change_password()).is_ok()
+ );
+ assert!(check_keystore_permission(&system_server_ctx, KeystorePerm::clear_uid()).is_ok());
let shell_ctx = Context::new("u:r:shell:s0")?;
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::add_auth()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_ns()));
- assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::get_state()));
+ assert!(check_keystore_permission(&shell_ctx, KeystorePerm::get_state()).is_ok());
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::list()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::lock()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::reset()));
assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::unlock()));
+ assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::change_user()));
+ assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::change_password()));
+ assert_perm_failed!(check_keystore_permission(&shell_ctx, KeystorePerm::clear_uid()));
Ok(())
}
@@ -663,9 +698,10 @@
let system_server_ctx = Context::new("u:r:system_server:s0")?;
let shell_ctx = Context::new("u:r:shell:s0")?;
let key = KeyDescriptor { domain: Domain::APP, nspace: 0, alias: None, blob: None };
- assert!(check_grant_permission(&system_server_ctx, NOT_GRANT_PERMS, &key).is_ok());
- // attempts to grant the grant permission must always fail even when privileged.
+ check_grant_permission(&system_server_ctx, SYSTEM_SERVER_PERMISSIONS_NO_GRANT, &key)
+ .expect("Grant permission check failed.");
+ // attempts to grant the grant permission must always fail even when privileged.
assert_perm_failed!(check_grant_permission(
&system_server_ctx,
KeyPerm::grant().into(),
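
The new KeystorePerm variants above keep the power-of-two encoding, and the key_perm_set! sets (such as SYSTEM_SERVER_PERMISSIONS_NO_GRANT) combine individual permissions into a bitmask that can be checked with bitwise operations. A rough standalone sketch of that idea (names and helper types are illustrative; the real macro and the SELinux-backed checks live in permission.rs):

    /// Toy permission enum mirroring the power-of-two values used above
    /// (Lock = 0x10, ChangeUser = 0x80, ChangePassword = 0x100, ClearUid = 0x200).
    #[derive(Clone, Copy)]
    #[repr(u32)]
    enum Perm {
        Lock = 0x10,
        ChangeUser = 0x80,
        ChangePassword = 0x100,
        ClearUid = 0x200,
    }

    #[derive(Clone, Copy)]
    struct PermSet(u32);

    impl PermSet {
        const fn with(self, p: Perm) -> PermSet {
            PermSet(self.0 | p as u32)
        }
        fn includes(self, p: Perm) -> bool {
            self.0 & p as u32 != 0
        }
    }

    /// Built at compile time, similar in spirit to key_perm_set![...].
    const SYSTEM_SERVER_PERMS: PermSet =
        PermSet(0).with(Perm::ChangeUser).with(Perm::ChangePassword).with(Perm::ClearUid);

    fn main() {
        assert!(SYSTEM_SERVER_PERMS.includes(Perm::ChangePassword));
        assert!(!SYSTEM_SERVER_PERMS.includes(Perm::Lock));
    }
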
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
new file mode 100644
index 0000000..1c757c9
--- /dev/null
+++ b/keystore2/src/remote_provisioning.rs
@@ -0,0 +1,449 @@
+// Copyright 2020, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This is the implementation for the remote provisioning AIDL interface between
+//! the network providers for remote provisioning and the system. This interface
+//! allows the caller to prompt the Remote Provisioning HAL to generate keys and
+//! CBOR blobs that can be ferried to a provisioning server that will return
+//! certificate chains signed by some root authority and stored in a keystore SQLite
+//! DB.
+
+use std::collections::HashMap;
+
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, AttestationKey::AttestationKey, Certificate::Certificate,
+ DeviceInfo::DeviceInfo, IRemotelyProvisionedComponent::IRemotelyProvisionedComponent,
+ KeyParameter::KeyParameter, KeyParameterValue::KeyParameterValue,
+ MacedPublicKey::MacedPublicKey, ProtectedData::ProtectedData, SecurityLevel::SecurityLevel,
+ Tag::Tag,
+};
+use android_security_remoteprovisioning::aidl::android::security::remoteprovisioning::{
+ AttestationPoolStatus::AttestationPoolStatus, IRemoteProvisioning::BnRemoteProvisioning,
+ IRemoteProvisioning::IRemoteProvisioning,
+};
+use android_security_remoteprovisioning::binder::Strong;
+use android_system_keystore2::aidl::android::system::keystore2::{
+ Domain::Domain, KeyDescriptor::KeyDescriptor,
+};
+use anyhow::{Context, Result};
+use keystore2_crypto::parse_subject_from_certificate;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+use crate::database::{CertificateChain, KeystoreDB, Uuid};
+use crate::error::{self, map_or_log_err, map_rem_prov_error, Error};
+use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
+use crate::utils::Asp;
+
+/// Contains helper functions to check if remote provisioning is enabled on the system and, if so,
+/// to assign and retrieve attestation keys and certificate chains.
+#[derive(Default)]
+pub struct RemProvState {
+ security_level: SecurityLevel,
+ km_uuid: Uuid,
+ is_hal_present: AtomicBool,
+}
+
+impl RemProvState {
+ /// Creates a RemProvState struct.
+ pub fn new(security_level: SecurityLevel, km_uuid: Uuid) -> Self {
+ Self { security_level, km_uuid, is_hal_present: AtomicBool::new(true) }
+ }
+
+ /// Checks if remote provisioning is enabled and partially caches the result. On a hybrid system
+ /// remote provisioning can flip from being disabled to enabled depending on responses from the
+ /// server, so unfortunately caching the presence or absence of the HAL is not enough to fully
+ /// make decisions about the state of remote provisioning during runtime.
+ fn check_rem_prov_enabled(&self, db: &mut KeystoreDB) -> Result<bool> {
+ if !self.is_hal_present.load(Ordering::Relaxed)
+ || get_remotely_provisioned_component(&self.security_level).is_err()
+ {
+ self.is_hal_present.store(false, Ordering::Relaxed);
+ return Ok(false);
+ }
+ // To check if remote provisioning is enabled on a system that supports both remote
+ // provisioning and factory provisioned keys, we only need to check if there are any
+ // keys at all generated to indicate if the app has gotten the signal to begin filling
+ // the key pool from the server.
+ let pool_status = db
+ .get_attestation_pool_status(0 /* date */, &self.km_uuid)
+ .context("In check_rem_prov_enabled: failed to get attestation pool status.")?;
+ Ok(pool_status.total != 0)
+ }
+
+ /// Fetches a remote provisioning attestation key and certificate chain inside of the
+ /// returned `CertificateChain` struct if one exists for the given caller_uid. If one has not
+ /// been assigned, this function will assign it. If there are no signed attestation keys
+ /// available to be assigned, it will return the ResponseCode `OUT_OF_KEYS`.
+ fn get_rem_prov_attest_key(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ db: &mut KeystoreDB,
+ ) -> Result<Option<CertificateChain>> {
+ match key.domain {
+ Domain::APP => {
+ // Attempt to get an Attestation Key once. If it fails, then the app doesn't
+ // have a valid chain assigned to it. The helper function will return None after
+ // attempting to assign a key. An error will be thrown if the pool is simply out
+ // of usable keys. Then another attempt to fetch the just-assigned key will be
+ // made. If this fails too, something is very wrong.
+ self.get_rem_prov_attest_key_helper(key, caller_uid, db)
+ .context("In get_rem_prov_attest_key: Failed to get a key")?
+ .map_or_else(
+ || self.get_rem_prov_attest_key_helper(key, caller_uid, db),
+ |v| Ok(Some(v)),
+ )
+ .context(concat!(
+ "In get_rem_prov_attest_key: Failed to get a key after",
+ "attempting to assign one."
+ ))?
+ .map_or_else(
+ || {
+ Err(Error::sys()).context(concat!(
+ "In get_rem_prov_attest_key: Attempted to assign a ",
+ "key and failed silently. Something is very wrong."
+ ))
+ },
+ |cert_chain| Ok(Some(cert_chain)),
+ )
+ }
+ _ => Ok(None),
+ }
+ }
+
+ /// Returns None if an AttestationKey fails to be assigned. Errors if no keys are available.
+ fn get_rem_prov_attest_key_helper(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ db: &mut KeystoreDB,
+ ) -> Result<Option<CertificateChain>> {
+ let cert_chain = db
+ .retrieve_attestation_key_and_cert_chain(key.domain, caller_uid as i64, &self.km_uuid)
+ .context("In get_rem_prov_attest_key_helper: Failed to retrieve a key + cert chain")?;
+ match cert_chain {
+ Some(cert_chain) => Ok(Some(cert_chain)),
+ // Either this app needs to be assigned a key, or the pool is empty. An error will
+ // be thrown if there is no key available to assign. This will indicate that the app
+ // should be nudged to provision more keys so keystore can retry.
+ None => {
+ db.assign_attestation_key(key.domain, caller_uid as i64, &self.km_uuid)
+ .context("In get_rem_prov_attest_key_helper: Failed to assign a key")?;
+ Ok(None)
+ }
+ }
+ }
+
+ fn is_asymmetric_key(&self, params: &[KeyParameter]) -> bool {
+ params.iter().any(|kp| {
+ matches!(
+ kp,
+ KeyParameter {
+ tag: Tag::ALGORITHM,
+ value: KeyParameterValue::Algorithm(Algorithm::RSA)
+ } | KeyParameter {
+ tag: Tag::ALGORITHM,
+ value: KeyParameterValue::Algorithm(Algorithm::EC)
+ }
+ )
+ })
+ }
+
+ /// Checks to see (1) if the key in question should be attested to based on the algorithm and
+ /// (2) if remote provisioning is present and enabled on the system. If these conditions are
+ /// met, it makes an attempt to fetch the attestation key assigned to the `caller_uid`.
+ ///
+ /// It returns the ResponseCode `OUT_OF_KEYS` if there is not one key currently assigned to the
+ /// `caller_uid` and there are none available to assign.
+ pub fn get_remotely_provisioned_attestation_key_and_certs(
+ &self,
+ key: &KeyDescriptor,
+ caller_uid: u32,
+ params: &[KeyParameter],
+ db: &mut KeystoreDB,
+ ) -> Result<Option<(AttestationKey, Certificate)>> {
+ if !self.is_asymmetric_key(params) || !self.check_rem_prov_enabled(db)? {
+ // There is no remote provisioning component for this security level on the
+ // device. Return None so the underlying KM instance knows to use its
+ // factory provisioned key instead. Alternatively, it's not an asymmetric key
+ // and therefore will not be attested.
+ Ok(None)
+ } else {
+ match self.get_rem_prov_attest_key(&key, caller_uid, db).context(concat!(
+ "In get_remote_provisioning_key_and_certs: Failed to get ",
+ "attestation key"
+ ))? {
+ Some(cert_chain) => Ok(Some((
+ AttestationKey {
+ keyBlob: cert_chain.private_key.to_vec(),
+ attestKeyParams: vec![],
+ issuerSubjectName: parse_subject_from_certificate(&cert_chain.batch_cert)
+ .context(concat!(
+ "In get_remote_provisioning_key_and_certs: Failed to ",
+ "parse subject."
+ ))?,
+ },
+ Certificate { encodedCertificate: cert_chain.cert_chain },
+ ))),
+ None => Ok(None),
+ }
+ }
+ }
+}
+/// Implementation of the IRemoteProvisioning service.
+#[derive(Default)]
+pub struct RemoteProvisioningService {
+ device_by_sec_level: HashMap<SecurityLevel, Asp>,
+}
+
+impl RemoteProvisioningService {
+ fn get_dev_by_sec_level(
+ &self,
+ sec_level: &SecurityLevel,
+ ) -> Result<Strong<dyn IRemotelyProvisionedComponent>> {
+ if let Some(dev) = self.device_by_sec_level.get(sec_level) {
+ dev.get_interface().context("In get_dev_by_sec_level.")
+ } else {
+ Err(error::Error::sys()).context(concat!(
+ "In get_dev_by_sec_level: Remote instance for requested security level",
+ " not found."
+ ))
+ }
+ }
+
+ /// Creates a new instance of the remote provisioning service
+ pub fn new_native_binder() -> Result<Strong<dyn IRemoteProvisioning>> {
+ let mut result: Self = Default::default();
+ let dev = get_remotely_provisioned_component(&SecurityLevel::TRUSTED_ENVIRONMENT)
+ .context("In new_native_binder: Failed to get TEE Remote Provisioner instance.")?;
+ result.device_by_sec_level.insert(SecurityLevel::TRUSTED_ENVIRONMENT, dev);
+ if let Ok(dev) = get_remotely_provisioned_component(&SecurityLevel::STRONGBOX) {
+ result.device_by_sec_level.insert(SecurityLevel::STRONGBOX, dev);
+ }
+ Ok(BnRemoteProvisioning::new_binder(result))
+ }
+
+ /// Populates the AttestationPoolStatus parcelable with information about how many
+ /// certs will be expiring by the date provided in `expired_by` along with how many
+ /// keys have not yet been assigned.
+ pub fn get_pool_status(
+ &self,
+ expired_by: i64,
+ sec_level: SecurityLevel,
+ ) -> Result<AttestationPoolStatus> {
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ DB.with::<_, Result<AttestationPoolStatus>>(|db| {
+ let mut db = db.borrow_mut();
+ // delete_expired_attestation_keys is always safe to call, and will remove anything
+ // older than the date at the time of calling. No work should be done on the
+ // attestation keys unless the pool status is checked first, so this call should be
+ // enough to routinely clean out expired keys.
+ db.delete_expired_attestation_keys()?;
+ Ok(db.get_attestation_pool_status(expired_by, &uuid)?)
+ })
+ }
+
+ /// Generates a CBOR blob which will be assembled by the calling code into a larger
+ /// CBOR blob intended for delivery to a provisioning server. This blob will contain
+ /// `num_csr` certificate signing requests for attestation keys generated in the TEE,
+ /// along with a server provided `eek` and `challenge`. The endpoint encryption key will
+ /// be used to encrypt the sensitive contents being transmitted to the server, and the
+ /// challenge will ensure freshness. A `test_mode` flag will instruct the remote provisioning
+ /// HAL if it is okay to accept EEKs that aren't signed by something that chains back to the
+ /// baked in root of trust in the underlying IRemotelyProvisionedComponent instance.
+ #[allow(clippy::too_many_arguments)]
+ pub fn generate_csr(
+ &self,
+ test_mode: bool,
+ num_csr: i32,
+ eek: &[u8],
+ challenge: &[u8],
+ sec_level: SecurityLevel,
+ protected_data: &mut ProtectedData,
+ device_info: &mut DeviceInfo,
+ ) -> Result<Vec<u8>> {
+ let dev = self.get_dev_by_sec_level(&sec_level)?;
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ let keys_to_sign = DB.with::<_, Result<Vec<MacedPublicKey>>>(|db| {
+ let mut db = db.borrow_mut();
+ Ok(db
+ .fetch_unsigned_attestation_keys(num_csr, &uuid)?
+ .iter()
+ .map(|key| MacedPublicKey { macedKey: key.to_vec() })
+ .collect())
+ })?;
+ let mut mac = map_rem_prov_error(dev.generateCertificateRequest(
+ test_mode,
+ &keys_to_sign,
+ eek,
+ challenge,
+ device_info,
+ protected_data,
+ ))
+ .context("In generate_csr: Failed to generate csr")?;
+ let mut cose_mac_0 = Vec::<u8>::new();
+ // TODO(b/180392379): Replace this manual CBOR generation with the cbor-serde crate as well.
+ // This generates an array consisting of the mac and the public key Maps.
+ // Just generate the actual MacedPublicKeys structure when the crate is
+ // available.
+ cose_mac_0.push((0b100_00000 | (keys_to_sign.len() + 1)) as u8);
+ cose_mac_0.push(0b010_11000); //push mac
+ cose_mac_0.push(mac.len() as u8);
+ cose_mac_0.append(&mut mac);
+ for maced_public_key in keys_to_sign {
+ if maced_public_key.macedKey.len() > 83 + 8 {
+ cose_mac_0.extend_from_slice(&maced_public_key.macedKey[8..83 + 8]);
+ }
+ }
+ Ok(cose_mac_0)
+ }
+
+ /// Provisions a certificate chain for a key whose CSR was included in generate_csr. The
+ /// `public_key` is used to index into the SQL database in order to insert the `certs` blob
+ /// which represents a PEM encoded X.509 certificate chain. The `expiration_date` is provided
+ /// as a convenience from the caller to avoid having to parse the certificates semantically
+ /// here.
+ pub fn provision_cert_chain(
+ &self,
+ public_key: &[u8],
+ batch_cert: &[u8],
+ certs: &[u8],
+ expiration_date: i64,
+ sec_level: SecurityLevel,
+ ) -> Result<()> {
+ DB.with::<_, Result<()>>(|db| {
+ let mut db = db.borrow_mut();
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ Ok(db.store_signed_attestation_certificate_chain(
+ public_key,
+ batch_cert,
+ certs, /* DER encoded certificate chain */
+ expiration_date,
+ &uuid,
+ )?)
+ })
+ }
+
+ /// Submits a request to the Remote Provisioner HAL to generate a signing key pair.
+ /// `is_test_mode` indicates whether or not the returned public key should be marked as being
+ /// for testing in order to differentiate them from private keys. If the call is successful,
+ /// the key pair is then added to the database.
+ pub fn generate_key_pair(&self, is_test_mode: bool, sec_level: SecurityLevel) -> Result<()> {
+ let (_, _, uuid) = get_keymint_device(&sec_level)?;
+ let dev = self.get_dev_by_sec_level(&sec_level)?;
+ let mut maced_key = MacedPublicKey { macedKey: Vec::new() };
+ let priv_key =
+ map_rem_prov_error(dev.generateEcdsaP256KeyPair(is_test_mode, &mut maced_key))
+ .context("In generate_key_pair: Failed to generated ECDSA keypair.")?;
+ // TODO(b/180392379): This is a brittle hack that relies on the consistent formatting of
+ // the returned CBOR blob in order to extract the public key.
+ let data = &maced_key.macedKey;
+ if data.len() < 85 {
+ return Err(error::Error::sys()).context(concat!(
+ "In generate_key_pair: CBOR blob returned from",
+ "RemotelyProvisionedComponent is definitely malformatted or empty."
+ ));
+ }
+ let mut raw_key: Vec<u8> = vec![0; 64];
+ raw_key[0..32].clone_from_slice(&data[18..18 + 32]);
+ raw_key[32..64].clone_from_slice(&data[53..53 + 32]);
+ DB.with::<_, Result<()>>(|db| {
+ let mut db = db.borrow_mut();
+ Ok(db.create_attestation_key_entry(&maced_key.macedKey, &raw_key, &priv_key, &uuid)?)
+ })
+ }
+
+ /// Checks the security level of each available IRemotelyProvisionedComponent hal and returns
+ /// all levels in an array to the caller.
+ pub fn get_security_levels(&self) -> Result<Vec<SecurityLevel>> {
+ Ok(self.device_by_sec_level.keys().cloned().collect())
+ }
+
+ /// Deletes all attestation keys generated by the IRemotelyProvisionedComponent from the device,
+ /// regardless of where they are in the attestation key lifecycle.
+ pub fn delete_all_keys(&self) -> Result<i64> {
+ DB.with::<_, Result<i64>>(|db| {
+ let mut db = db.borrow_mut();
+ Ok(db.delete_all_attestation_keys()?)
+ })
+ }
+}
+
+impl binder::Interface for RemoteProvisioningService {}
+
+// Implementation of IRemoteProvisioning. See AIDL spec at
+// :aidl/android/security/remoteprovisioning/IRemoteProvisioning.aidl
+impl IRemoteProvisioning for RemoteProvisioningService {
+ fn getPoolStatus(
+ &self,
+ expired_by: i64,
+ sec_level: SecurityLevel,
+ ) -> binder::public_api::Result<AttestationPoolStatus> {
+ map_or_log_err(self.get_pool_status(expired_by, sec_level), Ok)
+ }
+
+ fn generateCsr(
+ &self,
+ test_mode: bool,
+ num_csr: i32,
+ eek: &[u8],
+ challenge: &[u8],
+ sec_level: SecurityLevel,
+ protected_data: &mut ProtectedData,
+ device_info: &mut DeviceInfo,
+ ) -> binder::public_api::Result<Vec<u8>> {
+ map_or_log_err(
+ self.generate_csr(
+ test_mode,
+ num_csr,
+ eek,
+ challenge,
+ sec_level,
+ protected_data,
+ device_info,
+ ),
+ Ok,
+ )
+ }
+
+ fn provisionCertChain(
+ &self,
+ public_key: &[u8],
+ batch_cert: &[u8],
+ certs: &[u8],
+ expiration_date: i64,
+ sec_level: SecurityLevel,
+ ) -> binder::public_api::Result<()> {
+ map_or_log_err(
+ self.provision_cert_chain(public_key, batch_cert, certs, expiration_date, sec_level),
+ Ok,
+ )
+ }
+
+ fn generateKeyPair(
+ &self,
+ is_test_mode: bool,
+ sec_level: SecurityLevel,
+ ) -> binder::public_api::Result<()> {
+ map_or_log_err(self.generate_key_pair(is_test_mode, sec_level), Ok)
+ }
+
+ fn getSecurityLevels(&self) -> binder::public_api::Result<Vec<SecurityLevel>> {
+ map_or_log_err(self.get_security_levels(), Ok)
+ }
+
+ fn deleteAllKeys(&self) -> binder::public_api::Result<i64> {
+ map_or_log_err(self.delete_all_keys(), Ok)
+ }
+}
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 12b75bf..ec6c4d7 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -12,16 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![allow(unused_variables)]
-
//! This crate implements the IKeystoreSecurityLevel interface.
-use crate::gc::Gc;
+use crate::globals::get_keymint_device;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- Algorithm::Algorithm, HardwareAuthenticatorType::HardwareAuthenticatorType,
- IKeyMintDevice::IKeyMintDevice, KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
- KeyParameter::KeyParameter, KeyParameterValue::KeyParameterValue, SecurityLevel::SecurityLevel,
- Tag::Tag,
+ Algorithm::Algorithm, AttestationKey::AttestationKey,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, IKeyMintDevice::IKeyMintDevice,
+ KeyCreationResult::KeyCreationResult, KeyFormat::KeyFormat,
+ KeyMintHardwareInfo::KeyMintHardwareInfo, KeyParameter::KeyParameter,
+ KeyParameterValue::KeyParameterValue, SecurityLevel::SecurityLevel, Tag::Tag,
};
use android_system_keystore2::aidl::android::system::keystore2::{
AuthenticatorSpec::AuthenticatorSpec, CreateOperationResponse::CreateOperationResponse,
@@ -31,53 +30,72 @@
KeyMetadata::KeyMetadata, KeyParameters::KeyParameters,
};
-use crate::globals::ENFORCEMENTS;
+use crate::attestation_key_utils::{get_attest_key_info, AttestationKeyInfo};
+use crate::database::{CertificateInfo, KeyIdGuard};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
use crate::key_parameter::KeyParameter as KsKeyParam;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
-use crate::utils::{check_key_permission, uid_to_android_user, Asp};
-use crate::{database::KeyIdGuard, globals::DB};
-use crate::{
- database::{DateTime, KeyMetaData, KeyMetaEntry, KeyType},
- permission::KeyPerm,
+use crate::metrics::log_key_creation_event_stats;
+use crate::remote_provisioning::RemProvState;
+use crate::super_key::{KeyBlob, SuperKeyManager};
+use crate::utils::{
+ check_device_attestation_permissions, check_key_permission, is_device_id_attestation_tag,
+ uid_to_android_user, Asp,
};
use crate::{
- database::{KeyEntry, KeyEntryLoadBits, SubComponentType},
+ database::{
+ BlobMetaData, BlobMetaEntry, DateTime, KeyEntry, KeyEntryLoadBits, KeyMetaData,
+ KeyMetaEntry, KeyType, SubComponentType, Uuid,
+ },
operation::KeystoreOperation,
+ operation::LoggingInfo,
operation::OperationDb,
+ permission::KeyPerm,
};
use crate::{
error::{self, map_km_error, map_or_log_err, Error, ErrorCode},
utils::key_characteristics_to_internal,
};
use anyhow::{anyhow, Context, Result};
-use binder::{IBinder, Interface, ThreadState};
+use binder::{IBinderInternal, Strong, ThreadState};
/// Implementation of the IKeystoreSecurityLevel Interface.
pub struct KeystoreSecurityLevel {
security_level: SecurityLevel,
keymint: Asp,
+ hw_info: KeyMintHardwareInfo,
+ km_uuid: Uuid,
operation_db: OperationDb,
+ rem_prov_state: RemProvState,
}
// Blob of 32 zeroes used as empty masking key.
static ZERO_BLOB_32: &[u8] = &[0; 32];
+// Per RFC 5280 4.1.2.5, an undefined expiration (not-after) field should be set to the
+// GeneralizedTime 99991231235959Z (9999-12-31T23:59:59Z), which is 253402300799000 ms after
+// Jan 1, 1970.
+const UNDEFINED_NOT_AFTER: i64 = 253402300799000i64;
+
impl KeystoreSecurityLevel {
/// Creates a new security level instance wrapped in a
/// BnKeystoreSecurityLevel proxy object. It also
- /// calls `IBinder::set_requesting_sid` on the new interface, because
+ /// calls `IBinderInternal::set_requesting_sid` on the new interface, because
/// we need it for checking keystore permissions.
pub fn new_native_binder(
security_level: SecurityLevel,
- ) -> Result<impl IKeystoreSecurityLevel + Send> {
+ ) -> Result<(Strong<dyn IKeystoreSecurityLevel>, Uuid)> {
+ let (dev, hw_info, km_uuid) = get_keymint_device(&security_level)
+ .context("In KeystoreSecurityLevel::new_native_binder.")?;
let result = BnKeystoreSecurityLevel::new_binder(Self {
security_level,
- keymint: crate::globals::get_keymint_device(security_level)
- .context("In KeystoreSecurityLevel::new_native_binder.")?,
+ keymint: dev,
+ hw_info,
+ km_uuid,
operation_db: OperationDb::new(),
+ rem_prov_state: RemProvState::new(security_level, km_uuid),
});
result.as_binder().set_requesting_sid(true);
- Ok(result)
+ Ok((result, km_uuid))
}
fn store_new_key(
@@ -85,6 +103,7 @@
key: KeyDescriptor,
creation_result: KeyCreationResult,
user_id: u32,
+ flags: Option<i32>,
) -> Result<KeyMetadata> {
let KeyCreationResult {
keyBlob: key_blob,
@@ -92,7 +111,7 @@
certificateChain: mut certificate_chain,
} = creation_result;
- let (cert, cert_chain): (Option<Vec<u8>>, Option<Vec<u8>>) = (
+ let mut cert_info: CertificateInfo = CertificateInfo::new(
match certificate_chain.len() {
0 => None,
_ => Some(certificate_chain.remove(0).encodedCertificate),
@@ -120,28 +139,41 @@
let creation_date = DateTime::now().context("Trying to make creation time.")?;
let key = match key.domain {
- Domain::BLOB => {
- KeyDescriptor { domain: Domain::BLOB, blob: Some(key_blob), ..Default::default() }
- }
+ Domain::BLOB => KeyDescriptor {
+ domain: Domain::BLOB,
+ blob: Some(key_blob.to_vec()),
+ ..Default::default()
+ },
_ => DB
.with::<_, Result<KeyDescriptor>>(|db| {
- let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::CreationDate(creation_date));
-
let mut db = db.borrow_mut();
- let (need_gc, key_id) = db
- .store_new_key(
- key,
+
+ let (key_blob, mut blob_metadata) = SUPER_KEY
+ .handle_super_encryption_on_key_init(
+ &mut db,
+ &LEGACY_MIGRATOR,
+ &(key.domain),
&key_parameters,
+ flags,
+ user_id,
&key_blob,
- cert.as_deref(),
- cert_chain.as_deref(),
- &metadata,
+ )
+ .context("In store_new_key. Failed to handle super encryption.")?;
+
+ let mut key_metadata = KeyMetaData::new();
+ key_metadata.add(KeyMetaEntry::CreationDate(creation_date));
+ blob_metadata.add(BlobMetaEntry::KmUuid(self.km_uuid));
+
+ let key_id = db
+ .store_new_key(
+ &key,
+ &key_parameters,
+ &(&key_blob, &blob_metadata),
+ &cert_info,
+ &key_metadata,
+ &self.km_uuid,
)
.context("In store_new_key.")?;
- if need_gc {
- Gc::notify_gc();
- }
Ok(KeyDescriptor {
domain: Domain::KEY_ID,
nspace: key_id.id(),
@@ -154,8 +186,8 @@
Ok(KeyMetadata {
key,
keySecurityLevel: self.security_level,
- certificate: cert,
- certificateChain: cert_chain,
+ certificate: cert_info.take_cert(),
+ certificateChain: cert_info.take_cert_chain(),
authorizations: crate::utils::key_parameters_to_authorizations(key_parameters),
modificationTimeMs: creation_date.to_millis_epoch(),
})
@@ -172,10 +204,15 @@
// so that we can use it by reference like the blob provided by the key descriptor.
// Otherwise, we would have to clone the blob from the key descriptor.
let scoping_blob: Vec<u8>;
- let (km_blob, key_properties, key_id_guard) = match key.domain {
+ let (km_blob, key_properties, key_id_guard, blob_metadata) = match key.domain {
Domain::BLOB => {
check_key_permission(KeyPerm::use_(), key, &None)
.context("In create_operation: checking use permission for Domain::BLOB.")?;
+ if forced {
+ check_key_permission(KeyPerm::req_forced_op(), key, &None).context(
+ "In create_operation: checking forced permission for Domain::BLOB.",
+ )?;
+ }
(
match &key.blob {
Some(blob) => blob,
@@ -188,33 +225,42 @@
},
None,
None,
+ BlobMetaData::new(),
)
}
_ => {
let (key_id_guard, mut key_entry) = DB
.with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
- db.borrow_mut().load_key_entry(
- key.clone(),
- KeyType::Client,
- KeyEntryLoadBits::KM,
- caller_uid,
- |k, av| check_key_permission(KeyPerm::use_(), k, &av),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::KM,
+ caller_uid,
+ |k, av| {
+ check_key_permission(KeyPerm::use_(), k, &av)?;
+ if forced {
+ check_key_permission(KeyPerm::req_forced_op(), k, &av)?;
+ }
+ Ok(())
+ },
+ )
+ })
})
.context("In create_operation: Failed to load key blob.")?;
- scoping_blob = match key_entry.take_km_blob() {
- Some(blob) => blob,
- None => {
- return Err(Error::sys()).context(concat!(
- "In create_operation: Successfully loaded key entry,",
- " but KM blob was missing."
- ))
- }
- };
+
+ let (blob, blob_metadata) =
+ key_entry.take_key_blob_info().ok_or_else(Error::sys).context(concat!(
+ "In create_operation: Successfully loaded key entry, ",
+ "but KM blob was missing."
+ ))?;
+ scoping_blob = blob;
+
(
&scoping_blob,
Some((key_id_guard.id(), key_entry.into_key_parameters())),
Some(key_id_guard),
+ blob_metadata,
)
}
};
@@ -234,16 +280,17 @@
purpose,
key_properties.as_ref(),
operation_parameters.as_ref(),
- // TODO b/178222844 Replace this with the configuration returned by
- // KeyMintDevice::getHardwareInfo.
- // For now we assume that strongbox implementations need secure timestamps.
- self.security_level == SecurityLevel::STRONGBOX,
+ self.hw_info.timestampTokenRequired,
)
.context("In create_operation.")?;
let immediate_hat = immediate_hat.unwrap_or_default();
- let km_dev: Box<dyn IKeyMintDevice> = self
+ let km_blob = SUPER_KEY
+ .unwrap_key_if_required(&blob_metadata, km_blob)
+ .context("In create_operation. Failed to handle super encryption.")?;
+
+ let km_dev: Strong<dyn IKeyMintDevice> = self
.keymint
.get_interface()
.context("In create_operation: Failed to get KeyMint device")?;
@@ -253,6 +300,7 @@
&*km_dev,
key_id_guard,
&km_blob,
+ &blob_metadata,
&operation_parameters,
|blob| loop {
match map_km_error(km_dev.begin(
@@ -262,7 +310,7 @@
&immediate_hat,
)) {
Err(Error::Km(ErrorCode::TOO_MANY_OPERATIONS)) => {
- self.operation_db.prune(caller_uid)?;
+ self.operation_db.prune(caller_uid, forced)?;
continue;
}
v => return v,
@@ -273,14 +321,17 @@
let operation_challenge = auth_info.finalize_create_authorization(begin_result.challenge);
+ let op_params: Vec<KeyParameter> = operation_parameters.to_vec();
+
let operation = match begin_result.operation {
Some(km_op) => {
- self.operation_db.create_operation(km_op, caller_uid, auth_info)
+ self.operation_db.create_operation(km_op, caller_uid, auth_info, forced,
+ LoggingInfo::new(purpose, op_params, upgraded_blob.is_some()))
},
None => return Err(Error::sys()).context("In create_operation: Begin operation returned successfully, but did not return a valid operation."),
};
- let op_binder: Box<dyn IKeystoreOperation> =
+ let op_binder: binder::public_api::Strong<dyn IKeystoreOperation> =
KeystoreOperation::new_native_binder(operation)
.as_binder()
.into_interface()
@@ -293,30 +344,76 @@
0 => None,
_ => Some(KeyParameters { keyParameter: begin_result.params }),
},
+ // An upgraded blob should only be returned if the caller has permission
+ // to use Domain::BLOB keys. If we got to this point, we already checked
+ // that the caller had that permission.
+ upgradedBlob: if key.domain == Domain::BLOB { upgraded_blob } else { None },
})
}
- fn add_attestation_parameters(uid: u32, params: &[KeyParameter]) -> Result<Vec<KeyParameter>> {
+ fn add_certificate_parameters(
+ uid: u32,
+ params: &[KeyParameter],
+ key: &KeyDescriptor,
+ ) -> Result<Vec<KeyParameter>> {
let mut result = params.to_vec();
+ // If there is an attestation challenge we need to get an application id.
if params.iter().any(|kp| kp.tag == Tag::ATTESTATION_CHALLENGE) {
let aaid = keystore2_aaid::get_aaid(uid).map_err(|e| {
- anyhow!(format!("In add_attestation_parameters: get_aaid returned status {}.", e))
+ anyhow!(format!("In add_certificate_parameters: get_aaid returned status {}.", e))
})?;
result.push(KeyParameter {
tag: Tag::ATTESTATION_APPLICATION_ID,
value: KeyParameterValue::Blob(aaid),
});
}
+
+ if params.iter().any(|kp| kp.tag == Tag::INCLUDE_UNIQUE_ID) {
+ check_key_permission(KeyPerm::gen_unique_id(), key, &None).context(concat!(
+ "In add_certificate_parameters: ",
+ "Caller does not have the permission for device unique attestation."
+ ))?;
+ }
+
+ // If the caller requests any device identifier attestation tag, check that they hold the
+ // correct Android permission.
+ if params.iter().any(|kp| is_device_id_attestation_tag(kp.tag)) {
+ check_device_attestation_permissions().context(concat!(
+ "In add_certificate_parameters: ",
+ "Caller does not have the permission to attest device identifiers."
+ ))?;
+ }
+
+ // If we are generating/importing an asymmetric key, we need to make sure
+ // that NOT_BEFORE and NOT_AFTER are present.
+ match params.iter().find(|kp| kp.tag == Tag::ALGORITHM) {
+ Some(KeyParameter { tag: _, value: KeyParameterValue::Algorithm(Algorithm::RSA) })
+ | Some(KeyParameter { tag: _, value: KeyParameterValue::Algorithm(Algorithm::EC) }) => {
+ if !params.iter().any(|kp| kp.tag == Tag::CERTIFICATE_NOT_BEFORE) {
+ result.push(KeyParameter {
+ tag: Tag::CERTIFICATE_NOT_BEFORE,
+ value: KeyParameterValue::DateTime(0),
+ })
+ }
+ if !params.iter().any(|kp| kp.tag == Tag::CERTIFICATE_NOT_AFTER) {
+ result.push(KeyParameter {
+ tag: Tag::CERTIFICATE_NOT_AFTER,
+ value: KeyParameterValue::DateTime(UNDEFINED_NOT_AFTER),
+ })
+ }
+ }
+ _ => {}
+ }
Ok(result)
}
fn generate_key(
&self,
key: &KeyDescriptor,
- attestation_key: Option<&KeyDescriptor>,
+ attest_key_descriptor: Option<&KeyDescriptor>,
params: &[KeyParameter],
flags: i32,
- entropy: &[u8],
+ _entropy: &[u8],
) -> Result<KeyMetadata> {
if key.domain != Domain::BLOB && key.alias.is_none() {
return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
@@ -335,25 +432,74 @@
};
// generate_key requires the rebind permission.
+ // Must return on error for security reasons.
check_key_permission(KeyPerm::rebind(), &key, &None).context("In generate_key.")?;
- let params = Self::add_attestation_parameters(caller_uid, params)
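+ // Resolve the attestation key first: Domain::BLOB keys are not backed by a database
+ // entry, so no attestation key is looked up for them; everything else goes through
+ // get_attest_key_info, which also consults the remote provisioning state.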
+ let attestation_key_info = match (key.domain, attest_key_descriptor) {
+ (Domain::BLOB, _) => None,
+ _ => DB
+ .with(|db| {
+ get_attest_key_info(
+ &key,
+ caller_uid,
+ attest_key_descriptor,
+ params,
+ &self.rem_prov_state,
+ &mut db.borrow_mut(),
+ )
+ })
+ .context("In generate_key: Trying to get an attestation key")?,
+ };
+ let params = Self::add_certificate_parameters(caller_uid, params, &key)
.context("In generate_key: Trying to get aaid.")?;
- let km_dev: Box<dyn IKeyMintDevice> = self.keymint.get_interface()?;
- map_km_error(km_dev.addRngEntropy(entropy))
- .context("In generate_key: Trying to add entropy.")?;
- let creation_result = map_km_error(km_dev.generateKey(&params))
- .context("In generate_key: While generating Key")?;
+ let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface()?;
+
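+ // Three cases follow: sign with a user generated attestation key (upgrading its key
+ // blob first if KeyMint reports KEY_REQUIRES_UPGRADE), sign with a remotely provisioned
+ // attestation key and append its certificate chain to the result, or generate without
+ // an explicit attestation key.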
+ let creation_result = match attestation_key_info {
+ Some(AttestationKeyInfo::UserGenerated {
+ key_id_guard,
+ blob,
+ blob_metadata,
+ issuer_subject,
+ }) => self
+ .upgrade_keyblob_if_required_with(
+ &*km_dev,
+ Some(key_id_guard),
+ &KeyBlob::Ref(&blob),
+ &blob_metadata,
+ &params,
+ |blob| {
+ let attest_key = Some(AttestationKey {
+ keyBlob: blob.to_vec(),
+ attestKeyParams: vec![],
+ issuerSubjectName: issuer_subject.clone(),
+ });
+ map_km_error(km_dev.generateKey(&params, attest_key.as_ref()))
+ },
+ )
+ .context("In generate_key: Using user generated attestation key.")
+ .map(|(result, _)| result),
+ Some(AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }) => {
+ map_km_error(km_dev.generateKey(&params, Some(&attestation_key)))
+ .context("While generating key with remotely provisioned attestation key.")
+ .map(|mut creation_result| {
+ creation_result.certificateChain.push(attestation_certs);
+ creation_result
+ })
+ }
+ None => map_km_error(km_dev.generateKey(&params, None))
+ .context("While generating key without an explicit attestation key."),
+ }
+ .context("In generate_key.")?;
let user_id = uid_to_android_user(caller_uid);
- self.store_new_key(key, creation_result, user_id).context("In generate_key.")
+ self.store_new_key(key, creation_result, user_id, Some(flags)).context("In generate_key.")
}
fn import_key(
&self,
key: &KeyDescriptor,
- attestation_key: Option<&KeyDescriptor>,
+ _attestation_key: Option<&KeyDescriptor>,
params: &[KeyParameter],
flags: i32,
key_data: &[u8],
@@ -377,7 +523,7 @@
// import_key requires the rebind permission.
check_key_permission(KeyPerm::rebind(), &key, &None).context("In import_key.")?;
- let params = Self::add_attestation_parameters(caller_uid, params)
+ let params = Self::add_certificate_parameters(caller_uid, params, &key)
.context("In import_key: Trying to get aaid.")?;
let format = params
@@ -396,13 +542,14 @@
})
.context("In import_key.")?;
- let km_dev: Box<dyn IKeyMintDevice> =
+ let km_dev: Strong<dyn IKeyMintDevice> =
self.keymint.get_interface().context("In import_key: Trying to get the KM device")?;
- let creation_result = map_km_error(km_dev.importKey(¶ms, format, key_data))
- .context("In import_key: Trying to call importKey")?;
+ let creation_result =
+ map_km_error(km_dev.importKey(¶ms, format, key_data, None /* attestKey */))
+ .context("In import_key: Trying to call importKey")?;
let user_id = uid_to_android_user(caller_uid);
- self.store_new_key(key, creation_result, user_id).context("In import_key.")
+ self.store_new_key(key, creation_result, user_id, Some(flags)).context("In import_key.")
}
fn import_wrapped_key(
@@ -413,10 +560,21 @@
params: &[KeyParameter],
authenticators: &[AuthenticatorSpec],
) -> Result<KeyMetadata> {
- if !(key.domain == Domain::BLOB && key.alias.is_some()) {
- return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
- .context("In import_wrapped_key: Alias must be specified.");
- }
+ let wrapped_data: &[u8] = match key {
+ KeyDescriptor { domain: Domain::APP, blob: Some(ref blob), alias: Some(_), .. }
+ | KeyDescriptor {
+ domain: Domain::SELINUX, blob: Some(ref blob), alias: Some(_), ..
+ } => blob,
+ _ => {
+ return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT)).context(format!(
+ concat!(
+ "In import_wrapped_key: Alias and blob must be specified ",
+ "and domain must be APP or SELINUX. {:?}"
+ ),
+ key
+ ))
+ }
+ };
if wrapping_key.domain == Domain::BLOB {
return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT)).context(
@@ -424,16 +582,9 @@
);
}
- let wrapped_data = match &key.blob {
- Some(d) => d,
- None => {
- return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT)).context(
- "In import_wrapped_key: Blob must be specified and hold wrapped key data.",
- )
- }
- };
-
let caller_uid = ThreadState::get_calling_uid();
+ let user_id = uid_to_android_user(caller_uid);
+
let key = match key.domain {
Domain::APP => KeyDescriptor {
domain: key.domain,
@@ -441,32 +592,41 @@
alias: key.alias.clone(),
blob: None,
},
- _ => key.clone(),
+ Domain::SELINUX => KeyDescriptor {
+ domain: Domain::SELINUX,
+ nspace: key.nspace,
+ alias: key.alias.clone(),
+ blob: None,
+ },
+ _ => panic!("Unreachable."),
};
- // import_wrapped_key requires the rebind permission for the new key.
+ // Import_wrapped_key requires the rebind permission for the new key.
check_key_permission(KeyPerm::rebind(), &key, &None).context("In import_wrapped_key.")?;
- let (wrapping_key_id_guard, wrapping_key_entry) = DB
+ let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
.with(|db| {
- db.borrow_mut().load_key_entry(
- wrapping_key.clone(),
- KeyType::Client,
- KeyEntryLoadBits::KM,
- caller_uid,
- |k, av| check_key_permission(KeyPerm::use_(), k, &av),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &wrapping_key,
+ KeyType::Client,
+ KeyEntryLoadBits::KM,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::use_(), k, &av),
+ )
+ })
})
.context("Failed to load wrapping key.")?;
- let wrapping_key_blob = match wrapping_key_entry.km_blob() {
- Some(blob) => blob,
- None => {
- return Err(error::Error::sys()).context(concat!(
- "No km_blob after successfully loading key.",
- " This should never happen."
- ))
- }
- };
+
+ let (wrapping_key_blob, wrapping_blob_metadata) = wrapping_key_entry
+ .take_key_blob_info()
+ .ok_or_else(error::Error::sys)
+ .context("No km_blob after successfully loading key. This should never happen.")?;
+
+ let wrapping_key_blob =
+ SUPER_KEY.unwrap_key_if_required(&wrapping_blob_metadata, &wrapping_key_blob).context(
+ "In import_wrapped_key. Failed to handle super encryption for wrapping key.",
+ )?;
// km_dev.importWrappedKey does not return a certificate chain.
// TODO Do we assume that all wrapped keys are symmetric?
@@ -478,8 +638,7 @@
HardwareAuthenticatorType::PASSWORD => Some(a.authenticatorId),
_ => None,
})
- .ok_or(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
- .context("A password authenticator SID must be specified.")?;
+ .unwrap_or(-1);
let fp_sid = authenticators
.iter()
@@ -487,22 +646,22 @@
HardwareAuthenticatorType::FINGERPRINT => Some(a.authenticatorId),
_ => None,
})
- .ok_or(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
- .context("A fingerprint authenticator SID must be specified.")?;
+ .unwrap_or(-1);
let masking_key = masking_key.unwrap_or(ZERO_BLOB_32);
- let km_dev: Box<dyn IKeyMintDevice> = self.keymint.get_interface()?;
+ let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface()?;
let (creation_result, _) = self
.upgrade_keyblob_if_required_with(
&*km_dev,
Some(wrapping_key_id_guard),
- wrapping_key_blob,
+ &wrapping_key_blob,
+ &wrapping_blob_metadata,
&[],
|wrapping_blob| {
let creation_result = map_km_error(km_dev.importWrappedKey(
wrapped_data,
- wrapping_key_blob,
+ wrapping_blob,
masking_key,
+ &params,
pw_sid,
@@ -513,39 +672,66 @@
)
.context("In import_wrapped_key.")?;
- let user_id = uid_to_android_user(caller_uid);
- self.store_new_key(key, creation_result, user_id)
+ self.store_new_key(key, creation_result, user_id, None)
.context("In import_wrapped_key: Trying to store the new key.")
}
+ fn store_upgraded_keyblob(
+ key_id_guard: KeyIdGuard,
+ km_uuid: Option<&Uuid>,
+ key_blob: &KeyBlob,
+ upgraded_blob: &[u8],
+ ) -> Result<()> {
+ let (upgraded_blob_to_be_stored, new_blob_metadata) =
+ SuperKeyManager::reencrypt_if_required(key_blob, &upgraded_blob)
+ .context("In store_upgraded_keyblob: Failed to handle super encryption.")?;
+
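+ // Assumption: reencrypt_if_required re-applies whatever super encryption protected the
+ // original blob to the upgraded blob, and only returns new blob metadata when it did so.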
+ let mut new_blob_metadata = new_blob_metadata.unwrap_or_else(BlobMetaData::new);
+ if let Some(uuid) = km_uuid {
+ new_blob_metadata.add(BlobMetaEntry::KmUuid(*uuid));
+ }
+
+ DB.with(|db| {
+ let mut db = db.borrow_mut();
+ db.set_blob(
+ &key_id_guard,
+ SubComponentType::KEY_BLOB,
+ Some(&upgraded_blob_to_be_stored),
+ Some(&new_blob_metadata),
+ )
+ })
+ .context("In store_upgraded_keyblob: Failed to insert upgraded blob into the database.")
+ }
+
fn upgrade_keyblob_if_required_with<T, F>(
&self,
km_dev: &dyn IKeyMintDevice,
key_id_guard: Option<KeyIdGuard>,
- blob: &[u8],
+ key_blob: &KeyBlob,
+ blob_metadata: &BlobMetaData,
params: &[KeyParameter],
f: F,
) -> Result<(T, Option<Vec<u8>>)>
where
F: Fn(&[u8]) -> Result<T, Error>,
{
- match f(blob) {
+ match f(key_blob) {
Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
- let upgraded_blob = map_km_error(km_dev.upgradeKey(blob, params))
+ let upgraded_blob = map_km_error(km_dev.upgradeKey(key_blob, params))
.context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
- key_id_guard.map_or(Ok(()), |key_id_guard| {
- DB.with(|db| {
- db.borrow_mut().insert_blob(
- &key_id_guard,
- SubComponentType::KEY_BLOB,
- &upgraded_blob,
- )
- })
- .context(concat!(
- "In upgrade_keyblob_if_required_with: ",
- "Failed to insert upgraded blob into the database.",
- ))
- })?;
+
+ if let Some(kid) = key_id_guard {
+ Self::store_upgraded_keyblob(
+ kid,
+ blob_metadata.km_uuid(),
+ key_blob,
+ &upgraded_blob,
+ )
+ .context(
+ "In upgrade_keyblob_if_required_with: store_upgraded_keyblob failed",
+ )?;
+ }
+
match f(&upgraded_blob) {
Ok(v) => Ok((v, Some(upgraded_blob))),
Err(e) => Err(e).context(concat!(
@@ -554,12 +740,76 @@
)),
}
}
- Err(e) => {
- Err(e).context("In upgrade_keyblob_if_required_with: Failed perform operation.")
+ result => {
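+ // Even when no upgrade was required the blob may still have to be re-stored:
+ // force_reencrypt() is expected to be set when the blob was unwrapped with a super key
+ // that carries a reencrypt_with hint (see super_key.rs), e.g. so that ECDH wrapped
+ // blobs move to the cheaper AES super key on first use.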
+ if let Some(kid) = key_id_guard {
+ if key_blob.force_reencrypt() {
+ Self::store_upgraded_keyblob(
+ kid,
+ blob_metadata.km_uuid(),
+ key_blob,
+ key_blob,
+ )
+ .context(concat!(
+ "In upgrade_keyblob_if_required_with: ",
+ "store_upgraded_keyblob failed in forced reencrypt"
+ ))?;
+ }
+ }
+ result
+ .map(|v| (v, None))
+ .context("In upgrade_keyblob_if_required_with: Called closure failed.")
}
- Ok(v) => Ok((v, None)),
}
}
+
+ fn convert_storage_key_to_ephemeral(&self, storage_key: &KeyDescriptor) -> Result<Vec<u8>> {
+ if storage_key.domain != Domain::BLOB {
+ return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT)).context(concat!(
+ "In IKeystoreSecurityLevel convert_storage_key_to_ephemeral: ",
+ "Key must be of Domain::BLOB"
+ ));
+ }
+ let key_blob = storage_key
+ .blob
+ .as_ref()
+ .ok_or(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
+ .context(
+ "In IKeystoreSecurityLevel convert_storage_key_to_ephemeral: No key blob specified",
+ )?;
+
+ // convert_storage_key_to_ephemeral requires the associated permission
+ check_key_permission(KeyPerm::convert_storage_key_to_ephemeral(), storage_key, &None)
+ .context("In convert_storage_key_to_ephemeral: Check permission")?;
+
+ let km_dev: Strong<dyn IKeyMintDevice> = self.keymint.get_interface().context(concat!(
+ "In IKeystoreSecurityLevel convert_storage_key_to_ephemeral: ",
+ "Getting keymint device interface"
+ ))?;
+ map_km_error(km_dev.convertStorageKeyToEphemeral(key_blob))
+ .context("In keymint device convertStorageKeyToEphemeral")
+ }
+
+ fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
+ if key.domain != Domain::BLOB {
+ return Err(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
+ .context("In IKeystoreSecurityLevel delete_key: Key must be of Domain::BLOB");
+ }
+
+ let key_blob = key
+ .blob
+ .as_ref()
+ .ok_or(error::Error::Km(ErrorCode::INVALID_ARGUMENT))
+ .context("In IKeystoreSecurityLevel delete_key: No key blob specified")?;
+
+ check_key_permission(KeyPerm::delete(), key, &None)
+ .context("In IKeystoreSecurityLevel delete_key: Checking delete permissions")?;
+
+ let km_dev: Strong<dyn IKeyMintDevice> = self
+ .keymint
+ .get_interface()
+ .context("In IKeystoreSecurityLevel delete_key: Getting keymint device interface")?;
+ map_km_error(km_dev.deleteKey(&key_blob)).context("In keymint device deleteKey")
+ }
}
impl binder::Interface for KeystoreSecurityLevel {}
@@ -581,7 +831,9 @@
flags: i32,
entropy: &[u8],
) -> binder::public_api::Result<KeyMetadata> {
- map_or_log_err(self.generate_key(key, attestation_key, params, flags, entropy), Ok)
+ let result = self.generate_key(key, attestation_key, params, flags, entropy);
+ log_key_creation_event_stats(params, &result);
+ map_or_log_err(result, Ok)
}
fn importKey(
&self,
@@ -591,7 +843,9 @@
flags: i32,
key_data: &[u8],
) -> binder::public_api::Result<KeyMetadata> {
- map_or_log_err(self.import_key(key, attestation_key, params, flags, key_data), Ok)
+ let result = self.import_key(key, attestation_key, params, flags, key_data);
+ log_key_creation_event_stats(params, &result);
+ map_or_log_err(result, Ok)
}
fn importWrappedKey(
&self,
@@ -601,9 +855,18 @@
params: &[KeyParameter],
authenticators: &[AuthenticatorSpec],
) -> binder::public_api::Result<KeyMetadata> {
- map_or_log_err(
- self.import_wrapped_key(key, wrapping_key, masking_key, params, authenticators),
- Ok,
- )
+ let result =
+ self.import_wrapped_key(key, wrapping_key, masking_key, params, authenticators);
+ log_key_creation_event_stats(params, &result);
+ map_or_log_err(result, Ok)
+ }
+ fn convertStorageKeyToEphemeral(
+ &self,
+ storage_key: &KeyDescriptor,
+ ) -> binder::public_api::Result<Vec<u8>> {
+ map_or_log_err(self.convert_storage_key_to_ephemeral(storage_key), Ok)
+ }
+ fn deleteKey(&self, key: &KeyDescriptor) -> binder::public_api::Result<()> {
+ map_or_log_err(self.delete_key(key), Ok)
}
}
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index f8650e6..73bd526 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// TODO remove when fully implemented.
-#![allow(unused_variables)]
-
//! This crate implement the core Keystore 2.0 service API as defined by the Keystore 2.0
//! AIDL spec.
-use crate::globals::DB;
-use crate::permission;
+use std::collections::HashMap;
+
+use crate::error::{self, map_or_log_err, ErrorCode};
use crate::permission::{KeyPerm, KeystorePerm};
use crate::security_level::KeystoreSecurityLevel;
use crate::utils::{
@@ -27,112 +25,135 @@
key_parameters_to_authorizations, Asp,
};
use crate::{
+ database::Uuid,
+ globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+};
+use crate::{database::KEYSTORE_UUID, permission};
+use crate::{
database::{KeyEntryLoadBits, KeyType, SubComponentType},
error::ResponseCode,
};
-use crate::{
- error::{self, map_or_log_err, ErrorCode},
- gc::Gc,
-};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, IKeystoreSecurityLevel::IKeystoreSecurityLevel,
IKeystoreService::BnKeystoreService, IKeystoreService::IKeystoreService,
KeyDescriptor::KeyDescriptor, KeyEntryResponse::KeyEntryResponse, KeyMetadata::KeyMetadata,
};
-use anyhow::{anyhow, Context, Result};
-use binder::{IBinder, Interface, ThreadState};
+use anyhow::{Context, Result};
+use binder::{IBinderInternal, Strong, ThreadState};
use error::Error;
use keystore2_selinux as selinux;
/// Implementation of the IKeystoreService.
+#[derive(Default)]
pub struct KeystoreService {
- sec_level_tee: Asp,
- sec_level_strongbox: Option<Asp>,
+ i_sec_level_by_uuid: HashMap<Uuid, Asp>,
+ uuid_by_sec_level: HashMap<SecurityLevel, Uuid>,
}
impl KeystoreService {
/// Create a new instance of the Keystore 2.0 service.
- pub fn new_native_binder() -> Result<impl IKeystoreService> {
- let tee = KeystoreSecurityLevel::new_native_binder(SecurityLevel::TRUSTED_ENVIRONMENT)
- .map(|tee| Asp::new(tee.as_binder()))
- .context(concat!(
- "In KeystoreService::new_native_binder: ",
- "Trying to construct mendatory security level TEE."
- ))?;
- // Strongbox is optional, so we ignore errors and turn the result into an Option.
- let strongbox =
+ pub fn new_native_binder() -> Result<Strong<dyn IKeystoreService>> {
+ let mut result: Self = Default::default();
+ let (dev, uuid) =
KeystoreSecurityLevel::new_native_binder(SecurityLevel::TRUSTED_ENVIRONMENT)
- .map(|tee| Asp::new(tee.as_binder()))
- .ok();
+ .context(concat!(
+ "In KeystoreService::new_native_binder: ",
+ "Trying to construct mandatory security level TEE."
+ ))
+ .map(|(dev, uuid)| (Asp::new(dev.as_binder()), uuid))?;
+ result.i_sec_level_by_uuid.insert(uuid, dev);
+ result.uuid_by_sec_level.insert(SecurityLevel::TRUSTED_ENVIRONMENT, uuid);
- let result = BnKeystoreService::new_binder(Self {
- sec_level_tee: tee,
- sec_level_strongbox: strongbox,
- });
+ // Strongbox is optional, so we ignore errors and turn the result into an Option.
+ if let Ok((dev, uuid)) = KeystoreSecurityLevel::new_native_binder(SecurityLevel::STRONGBOX)
+ .map(|(dev, uuid)| (Asp::new(dev.as_binder()), uuid))
+ {
+ result.i_sec_level_by_uuid.insert(uuid, dev);
+ result.uuid_by_sec_level.insert(SecurityLevel::STRONGBOX, uuid);
+ }
+
+ let uuid_by_sec_level = result.uuid_by_sec_level.clone();
+ LEGACY_MIGRATOR
+ .set_init(move || {
+ (create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
+ })
+ .context(
+ "In KeystoreService::new_native_binder: Trying to initialize the legacy migrator.",
+ )?;
+
+ let result = BnKeystoreService::new_binder(result);
result.as_binder().set_requesting_sid(true);
Ok(result)
}
- fn get_security_level_internal(
- &self,
- security_level: SecurityLevel,
- ) -> Result<Option<Box<dyn IKeystoreSecurityLevel>>> {
- Ok(match (security_level, &self.sec_level_strongbox) {
- (SecurityLevel::TRUSTED_ENVIRONMENT, _) => Some(self.sec_level_tee.get_interface().context(
- "In get_security_level_internal: Failed to get IKeystoreSecurityLevel (TEE).",
- )?),
- (SecurityLevel::STRONGBOX, Some(strongbox)) => Some(strongbox.get_interface().context(
- "In get_security_level_internal: Failed to get IKeystoreSecurityLevel (Strongbox).",
- )?),
- _ => None,
- })
+ fn uuid_to_sec_level(&self, uuid: &Uuid) -> SecurityLevel {
+ self.uuid_by_sec_level
+ .iter()
+ .find(|(_, v)| **v == *uuid)
+ .map(|(s, _)| *s)
+ .unwrap_or(SecurityLevel::SOFTWARE)
+ }
+
+ fn get_i_sec_level_by_uuid(&self, uuid: &Uuid) -> Result<Strong<dyn IKeystoreSecurityLevel>> {
+ if let Some(dev) = self.i_sec_level_by_uuid.get(uuid) {
+ dev.get_interface().context("In get_i_sec_level_by_uuid.")
+ } else {
+ Err(error::Error::sys())
+ .context("In get_i_sec_level_by_uuid: KeyMint instance for key not found.")
+ }
}
fn get_security_level(
&self,
- security_level: SecurityLevel,
- ) -> Result<Box<dyn IKeystoreSecurityLevel>> {
- self.get_security_level_internal(security_level)
- .context("In get_security_level.")?
- .ok_or_else(|| anyhow!(error::Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE)))
+ sec_level: SecurityLevel,
+ ) -> Result<Strong<dyn IKeystoreSecurityLevel>> {
+ if let Some(dev) = self
+ .uuid_by_sec_level
+ .get(&sec_level)
+ .and_then(|uuid| self.i_sec_level_by_uuid.get(uuid))
+ {
+ dev.get_interface().context("In get_security_level.")
+ } else {
+ Err(error::Error::Km(ErrorCode::HARDWARE_TYPE_UNAVAILABLE))
+ .context("In get_security_level: No such security level.")
+ }
}
fn get_key_entry(&self, key: &KeyDescriptor) -> Result<KeyEntryResponse> {
+ let caller_uid = ThreadState::get_calling_uid();
let (key_id_guard, mut key_entry) = DB
.with(|db| {
- db.borrow_mut().load_key_entry(
- key.clone(),
- KeyType::Client,
- KeyEntryLoadBits::PUBLIC,
- ThreadState::get_calling_uid(),
- |k, av| check_key_permission(KeyPerm::get_info(), k, &av),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &key,
+ KeyType::Client,
+ KeyEntryLoadBits::PUBLIC,
+ caller_uid,
+ |k, av| check_key_permission(KeyPerm::get_info(), k, &av),
+ )
+ })
})
.context("In get_key_entry, while trying to load key info.")?;
- let i_sec_level = self
- .get_security_level_internal(key_entry.sec_level())
- .context("In get_key_entry.")?
- .ok_or_else(|| {
- anyhow!(error::Error::sys()).context(format!(
- concat!(
- "Found key with security level {:?} ",
- "but no KeyMint instance of that security level."
- ),
- key_entry.sec_level()
- ))
- })?;
+ let i_sec_level = if !key_entry.pure_cert() {
+ Some(
+ self.get_i_sec_level_by_uuid(key_entry.km_uuid())
+ .context("In get_key_entry: Trying to get security level proxy.")?,
+ )
+ } else {
+ None
+ };
Ok(KeyEntryResponse {
- iSecurityLevel: Some(i_sec_level),
+ iSecurityLevel: i_sec_level,
metadata: KeyMetadata {
key: KeyDescriptor {
domain: Domain::KEY_ID,
nspace: key_id_guard.id(),
..Default::default()
},
- keySecurityLevel: key_entry.sec_level(),
+ keySecurityLevel: self.uuid_to_sec_level(key_entry.km_uuid()),
certificate: key_entry.take_cert(),
certificateChain: key_entry.take_cert_chain(),
modificationTimeMs: key_entry
@@ -152,30 +173,66 @@
public_cert: Option<&[u8]>,
certificate_chain: Option<&[u8]>,
) -> Result<()> {
+ let caller_uid = ThreadState::get_calling_uid();
DB.with::<_, Result<()>>(|db| {
- let mut db = db.borrow_mut();
- let (key_id_guard, key_entry) = db
- .load_key_entry(
- key.clone(),
+ let entry = match LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().load_key_entry(
+ &key,
KeyType::Client,
KeyEntryLoadBits::NONE,
- ThreadState::get_calling_uid(),
+ caller_uid,
|k, av| {
check_key_permission(KeyPerm::update(), k, &av)
.context("In update_subcomponent.")
},
)
- .context("Failed to load key_entry.")?;
+ }) {
+ Err(e) => match e.root_cause().downcast_ref::<Error>() {
+ Some(Error::Rc(ResponseCode::KEY_NOT_FOUND)) => Ok(None),
+ _ => Err(e),
+ },
+ Ok(v) => Ok(Some(v)),
+ }
+ .context("Failed to load key entry.")?;
- if let Some(cert) = public_cert {
- db.insert_blob(&key_id_guard, SubComponentType::CERT, cert)
+ let mut db = db.borrow_mut();
+ if let Some((key_id_guard, _key_entry)) = entry {
+ db.set_blob(&key_id_guard, SubComponentType::CERT, public_cert, None)
.context("Failed to update cert subcomponent.")?;
+
+ db.set_blob(&key_id_guard, SubComponentType::CERT_CHAIN, certificate_chain, None)
+ .context("Failed to update cert chain subcomponent.")?;
+ return Ok(());
}
- if let Some(cert_chain) = certificate_chain {
- db.insert_blob(&key_id_guard, SubComponentType::CERT_CHAIN, cert_chain)
- .context("Failed to update cert chain subcomponent.")?;
+ // If we reach this point we have to check the special condition where a certificate
+ // entry may be made.
+ if !(public_cert.is_none() && certificate_chain.is_some()) {
+ return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("No key to update.");
}
+
+ // So we know that we have a certificate chain and no public cert.
+ // Now check that we have everything we need to make a new certificate entry.
+ let key = match (key.domain, &key.alias) {
+ (Domain::APP, Some(ref alias)) => KeyDescriptor {
+ domain: Domain::APP,
+ nspace: ThreadState::get_calling_uid() as i64,
+ alias: Some(alias.clone()),
+ blob: None,
+ },
+ (Domain::SELINUX, Some(_)) => key.clone(),
+ _ => {
+ return Err(Error::Rc(ResponseCode::INVALID_ARGUMENT))
+ .context("Domain must be APP or SELINUX to insert a certificate.")
+ }
+ };
+
+ // Security critical: This must return on failure. Do not remove the `?`;
+ check_key_permission(KeyPerm::rebind(), &key, &None)
+ .context("Caller does not have permission to insert this certificate.")?;
+
+ db.store_new_certificate(&key, certificate_chain.unwrap(), &KEYSTORE_UUID)
+ .context("Failed to insert new certificate.")?;
Ok(())
})
.context("In update_subcomponent.")
@@ -216,24 +273,34 @@
Ok(()) => {}
};
- DB.with(|db| {
- let mut db = db.borrow_mut();
- db.list(k.domain, k.nspace)
- })
+ let mut result = LEGACY_MIGRATOR
+ .list_uid(k.domain, k.nspace)
+ .context("In list_entries: Trying to list legacy keys.")?;
+
+ result.append(
+ &mut DB
+ .with(|db| {
+ let mut db = db.borrow_mut();
+ db.list(k.domain, k.nspace)
+ })
+ .context("In list_entries: Trying to list keystore database.")?,
+ );
+
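+ // Vec::dedup only removes consecutive duplicates, so sort first; that way an alias
+ // appearing in both the legacy listing and the database is reported only once.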
+ result.sort_unstable();
+ result.dedup();
+ Ok(result)
}
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
- let need_gc = DB
- .with(|db| {
- db.borrow_mut().unbind_key(key.clone(), KeyType::Client, caller_uid, |k, av| {
+ DB.with(|db| {
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
})
})
- .context("In delete_key: Trying to unbind the key.")?;
- if need_gc {
- Gc::notify_gc();
- }
+ })
+ .context("In delete_key: Trying to unbind the key.")?;
Ok(())
}
@@ -243,26 +310,26 @@
grantee_uid: i32,
access_vector: permission::KeyPermSet,
) -> Result<KeyDescriptor> {
+ let caller_uid = ThreadState::get_calling_uid();
DB.with(|db| {
- db.borrow_mut().grant(
- key.clone(),
- ThreadState::get_calling_uid(),
- grantee_uid as u32,
- access_vector,
- |k, av| check_grant_permission(*av, k).context("During grant."),
- )
+ LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ db.borrow_mut().grant(
+ &key,
+ caller_uid,
+ grantee_uid as u32,
+ access_vector,
+ |k, av| check_grant_permission(*av, k).context("During grant."),
+ )
+ })
})
.context("In KeystoreService::grant.")
}
fn ungrant(&self, key: &KeyDescriptor, grantee_uid: i32) -> Result<()> {
DB.with(|db| {
- db.borrow_mut().ungrant(
- key.clone(),
- ThreadState::get_calling_uid(),
- grantee_uid as u32,
- |k| check_key_permission(KeyPerm::grant(), k, &None),
- )
+ db.borrow_mut().ungrant(&key, ThreadState::get_calling_uid(), grantee_uid as u32, |k| {
+ check_key_permission(KeyPerm::grant(), k, &None)
+ })
})
.context("In KeystoreService::ungrant.")
}
@@ -276,8 +343,8 @@
fn getSecurityLevel(
&self,
security_level: SecurityLevel,
- ) -> binder::public_api::Result<Box<dyn IKeystoreSecurityLevel>> {
- map_or_log_err(self.get_security_level(SecurityLevel(security_level.0)), Ok)
+ ) -> binder::public_api::Result<Strong<dyn IKeystoreSecurityLevel>> {
+ map_or_log_err(self.get_security_level(security_level), Ok)
}
fn getKeyEntry(&self, key: &KeyDescriptor) -> binder::public_api::Result<KeyEntryResponse> {
map_or_log_err(self.get_key_entry(key), Ok)
diff --git a/keystore2/src/shared_secret_negotiation.rs b/keystore2/src/shared_secret_negotiation.rs
new file mode 100644
index 0000000..fb55f33
--- /dev/null
+++ b/keystore2/src/shared_secret_negotiation.rs
@@ -0,0 +1,265 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This module implements the shared secret negotiation.
+
+use crate::error::{map_binder_status, map_binder_status_code, Error};
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_hardware_security_keymint::binder::Strong;
+use android_hardware_security_sharedsecret::aidl::android::hardware::security::sharedsecret::{
+ ISharedSecret::ISharedSecret, SharedSecretParameters::SharedSecretParameters,
+};
+use android_security_compat::aidl::android::security::compat::IKeystoreCompatService::IKeystoreCompatService;
+use anyhow::{Context, Result};
+use keystore2_vintf::{get_aidl_instances, get_hidl_instances};
+use std::fmt::{self, Display, Formatter};
+
+/// This function initiates the shared secret negotiation. It starts a thread and then returns
+/// immediately. The thread consults the vintf manifest to enumerate expected negotiation
+/// participants. It then attempts to connect to all of these participants. If any connection
+/// fails the thread will retry once per second to connect to the failed instance(s) until all of
+/// the instances are connected. It then performs the negotiation.
+///
+/// During the first phase of the negotiation it will again try every second until
+/// all instances have responded successfully to account for instances that register early but
+/// are not fully functioning at this time due to hardware delays or boot order dependency issues.
+/// An error during the second phase or a checksum mismatch leads to a panic.
+pub fn perform_shared_secret_negotiation() {
+ std::thread::spawn(|| {
+ let participants = list_participants()
+ .expect("In perform_shared_secret_negotiation: Trying to list participants.");
+ let connected = connect_participants(participants);
+ negotiate_shared_secret(connected);
+ log::info!("Shared secret negotiation concluded successfully.");
+ });
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum SharedSecretParticipant {
+ /// Represents an instance of android.hardware.security.sharedsecret.ISharedSecret.
+ Aidl(String),
+ /// In the legacy case there can be at most one TEE and one Strongbox hal.
+ Hidl { is_strongbox: bool, version: (usize, usize) },
+}
+
+impl Display for SharedSecretParticipant {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ match self {
+ Self::Aidl(instance) => write!(
+ f,
+ "{}.{}/{}",
+ SHARED_SECRET_PACKAGE_NAME, SHARED_SECRET_INTERFACE_NAME, instance
+ ),
+ Self::Hidl { is_strongbox, version: (ma, mi) } => write!(
+ f,
+ "{}@V{}.{}::{}/{}",
+ KEYMASTER_PACKAGE_NAME,
+ ma,
+ mi,
+ KEYMASTER_INTERFACE_NAME,
+ if *is_strongbox { "strongbox" } else { "default" }
+ ),
+ }
+ }
+}
+
+#[derive(thiserror::Error, Debug)]
+enum SharedSecretError {
+ #[error("Shared parameter retrieval failed on instance {p} with error {e:?}.")]
+ ParameterRetrieval { e: Error, p: SharedSecretParticipant },
+ #[error("Shared secret computation failed on instance {p} with error {e:?}.")]
+ Computation { e: Error, p: SharedSecretParticipant },
+ #[error("Checksum comparison failed on instance {0}.")]
+ Checksum(SharedSecretParticipant),
+}
+
+fn filter_map_legacy_km_instances(
+ name: String,
+ version: (usize, usize),
+) -> Option<SharedSecretParticipant> {
+ match name.as_str() {
+ "default" => Some(SharedSecretParticipant::Hidl { is_strongbox: false, version }),
+ "strongbox" => Some(SharedSecretParticipant::Hidl { is_strongbox: true, version }),
+ _ => {
+ log::warn!("Found unexpected keymaster instance: \"{}\"", name);
+ log::warn!("Device is misconfigured. Allowed instances are:");
+ log::warn!(" * default");
+ log::warn!(" * strongbox");
+ None
+ }
+ }
+}
+
+static KEYMASTER_PACKAGE_NAME: &str = "android.hardware.keymaster";
+static KEYMASTER_INTERFACE_NAME: &str = "IKeymasterDevice";
+static SHARED_SECRET_PACKAGE_NAME: &str = "android.hardware.security.sharedsecret";
+static SHARED_SECRET_INTERFACE_NAME: &str = "ISharedSecret";
+static COMPAT_PACKAGE_NAME: &str = "android.security.compat";
+
+/// Lists the expected negotiation participants as declared in the VINTF manifest:
+/// Keymaster 4.0/4.1 HIDL instances and SharedSecret AIDL instances.
+fn list_participants() -> Result<Vec<SharedSecretParticipant>> {
+ Ok([(4, 0), (4, 1)]
+ .iter()
+ .map(|(ma, mi)| {
+ get_hidl_instances(KEYMASTER_PACKAGE_NAME, *ma, *mi, KEYMASTER_INTERFACE_NAME)
+ .as_vec()
+ .with_context(|| format!("Trying to convert KM{}.{} names to vector.", *ma, *mi))
+ .map(|instances| {
+ instances
+ .into_iter()
+ .filter_map(|name| {
+ filter_map_legacy_km_instances(name.to_string(), (*ma, *mi))
+ })
+ .collect::<Vec<SharedSecretParticipant>>()
+ })
+ })
+ .collect::<Result<Vec<_>>>()
+ .map(|v| v.into_iter().flatten())
+ .and_then(|i| {
+ let participants_aidl: Vec<SharedSecretParticipant> =
+ get_aidl_instances(SHARED_SECRET_PACKAGE_NAME, 1, SHARED_SECRET_INTERFACE_NAME)
+ .as_vec()
+ .context("In list_participants: Trying to convert SharedSecret AIDL instance names to vector.")?
+ .into_iter()
+ .map(|name| SharedSecretParticipant::Aidl(name.to_string()))
+ .collect();
+ Ok(i.chain(participants_aidl.into_iter()))
+ })
+ .context("In list_participants.")?
+ .collect())
+}
+
+fn connect_participants(
+ mut participants: Vec<SharedSecretParticipant>,
+) -> Vec<(Strong<dyn ISharedSecret>, SharedSecretParticipant)> {
+ let mut connected_participants: Vec<(Strong<dyn ISharedSecret>, SharedSecretParticipant)> =
+ vec![];
+ loop {
+ let (connected, not_connected) = participants.into_iter().fold(
+ (connected_participants, vec![]),
+ |(mut connected, mut failed), e| {
+ match e {
+ SharedSecretParticipant::Aidl(instance_name) => {
+ let service_name = format!(
+ "{}.{}/{}",
+ SHARED_SECRET_PACKAGE_NAME, SHARED_SECRET_INTERFACE_NAME, instance_name
+ );
+ match map_binder_status_code(binder::get_interface(&service_name)) {
+ Err(e) => {
+ log::warn!(
+ "Unable to connect \"{}\" with error:\n{:?}\nRetrying later.",
+ service_name,
+ e
+ );
+ failed.push(SharedSecretParticipant::Aidl(instance_name));
+ }
+ Ok(service) => connected
+ .push((service, SharedSecretParticipant::Aidl(instance_name))),
+ }
+ }
+ SharedSecretParticipant::Hidl { is_strongbox, version } => {
+ // This is a no-op if it was called before.
+ keystore2_km_compat::add_keymint_device_service();
+
+ // If we cannot connect to the compatibility service there is no way to
+ // recover.
+ // PANIC! - Unless you brought your towel.
+ let keystore_compat_service: Strong<dyn IKeystoreCompatService> =
+ map_binder_status_code(binder::get_interface(COMPAT_PACKAGE_NAME))
+ .expect(
+ "In connect_participants: Trying to connect to compat service.",
+ );
+
+ match map_binder_status(keystore_compat_service.getSharedSecret(
+ if is_strongbox {
+ SecurityLevel::STRONGBOX
+ } else {
+ SecurityLevel::TRUSTED_ENVIRONMENT
+ },
+ )) {
+ Err(e) => {
+ log::warn!(
+ concat!(
+ "Unable to connect keymaster device \"{}\" ",
+ "with error:\n{:?}\nRetrying later."
+ ),
+ if is_strongbox { "strongbox" } else { "TEE" },
+ e
+ );
+ failed
+ .push(SharedSecretParticipant::Hidl { is_strongbox, version });
+ }
+ Ok(service) => connected.push((
+ service,
+ SharedSecretParticipant::Hidl { is_strongbox, version },
+ )),
+ }
+ }
+ }
+ (connected, failed)
+ },
+ );
+ participants = not_connected;
+ connected_participants = connected;
+ if participants.is_empty() {
+ break;
+ }
+ std::thread::sleep(std::time::Duration::from_millis(1000));
+ }
+ connected_participants
+}
+
+fn negotiate_shared_secret(
+ participants: Vec<(Strong<dyn ISharedSecret>, SharedSecretParticipant)>,
+) {
+ // Phase 1: Get the sharing parameters from all participants.
+ let mut params = loop {
+ let result: Result<Vec<SharedSecretParameters>, SharedSecretError> = participants
+ .iter()
+ .map(|(s, p)| {
+ map_binder_status(s.getSharedSecretParameters())
+ .map_err(|e| SharedSecretError::ParameterRetrieval { e, p: (*p).clone() })
+ })
+ .collect();
+
+ match result {
+ Err(e) => {
+ log::warn!("{:?}", e);
+ log::warn!("Retrying in one second.");
+ std::thread::sleep(std::time::Duration::from_millis(1000));
+ }
+ Ok(params) => break params,
+ }
+ };
+
+ params.sort_unstable();
+
+ // Phase 2: Send the sorted sharing parameters to all participants.
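+ // Every participant returns a checksum over the derived shared secret; the fold keeps
+ // the first checksum as the reference and fails hard if any later participant disagrees.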
+ participants
+ .into_iter()
+ .try_fold(None, |acc, (s, p)| {
+ match (acc, map_binder_status(s.computeSharedSecret(&params))) {
+ (None, Ok(new_sum)) => Ok(Some(new_sum)),
+ (Some(old_sum), Ok(new_sum)) => {
+ if old_sum == new_sum {
+ Ok(Some(old_sum))
+ } else {
+ Err(SharedSecretError::Checksum(p))
+ }
+ }
+ (_, Err(e)) => Err(SharedSecretError::Computation { e, p }),
+ }
+ })
+ .expect("Fatal: Shared secret computation failed.");
+}
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index 4ffe897..d490354 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -12,18 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![allow(dead_code)]
-
use crate::{
- database::EncryptedBy, database::KeyMetaData, database::KeyMetaEntry, database::KeystoreDB,
- error::Error, error::ResponseCode, legacy_blob::LegacyBlobLoader,
+ database::BlobMetaData,
+ database::BlobMetaEntry,
+ database::EncryptedBy,
+ database::KeyEntry,
+ database::KeyType,
+ database::{KeyMetaData, KeyMetaEntry, KeystoreDB},
+ ec_crypto::ECDHPrivateKey,
+ enforcements::Enforcements,
+ error::Error,
+ error::ResponseCode,
+ key_parameter::KeyParameter,
+ legacy_blob::LegacyBlobLoader,
+ legacy_migrator::LegacyMigrator,
+ try_insert::TryInsert,
};
use android_system_keystore2::aidl::android::system::keystore2::Domain::Domain;
use anyhow::{Context, Result};
use keystore2_crypto::{
- aes_gcm_decrypt, aes_gcm_encrypt, derive_key_from_password, generate_salt, ZVec,
+ aes_gcm_decrypt, aes_gcm_encrypt, generate_aes256_key, generate_salt, Password, ZVec,
AES_256_KEY_LENGTH,
};
+use std::ops::Deref;
use std::{
collections::HashMap,
sync::Arc,
@@ -32,6 +43,55 @@
type UserId = u32;
+/// Encryption algorithm used by a particular type of superencryption key
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum SuperEncryptionAlgorithm {
+ /// Symmetric encryption with AES-256-GCM
+ Aes256Gcm,
+ /// Public-key encryption with ECDH P-256
+ EcdhP256,
+}
+
+/// A particular user may have several superencryption keys in the database, each for a
+/// different purpose, distinguished by alias. Each is associated with a static
+/// constant of this type.
+pub struct SuperKeyType {
+ /// Alias used to look the key up in the `persistent.keyentry` table.
+ pub alias: &'static str,
+ /// Encryption algorithm
+ pub algorithm: SuperEncryptionAlgorithm,
+}
+
+/// Key used for LskfLocked keys; the corresponding superencryption key is loaded in memory
+/// when the user first unlocks, and remains in memory until the device reboots.
+pub const USER_SUPER_KEY: SuperKeyType =
+ SuperKeyType { alias: "USER_SUPER_KEY", algorithm: SuperEncryptionAlgorithm::Aes256Gcm };
+/// Key used for ScreenLockBound keys; the corresponding superencryption key is loaded in memory
+/// each time the user enters their LSKF, and cleared from memory each time the device is locked.
+/// Symmetric.
+pub const USER_SCREEN_LOCK_BOUND_KEY: SuperKeyType = SuperKeyType {
+ alias: "USER_SCREEN_LOCK_BOUND_KEY",
+ algorithm: SuperEncryptionAlgorithm::Aes256Gcm,
+};
+/// Key used for ScreenLockBound keys; the corresponding superencryption key is loaded in memory
+/// each time the user enters their LSKF, and cleared from memory each time the device is locked.
+/// Asymmetric, so keys can be encrypted when the device is locked.
+pub const USER_SCREEN_LOCK_BOUND_ECDH_KEY: SuperKeyType = SuperKeyType {
+ alias: "USER_SCREEN_LOCK_BOUND_ECDH_KEY",
+ algorithm: SuperEncryptionAlgorithm::EcdhP256,
+};
+
+/// Superencryption to apply to a new key.
+#[derive(Debug, Clone, Copy)]
+pub enum SuperEncryptionType {
+ /// Do not superencrypt this key.
+ None,
+ /// Superencrypt with a key that remains in memory from first unlock to reboot.
+ LskfBound,
+ /// Superencrypt with a key cleared from memory when the device is locked.
+ ScreenLockBound,
+}
+
#[derive(Default)]
struct UserSuperKeys {
/// The per boot key is used for LSKF binding of authentication bound keys. There is one
@@ -39,17 +99,53 @@
/// secret, that is itself derived from the user's lock screen knowledge factor (LSKF).
/// When the user unlocks the device for the first time, this key is unlocked, i.e., decrypted,
/// and stays memory resident until the device reboots.
- per_boot: Option<Arc<ZVec>>,
+ per_boot: Option<Arc<SuperKey>>,
/// The screen lock key works like the per boot key with the distinction that it is cleared
/// from memory when the screen lock is engaged.
- /// TODO the life cycle is not fully implemented at this time.
- screen_lock: Option<Arc<ZVec>>,
+ screen_lock_bound: Option<Arc<SuperKey>>,
+ /// When the device is locked, screen-lock-bound keys can still be encrypted, using
+ /// ECDH public-key encryption. This field holds the decryption private key.
+ screen_lock_bound_private: Option<Arc<SuperKey>>,
+}
+
+pub struct SuperKey {
+ algorithm: SuperEncryptionAlgorithm,
+ key: ZVec,
+ // id of the super key in the database.
+ id: i64,
+ /// ECDH is more expensive than AES. So on ECDH private keys we set the
+ /// reencrypt_with field to point at the corresponding AES key, and the
+ /// keys will be re-encrypted with AES on first use.
+ reencrypt_with: Option<Arc<SuperKey>>,
+}
+
+impl SuperKey {
+ /// For most purposes `unwrap_key` handles decryption,
+ /// but legacy handling and some tests need to assume AES and decrypt directly.
+ pub fn aes_gcm_decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
+ if self.algorithm == SuperEncryptionAlgorithm::Aes256Gcm {
+ aes_gcm_decrypt(data, iv, tag, &self.key)
+ .context("In aes_gcm_decrypt: decryption failed")
+ } else {
+ Err(Error::sys()).context("In aes_gcm_decrypt: Key is not an AES key")
+ }
+ }
+
+ pub fn get_id(&self) -> i64 {
+ self.id
+ }
}
#[derive(Default)]
struct SkmState {
user_keys: HashMap<UserId, UserSuperKeys>,
- key_index: HashMap<i64, Weak<ZVec>>,
+ key_index: HashMap<i64, Weak<SuperKey>>,
+}
+
+impl SkmState {
+ fn add_key_to_key_index(&mut self, super_key: &Arc<SuperKey>) {
+ self.key_index.insert(super_key.id, Arc::downgrade(super_key));
+ }
}
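+
+// Note: `key_index` only holds `Weak` references, so a cached super key drops out of the
+// index as soon as the last strong `Arc` (held in `user_keys`) goes away. A lookup therefore
+// has to upgrade the weak reference, roughly (illustrative sketch, mirroring `get_key` below):
+//
+//     let key: Option<Arc<SuperKey>> =
+//         data.key_index.get(&key_id).and_then(|weak| weak.upgrade());
+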
#[derive(Default)]
@@ -59,21 +155,7 @@
impl SuperKeyManager {
pub fn new() -> Self {
- Self { data: Mutex::new(Default::default()) }
- }
-
- pub fn forget_screen_lock_key_for_user(&self, user: UserId) {
- let mut data = self.data.lock().unwrap();
- if let Some(usk) = data.user_keys.get_mut(&user) {
- usk.screen_lock = None;
- }
- }
-
- pub fn forget_screen_lock_keys(&self) {
- let mut data = self.data.lock().unwrap();
- for (_, usk) in data.user_keys.iter_mut() {
- usk.screen_lock = None;
- }
+ Default::default()
}
pub fn forget_all_keys_for_user(&self, user: UserId) {
@@ -81,26 +163,19 @@
data.user_keys.remove(&user);
}
- pub fn forget_all_keys(&self) {
+ fn install_per_boot_key_for_user(&self, user: UserId, super_key: Arc<SuperKey>) {
let mut data = self.data.lock().unwrap();
- data.user_keys.clear();
- data.key_index.clear();
+ data.add_key_to_key_index(&super_key);
+ data.user_keys.entry(user).or_default().per_boot = Some(super_key);
}
- fn install_per_boot_key_for_user(&self, user: UserId, key_id: i64, key: ZVec) {
- let mut data = self.data.lock().unwrap();
- let key = Arc::new(key);
- data.key_index.insert(key_id, Arc::downgrade(&key));
- data.user_keys.entry(user).or_default().per_boot = Some(key);
- }
-
- fn get_key(&self, key_id: &i64) -> Option<Arc<ZVec>> {
+ fn get_key(&self, key_id: &i64) -> Option<Arc<SuperKey>> {
self.data.lock().unwrap().key_index.get(key_id).and_then(|k| k.upgrade())
}
- pub fn get_per_boot_key_by_user_id(&self, user_id: u32) -> Option<Arc<ZVec>> {
+ pub fn get_per_boot_key_by_user_id(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
let data = self.data.lock().unwrap();
- data.user_keys.get(&user_id).map(|e| e.per_boot.clone()).flatten()
+ data.user_keys.get(&user_id).and_then(|e| e.per_boot.as_ref().cloned())
}
/// This function unlocks the super keys for a given user.
@@ -109,76 +184,42 @@
/// a key derived from the given password and stored in the database.
pub fn unlock_user_key(
&self,
- user: UserId,
- pw: &[u8],
db: &mut KeystoreDB,
+ user: UserId,
+ pw: &Password,
legacy_blob_loader: &LegacyBlobLoader,
) -> Result<()> {
let (_, entry) = db
- .get_or_create_key_with(Domain::APP, user as u64 as i64, &"USER_SUPER_KEY", || {
- // For backward compatibility we need to check if there is a super key present.
- let super_key = legacy_blob_loader
- .load_super_key(user, pw)
- .context("In create_new_key: Failed to load legacy key blob.")?;
- let super_key = match super_key {
- None => {
- // No legacy file was found. So we generate a new key.
- keystore2_crypto::generate_aes256_key()
- .context("In create_new_key: Failed to generate AES 256 key.")?
- }
- Some(key) => key,
- };
- // Regardless of whether we loaded an old AES128 key or a new AES256 key,
- // we derive a AES256 key and re-encrypt the key before we insert it in the
- // database. The length of the key is preserved by the encryption so we don't
- // need any extra flags to inform us which algorithm to use it with.
- let salt =
- generate_salt().context("In create_new_key: Failed to generate salt.")?;
- let derived_key = derive_key_from_password(pw, Some(&salt), AES_256_KEY_LENGTH)
- .context("In create_new_key: Failed to derive password.")?;
- let mut metadata = KeyMetaData::new();
- metadata.add(KeyMetaEntry::EncryptedBy(EncryptedBy::Password));
- metadata.add(KeyMetaEntry::Salt(salt));
- let (encrypted_key, iv, tag) = aes_gcm_encrypt(&super_key, &derived_key)
- .context("In create_new_key: Failed to encrypt new super key.")?;
- metadata.add(KeyMetaEntry::Iv(iv));
- metadata.add(KeyMetaEntry::AeadTag(tag));
- Ok((encrypted_key, metadata))
- })
+ .get_or_create_key_with(
+ Domain::APP,
+ user as u64 as i64,
+ &USER_SUPER_KEY.alias,
+ crate::database::KEYSTORE_UUID,
+ || {
+ // For backward compatibility we need to check if there is a super key present.
+ let super_key = legacy_blob_loader
+ .load_super_key(user, pw)
+ .context("In create_new_key: Failed to load legacy key blob.")?;
+ let super_key = match super_key {
+ None => {
+ // No legacy file was found. So we generate a new key.
+ generate_aes256_key()
+ .context("In create_new_key: Failed to generate AES 256 key.")?
+ }
+ Some(key) => key,
+ };
+ // Regardless of whether we loaded an old AES128 key or generated a new AES256
+ // key as the super key, we derive an AES256 key from the password and re-encrypt
+ // the super key before we insert it in the database. The length of the key is
+ // preserved by the encryption so we don't need any extra flags to inform us
+ // which algorithm to use it with.
+ Self::encrypt_with_password(&super_key, pw).context("In create_new_key.")
+ },
+ )
.context("In unlock_user_key: Failed to get key id.")?;
- let metadata = entry.metadata();
- let super_key = match (
- metadata.encrypted_by(),
- metadata.salt(),
- metadata.iv(),
- metadata.aead_tag(),
- entry.km_blob(),
- ) {
- (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag), Some(blob)) => {
- let key = derive_key_from_password(pw, Some(salt), AES_256_KEY_LENGTH)
- .context("In unlock_user_key: Failed to generate key from password.")?;
-
- aes_gcm_decrypt(blob, iv, tag, &key)
- .context("In unlock_user_key: Failed to decrypt key blob.")?
- }
- (enc_by, salt, iv, tag, blob) => {
- return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
- concat!(
- "In unlock_user_key: Super key has incomplete metadata.",
- "Present: encrypted_by: {}, salt: {}, iv: {}, aead_tag: {}, blob: {}."
- ),
- enc_by.is_some(),
- salt.is_some(),
- iv.is_some(),
- tag.is_some(),
- blob.is_some()
- ));
- }
- };
-
- self.install_per_boot_key_for_user(user, entry.id(), super_key);
-
+ self.populate_cache_from_super_key_blob(user, USER_SUPER_KEY.algorithm, entry, pw)
+ .context("In unlock_user_key.")?;
Ok(())
}
@@ -186,14 +227,17 @@
/// The function queries `metadata.encrypted_by()` to determine the encryption key.
/// It then check if the required key is memory resident, and if so decrypts the
/// blob.
- pub fn unwrap_key(&self, blob: &[u8], metadata: &KeyMetaData) -> Result<ZVec> {
+ pub fn unwrap_key<'a>(&self, blob: &'a [u8], metadata: &BlobMetaData) -> Result<KeyBlob<'a>> {
match metadata.encrypted_by() {
Some(EncryptedBy::KeyId(key_id)) => match self.get_key(key_id) {
- Some(key) => {
- Self::unwrap_key_with_key(blob, metadata, &key).context("In unwrap_key.")
- }
+ Some(super_key) => Ok(KeyBlob::Sensitive {
+ key: Self::unwrap_key_with_key(blob, metadata, &super_key)
+ .context("In unwrap_key: unwrap_key_with_key failed")?,
+ reencrypt_with: super_key.reencrypt_with.as_ref().unwrap_or(&super_key).clone(),
+ force_reencrypt: super_key.reencrypt_with.is_some(),
+ }),
None => Err(Error::Rc(ResponseCode::LOCKED))
- .context("In unwrap_key: Key is not usable until the user entered their LSKF."),
+ .context("In unwrap_key: Required super decryption key is not in memory."),
},
_ => Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
.context("In unwrap_key: Cannot determined wrapping key."),
@@ -201,18 +245,642 @@
}
/// Unwraps an encrypted key blob given an encryption key.
- fn unwrap_key_with_key(blob: &[u8], metadata: &KeyMetaData, key: &[u8]) -> Result<ZVec> {
- match (metadata.iv(), metadata.aead_tag()) {
- (Some(iv), Some(tag)) => aes_gcm_decrypt(blob, iv, tag, key)
- .context("In unwrap_key_with_key: Failed to decrypt the key blob."),
- (iv, tag) => Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
- concat!(
- "In unwrap_key_with_key: Key has incomplete metadata.",
- "Present: iv: {}, aead_tag: {}."
+ fn unwrap_key_with_key(blob: &[u8], metadata: &BlobMetaData, key: &SuperKey) -> Result<ZVec> {
+ match key.algorithm {
+ SuperEncryptionAlgorithm::Aes256Gcm => match (metadata.iv(), metadata.aead_tag()) {
+ (Some(iv), Some(tag)) => key
+ .aes_gcm_decrypt(blob, iv, tag)
+ .context("In unwrap_key_with_key: Failed to decrypt the key blob."),
+ (iv, tag) => Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
+ concat!(
+ "In unwrap_key_with_key: Key has incomplete metadata.",
+ "Present: iv: {}, aead_tag: {}."
+ ),
+ iv.is_some(),
+ tag.is_some(),
+ )),
+ },
+ SuperEncryptionAlgorithm::EcdhP256 => {
+ match (metadata.public_key(), metadata.salt(), metadata.iv(), metadata.aead_tag()) {
+ (Some(public_key), Some(salt), Some(iv), Some(aead_tag)) => {
+ ECDHPrivateKey::from_private_key(&key.key)
+ .and_then(|k| k.decrypt_message(public_key, salt, iv, blob, aead_tag))
+ .context(
+ "In unwrap_key_with_key: Failed to decrypt the key blob with ECDH.",
+ )
+ }
+ (public_key, salt, iv, aead_tag) => {
+ Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
+ concat!(
+ "In unwrap_key_with_key: Key has incomplete metadata.",
+ "Present: public_key: {}, salt: {}, iv: {}, aead_tag: {}."
+ ),
+ public_key.is_some(),
+ salt.is_some(),
+ iv.is_some(),
+ aead_tag.is_some(),
+ ))
+ }
+ }
+ }
+ }
+ }
+
+ /// Checks if the user has set up LSKF, even when the super key cache is empty for the user.
+ pub fn super_key_exists_in_db_for_user(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: UserId,
+ ) -> Result<bool> {
+ let key_in_db = db
+ .key_exists(Domain::APP, user_id as u64 as i64, &USER_SUPER_KEY.alias, KeyType::Super)
+ .context("In super_key_exists_in_db_for_user.")?;
+
+ if key_in_db {
+ Ok(key_in_db)
+ } else {
+ legacy_migrator
+ .has_super_key(user_id)
+ .context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
+ }
+ }
+
+ /// Checks if the user has already set up LSKF (i.e. a super key is persisted in the database
+ /// or the legacy database). If not, return Uninitialized state.
+ /// Otherwise, decrypt the super key with a key derived from the password and return
+ /// LskfUnlocked state.
+ pub fn check_and_unlock_super_key(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: UserId,
+ pw: &Password,
+ ) -> Result<UserState> {
+ let alias = &USER_SUPER_KEY;
+ let result = legacy_migrator
+ .with_try_migrate_super_key(user_id, pw, || db.load_super_key(alias, user_id))
+ .context("In check_and_unlock_super_key. Failed to load super key")?;
+
+ match result {
+ Some((_, entry)) => {
+ let super_key = self
+ .populate_cache_from_super_key_blob(user_id, alias.algorithm, entry, pw)
+ .context("In check_and_unlock_super_key.")?;
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ None => Ok(UserState::Uninitialized),
+ }
+ }
+
+ /// Checks if the user has already set up LSKF (i.e. a super key is persisted in the database
+ /// or the legacy database). If so, return LskfLocked state.
+ /// If the password is provided, generate a new super key, encrypt it with the password,
+ /// store it in the database, populate the super key cache for the new user,
+ /// and return LskfUnlocked state.
+ /// If the password is not provided, return Uninitialized state.
+ pub fn check_and_initialize_super_key(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: UserId,
+ pw: Option<&Password>,
+ ) -> Result<UserState> {
+ let super_key_exists_in_db =
+ Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
+ "In check_and_initialize_super_key. Failed to check if super key exists.",
+ )?;
+ if super_key_exists_in_db {
+ Ok(UserState::LskfLocked)
+ } else if let Some(pw) = pw {
+ // Generate a new super key.
+ let super_key = generate_aes256_key()
+ .context("In check_and_initialize_super_key: Failed to generate AES 256 key.")?;
+ // Derive an AES256 key from the password and re-encrypt the super key
+ // before we insert it in the database.
+ let (encrypted_super_key, blob_metadata) = Self::encrypt_with_password(&super_key, pw)
+ .context("In check_and_initialize_super_key.")?;
+
+ let key_entry = db
+ .store_super_key(
+ user_id,
+ &USER_SUPER_KEY,
+ &encrypted_super_key,
+ &blob_metadata,
+ &KeyMetaData::new(),
+ )
+ .context("In check_and_initialize_super_key. Failed to store super key.")?;
+
+ let super_key = self
+ .populate_cache_from_super_key_blob(
+ user_id,
+ USER_SUPER_KEY.algorithm,
+ key_entry,
+ pw,
+ )
+ .context("In check_and_initialize_super_key.")?;
+ Ok(UserState::LskfUnlocked(super_key))
+ } else {
+ Ok(UserState::Uninitialized)
+ }
+ }
+
+ // Helper function to populate the super key cache from the super key blob loaded from
+ // the database.
+ fn populate_cache_from_super_key_blob(
+ &self,
+ user_id: UserId,
+ algorithm: SuperEncryptionAlgorithm,
+ entry: KeyEntry,
+ pw: &Password,
+ ) -> Result<Arc<SuperKey>> {
+ let super_key = Self::extract_super_key_from_key_entry(algorithm, entry, pw, None)
+ .context(
+ "In populate_cache_from_super_key_blob. Failed to extract super key from key entry",
+ )?;
+ self.install_per_boot_key_for_user(user_id, super_key.clone());
+ Ok(super_key)
+ }
+
+ /// Extracts super key from the entry loaded from the database
+ pub fn extract_super_key_from_key_entry(
+ algorithm: SuperEncryptionAlgorithm,
+ entry: KeyEntry,
+ pw: &Password,
+ reencrypt_with: Option<Arc<SuperKey>>,
+ ) -> Result<Arc<SuperKey>> {
+ if let Some((blob, metadata)) = entry.key_blob_info() {
+ let key = match (
+ metadata.encrypted_by(),
+ metadata.salt(),
+ metadata.iv(),
+ metadata.aead_tag(),
+ ) {
+ (Some(&EncryptedBy::Password), Some(salt), Some(iv), Some(tag)) => {
+ // Note that password encryption is AES no matter the value of algorithm
+ let key = pw.derive_key(Some(salt), AES_256_KEY_LENGTH).context(
+ "In extract_super_key_from_key_entry: Failed to generate key from password.",
+ )?;
+
+ aes_gcm_decrypt(blob, iv, tag, &key).context(
+ "In extract_super_key_from_key_entry: Failed to decrypt key blob.",
+ )?
+ }
+ (enc_by, salt, iv, tag) => {
+ return Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
+ concat!(
+ "In extract_super_key_from_key_entry: Super key has incomplete metadata.",
+ "Present: encrypted_by: {}, salt: {}, iv: {}, aead_tag: {}."
+ ),
+ enc_by.is_some(),
+ salt.is_some(),
+ iv.is_some(),
+ tag.is_some()
+ ));
+ }
+ };
+ Ok(Arc::new(SuperKey { algorithm, key, id: entry.id(), reencrypt_with }))
+ } else {
+ Err(Error::Rc(ResponseCode::VALUE_CORRUPTED))
+ .context("In extract_super_key_from_key_entry: No key blob info.")
+ }
+ }
+
+ /// Encrypts the super key with a key derived from the password, before storing it in the
+ /// database.
+ pub fn encrypt_with_password(
+ super_key: &[u8],
+ pw: &Password,
+ ) -> Result<(Vec<u8>, BlobMetaData)> {
+ let salt = generate_salt().context("In encrypt_with_password: Failed to generate salt.")?;
+ let derived_key = pw
+ .derive_key(Some(&salt), AES_256_KEY_LENGTH)
+ .context("In encrypt_with_password: Failed to derive password.")?;
+ let mut metadata = BlobMetaData::new();
+ metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::Password));
+ metadata.add(BlobMetaEntry::Salt(salt));
+ let (encrypted_key, iv, tag) = aes_gcm_encrypt(super_key, &derived_key)
+ .context("In encrypt_with_password: Failed to encrypt new super key.")?;
+ metadata.add(BlobMetaEntry::Iv(iv));
+ metadata.add(BlobMetaEntry::AeadTag(tag));
+ Ok((encrypted_key, metadata))
+ }
+
+ // Encrypt the given key blob with the user's super key, if the super key exists and the device
+ // is unlocked. If the super key exists and the device is locked, or LSKF has not been set up,
+ // return an error. Note that it is out of the scope of this function to check if super
+ // encryption is required. Such a check should be performed before calling this function.
+ fn super_encrypt_on_key_init(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ user_id: UserId,
+ key_blob: &[u8],
+ ) -> Result<(Vec<u8>, BlobMetaData)> {
+ match UserState::get(db, legacy_migrator, self, user_id)
+ .context("In super_encrypt. Failed to get user state.")?
+ {
+ UserState::LskfUnlocked(super_key) => {
+ Self::encrypt_with_aes_super_key(key_blob, &super_key)
+ .context("In super_encrypt_on_key_init. Failed to encrypt the key.")
+ }
+ UserState::LskfLocked => {
+ Err(Error::Rc(ResponseCode::LOCKED))
+ .context("In super_encrypt_on_key_init. Device is locked.")
+ }
+ UserState::Uninitialized => Err(Error::Rc(ResponseCode::UNINITIALIZED))
+ .context("In super_encrypt. LSKF is not setup for the user."),
+ }
+ }
+
+ // Helper function to encrypt a key with the given super key. Callers should select which
+ // super key to use. This is called when a key is super-encrypted at its creation as well as
+ // at its upgrade.
+ fn encrypt_with_aes_super_key(
+ key_blob: &[u8],
+ super_key: &SuperKey,
+ ) -> Result<(Vec<u8>, BlobMetaData)> {
+ if super_key.algorithm != SuperEncryptionAlgorithm::Aes256Gcm {
+ return Err(Error::sys())
+ .context("In encrypt_with_aes_super_key: unexpected algorithm");
+ }
+ let mut metadata = BlobMetaData::new();
+ let (encrypted_key, iv, tag) = aes_gcm_encrypt(key_blob, &(super_key.key))
+ .context("In encrypt_with_aes_super_key: Failed to encrypt new super key.")?;
+ metadata.add(BlobMetaEntry::Iv(iv));
+ metadata.add(BlobMetaEntry::AeadTag(tag));
+ metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key.id)));
+ Ok((encrypted_key, metadata))
+ }
+
+ /// Check if super encryption is required and if so, super-encrypt the key to be stored in
+ /// the database.
+ #[allow(clippy::too_many_arguments)]
+ pub fn handle_super_encryption_on_key_init(
+ &self,
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ domain: &Domain,
+ key_parameters: &[KeyParameter],
+ flags: Option<i32>,
+ user_id: UserId,
+ key_blob: &[u8],
+ ) -> Result<(Vec<u8>, BlobMetaData)> {
+ match Enforcements::super_encryption_required(domain, key_parameters, flags) {
+ SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
+ SuperEncryptionType::LskfBound => {
+ self.super_encrypt_on_key_init(db, legacy_migrator, user_id, &key_blob).context(
+ "In handle_super_encryption_on_key_init.
+ Failed to super encrypt the key.",
+ )
+ }
+ SuperEncryptionType::ScreenLockBound => {
+ let mut data = self.data.lock().unwrap();
+ let entry = data.user_keys.entry(user_id).or_default();
+ if let Some(super_key) = entry.screen_lock_bound.as_ref() {
+ Self::encrypt_with_aes_super_key(key_blob, &super_key).context(concat!(
+ "In handle_super_encryption_on_key_init. ",
+ "Failed to encrypt the key with screen_lock_bound key."
+ ))
+ } else {
+ // Symmetric key is not available, use public key encryption
+ let loaded =
+ db.load_super_key(&USER_SCREEN_LOCK_BOUND_ECDH_KEY, user_id).context(
+ "In handle_super_encryption_on_key_init: load_super_key failed.",
+ )?;
+ let (key_id_guard, key_entry) = loaded.ok_or_else(Error::sys).context(
+ "In handle_super_encryption_on_key_init: User ECDH key missing.",
+ )?;
+ let public_key =
+ key_entry.metadata().sec1_public_key().ok_or_else(Error::sys).context(
+ "In handle_super_encryption_on_key_init: sec1_public_key missing.",
+ )?;
+ let mut metadata = BlobMetaData::new();
+ let (ephem_key, salt, iv, encrypted_key, aead_tag) =
+ ECDHPrivateKey::encrypt_message(public_key, key_blob).context(concat!(
+ "In handle_super_encryption_on_key_init: ",
+ "ECDHPrivateKey::encrypt_message failed."
+ ))?;
+ metadata.add(BlobMetaEntry::PublicKey(ephem_key));
+ metadata.add(BlobMetaEntry::Salt(salt));
+ metadata.add(BlobMetaEntry::Iv(iv));
+ metadata.add(BlobMetaEntry::AeadTag(aead_tag));
+ metadata.add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(key_id_guard.id())));
+ Ok((encrypted_key, metadata))
+ }
+ }
+ }
+ }
+
+ /// Check if a given key is super-encrypted, from its metadata. If so, unwrap the key using
+ /// the relevant super key.
+ pub fn unwrap_key_if_required<'a>(
+ &self,
+ metadata: &BlobMetaData,
+ key_blob: &'a [u8],
+ ) -> Result<KeyBlob<'a>> {
+ if Self::key_super_encrypted(&metadata) {
+ let unwrapped_key = self
+ .unwrap_key(key_blob, metadata)
+ .context("In unwrap_key_if_required. Error in unwrapping the key.")?;
+ Ok(unwrapped_key)
+ } else {
+ Ok(KeyBlob::Ref(key_blob))
+ }
+ }
+
+ /// Check if a given key needs re-super-encryption, from its KeyBlob type.
+ /// If so, re-super-encrypt the key and return a new set of metadata,
+ /// containing the new super encryption information.
+ pub fn reencrypt_if_required<'a>(
+ key_blob_before_upgrade: &KeyBlob,
+ key_after_upgrade: &'a [u8],
+ ) -> Result<(KeyBlob<'a>, Option<BlobMetaData>)> {
+ match key_blob_before_upgrade {
+ KeyBlob::Sensitive { reencrypt_with: super_key, .. } => {
+ let (key, metadata) =
+ Self::encrypt_with_aes_super_key(key_after_upgrade, super_key)
+ .context("In reencrypt_if_required: Failed to re-super-encrypt key.")?;
+ Ok((KeyBlob::NonSensitive(key), Some(metadata)))
+ }
+ _ => Ok((KeyBlob::Ref(key_after_upgrade), None)),
+ }
+ }
+
+ // Helper function to decide if a key is super encrypted, given metadata.
+ fn key_super_encrypted(metadata: &BlobMetaData) -> bool {
+ if let Some(&EncryptedBy::KeyId(_)) = metadata.encrypted_by() {
+ return true;
+ }
+ false
+ }
+
+ /// Fetch a superencryption key from the database, or create it if it doesn't already exist.
+ /// When this is called, the caller must hold the lock on the SuperKeyManager.
+ /// So it's OK that the check and creation are different DB transactions.
+ fn get_or_create_super_key(
+ db: &mut KeystoreDB,
+ user_id: UserId,
+ key_type: &SuperKeyType,
+ password: &Password,
+ reencrypt_with: Option<Arc<SuperKey>>,
+ ) -> Result<Arc<SuperKey>> {
+ let loaded_key = db.load_super_key(key_type, user_id)?;
+ if let Some((_, key_entry)) = loaded_key {
+ Ok(Self::extract_super_key_from_key_entry(
+ key_type.algorithm,
+ key_entry,
+ password,
+ reencrypt_with,
+ )?)
+ } else {
+ let (super_key, public_key) = match key_type.algorithm {
+ SuperEncryptionAlgorithm::Aes256Gcm => (
+ generate_aes256_key()
+ .context("In get_or_create_super_key: Failed to generate AES 256 key.")?,
+ None,
),
- iv.is_some(),
- tag.is_some(),
- )),
+ SuperEncryptionAlgorithm::EcdhP256 => {
+ let key = ECDHPrivateKey::generate()
+ .context("In get_or_create_super_key: Failed to generate ECDH key")?;
+ (
+ key.private_key()
+ .context("In get_or_create_super_key: private_key failed")?,
+ Some(
+ key.public_key()
+ .context("In get_or_create_super_key: public_key failed")?,
+ ),
+ )
+ }
+ };
+ // Derive an AES256 key from the password and re-encrypt the super key
+ // before we insert it in the database.
+ let (encrypted_super_key, blob_metadata) =
+ Self::encrypt_with_password(&super_key, password)
+ .context("In get_or_create_super_key.")?;
+ let mut key_metadata = KeyMetaData::new();
+ if let Some(pk) = public_key {
+ key_metadata.add(KeyMetaEntry::Sec1PublicKey(pk));
+ }
+ let key_entry = db
+ .store_super_key(
+ user_id,
+ key_type,
+ &encrypted_super_key,
+ &blob_metadata,
+ &key_metadata,
+ )
+ .context("In get_or_create_super_key. Failed to store super key.")?;
+ Ok(Arc::new(SuperKey {
+ algorithm: key_type.algorithm,
+ key: super_key,
+ id: key_entry.id(),
+ reencrypt_with,
+ }))
+ }
+ }
+
+ /// Decrypt the screen-lock bound keys for this user using the password and store in memory.
+ pub fn unlock_screen_lock_bound_key(
+ &self,
+ db: &mut KeystoreDB,
+ user_id: UserId,
+ password: &Password,
+ ) -> Result<()> {
+ let mut data = self.data.lock().unwrap();
+ let entry = data.user_keys.entry(user_id).or_default();
+ let aes = entry
+ .screen_lock_bound
+ .get_or_try_to_insert_with(|| {
+ Self::get_or_create_super_key(
+ db,
+ user_id,
+ &USER_SCREEN_LOCK_BOUND_KEY,
+ password,
+ None,
+ )
+ })?
+ .clone();
+ let ecdh = entry
+ .screen_lock_bound_private
+ .get_or_try_to_insert_with(|| {
+ Self::get_or_create_super_key(
+ db,
+ user_id,
+ &USER_SCREEN_LOCK_BOUND_ECDH_KEY,
+ password,
+ Some(aes.clone()),
+ )
+ })?
+ .clone();
+ data.add_key_to_key_index(&aes);
+ data.add_key_to_key_index(&ecdh);
+ Ok(())
+ }
+
+ /// Wipe the screen-lock bound keys for this user from memory.
+ pub fn lock_screen_lock_bound_key(&self, user_id: UserId) {
+ let mut data = self.data.lock().unwrap();
+ let entry = data.user_keys.entry(user_id).or_default();
+ entry.screen_lock_bound = None;
+ entry.screen_lock_bound_private = None;
+ }
+}
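+
+// Illustrative sketch (not part of the original change): the intended life cycle of the
+// screen-lock-bound keys as a caller might drive it; the `super_key_manager`, `db`,
+// `user_id`, and `password` bindings are hypothetical.
+//
+//     // On device unlock: load or lazily create the symmetric and ECDH super keys.
+//     super_key_manager.unlock_screen_lock_bound_key(&mut db, user_id, &password)?;
+//     // On device lock: drop both keys from memory; new ScreenLockBound blobs are then
+//     // encrypted to the ECDH public key kept in the database.
+//     super_key_manager.lock_screen_lock_bound_key(user_id);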
+
+/// This enum represents different states of the user's life cycle in the device.
+/// For now, only three states are defined. More states may be added later.
+pub enum UserState {
+ // The user has registered LSKF and has unlocked the device by entering PIN/Password,
+ // and hence the per-boot super key is available in the cache.
+ LskfUnlocked(Arc<SuperKey>),
+ // The user has registered LSKF, but has not unlocked the device using password, after reboot.
+ // Hence the per-boot super-key(s) is not available in the cache.
+ // However, the encrypted super key is available in the database.
+ LskfLocked,
+ // There's no user in the device for the given user id, or the user with the user id has not
+ // setup LSKF.
+ Uninitialized,
+}
+
+impl UserState {
+ pub fn get(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ skm: &SuperKeyManager,
+ user_id: UserId,
+ ) -> Result<UserState> {
+ match skm.get_per_boot_key_by_user_id(user_id) {
+ Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
+ None => {
+ // Check if a super key exists in the database or legacy database.
+ // If so, return the locked user state.
+ if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
+ .context("In get.")?
+ {
+ Ok(UserState::LskfLocked)
+ } else {
+ Ok(UserState::Uninitialized)
+ }
+ }
+ }
+ }
+
+ /// Queries user state when serving password change requests.
+ pub fn get_with_password_changed(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ skm: &SuperKeyManager,
+ user_id: UserId,
+ password: Option<&Password>,
+ ) -> Result<UserState> {
+ match skm.get_per_boot_key_by_user_id(user_id) {
+ Some(super_key) => {
+ if password.is_none() {
+ // Transitioning to swiping: delete only the super key in the database and cache, and
+ // the super-encrypted keys in the database (and in KM).
+ Self::reset_user(db, skm, legacy_migrator, user_id, true).context(
+ "In get_with_password_changed: Trying to delete keys from the db.",
+ )?;
+ // LSKF is now removed in Keystore.
+ Ok(UserState::Uninitialized)
+ } else {
+ // Keystore won't be notified when changing to a new password when LSKF is
+ // already set up. Therefore, ideally this path wouldn't be reached.
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ }
+ None => {
+ // Check if a super key exists in the database or legacy database.
+ // If so, return LskfLocked state.
+ // Otherwise: (i) if the password is provided, initialize the super key and return
+ // LskfUnlocked state; (ii) if the password is not provided, return Uninitialized state.
+ skm.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
+ }
+ }
+ }
+
+ /// Queries user state when serving password unlock requests.
+ pub fn get_with_password_unlock(
+ db: &mut KeystoreDB,
+ legacy_migrator: &LegacyMigrator,
+ skm: &SuperKeyManager,
+ user_id: UserId,
+ password: &Password,
+ ) -> Result<UserState> {
+ match skm.get_per_boot_key_by_user_id(user_id) {
+ Some(super_key) => {
+ log::info!("In get_with_password_unlock. Trying to unlock when already unlocked.");
+ Ok(UserState::LskfUnlocked(super_key))
+ }
+ None => {
+ // Check if a super key exists in the database or legacy database.
+ // If not, return Uninitialized state.
+ // Otherwise, try to unlock the super key and, if successful,
+ // return LskfUnlocked state.
+ skm.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
+ .context("In get_with_password_unlock. Failed to unlock super key.")
+ }
+ }
+ }
+
+ /// Delete all the keys created on behalf of the user.
+ /// If 'keep_non_super_encrypted_keys' is set to true, delete only the super key and super
+ /// encrypted keys.
+ pub fn reset_user(
+ db: &mut KeystoreDB,
+ skm: &SuperKeyManager,
+ legacy_migrator: &LegacyMigrator,
+ user_id: UserId,
+ keep_non_super_encrypted_keys: bool,
+ ) -> Result<()> {
+ // mark keys created on behalf of the user as unreferenced.
+ legacy_migrator
+ .bulk_delete_user(user_id, keep_non_super_encrypted_keys)
+ .context("In reset_user: Trying to delete legacy keys.")?;
+ db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
+ .context("In reset user. Error in unbinding keys.")?;
+
+ // Delete the super key from the cache, if it exists.
+ skm.forget_all_keys_for_user(user_id);
+ Ok(())
+ }
+}
+
+/// This enum represents the three states a KeyMint blob can be in, w.r.t. super encryption.
+/// `Sensitive` holds the unencrypted key and a reference to its super key.
+/// `NonSensitive` holds an unencrypted key that is never supposed to be encrypted.
+/// `Ref` holds a reference to a key blob when it does not need to be modified, provided its
+/// lifetime allows it.
+pub enum KeyBlob<'a> {
+ Sensitive {
+ key: ZVec,
+ /// If KeyMint reports that the key must be upgraded, we must
+ /// re-encrypt the key before writing to the database; we use
+ /// this key.
+ reencrypt_with: Arc<SuperKey>,
+ /// If this key was decrypted with an ECDH key, we want to
+ /// re-encrypt it on first use whether it was upgraded or not;
+ /// this field indicates that that's necessary.
+ force_reencrypt: bool,
+ },
+ NonSensitive(Vec<u8>),
+ Ref(&'a [u8]),
+}
+
+impl<'a> KeyBlob<'a> {
+ pub fn force_reencrypt(&self) -> bool {
+ if let KeyBlob::Sensitive { force_reencrypt, .. } = self {
+ *force_reencrypt
+ } else {
+ false
+ }
+ }
+}
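+
+// Illustrative sketch (not part of the original change): how the pieces above combine when
+// KeyMint reports that a key blob needs an upgrade; `metadata`, `blob`, `upgraded_blob`, and
+// `super_key_manager` are hypothetical bindings.
+//
+//     let key_blob = super_key_manager.unwrap_key_if_required(&metadata, &blob)?;
+//     // ... hand &key_blob to KeyMint; if it returns an upgraded blob:
+//     let (new_blob, new_metadata) =
+//         SuperKeyManager::reencrypt_if_required(&key_blob, &upgraded_blob)?;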
+
+/// Deref returns a reference to the key material in any variant.
+impl<'a> Deref for KeyBlob<'a> {
+ type Target = [u8];
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ Self::Sensitive { key, .. } => &key,
+ Self::NonSensitive(key) => &key,
+ Self::Ref(key) => key,
}
}
}
diff --git a/keystore2/src/try_insert.rs b/keystore2/src/try_insert.rs
new file mode 100644
index 0000000..6dd3962
--- /dev/null
+++ b/keystore2/src/try_insert.rs
@@ -0,0 +1,100 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The TryInsert trait adds to Option<T> the method
+//! get_or_try_to_insert_with, which is analogous to
+//! get_or_insert_with, but allows the called function to fail and propagates the failure.
+
+/// The TryInsert trait adds to Option<T> the method
+/// get_or_try_to_insert_with, which is analogous to
+/// get_or_insert_with, but allows the called function to fail and propagates the failure.
+pub trait TryInsert {
+ /// Type of the Ok branch of the Result
+ type Item;
+ /// Inserts a value computed from `f` into the option if it is [`None`],
+ /// then returns a mutable reference to the contained value. If `f`
+ /// returns Err, the Option is unchanged.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: Option<u32> = None;
+ /// assert_eq!(x.get_or_try_to_insert_with(|| Err("oops".to_string())), Err("oops".to_string()));
+ /// {
+ /// let y: &mut u32 = x.get_or_try_to_insert_with(|| Ok::<u32, String>(5)).unwrap();
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ fn get_or_try_to_insert_with<E, F: FnOnce() -> Result<Self::Item, E>>(
+ &mut self,
+ f: F,
+ ) -> Result<&mut Self::Item, E>;
+}
+
+impl<T> TryInsert for Option<T> {
+ type Item = T;
+ fn get_or_try_to_insert_with<E, F: FnOnce() -> Result<Self::Item, E>>(
+ &mut self,
+ f: F,
+ ) -> Result<&mut Self::Item, E> {
+ if self.is_none() {
+ *self = Some(f()?);
+ }
+
+ match self {
+ Some(v) => Ok(v),
+ // SAFETY: a `None` variant for `self` would have been replaced by a `Some`
+ // variant in the code above.
+ None => unsafe { std::hint::unreachable_unchecked() },
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ fn fails() -> Result<i32, String> {
+ Err("fail".to_string())
+ }
+
+ fn succeeds() -> Result<i32, String> {
+ Ok(99)
+ }
+
+ #[test]
+ fn test() {
+ let mut x = None;
+ assert_eq!(x.get_or_try_to_insert_with(fails), Err("fail".to_string()));
+ assert_eq!(x, None);
+ assert_eq!(*x.get_or_try_to_insert_with(succeeds).unwrap(), 99);
+ assert_eq!(x, Some(99));
+ x = Some(42);
+ assert_eq!(*x.get_or_try_to_insert_with(fails).unwrap(), 42);
+ assert_eq!(x, Some(42));
+ assert_eq!(*x.get_or_try_to_insert_with(succeeds).unwrap(), 42);
+ assert_eq!(x, Some(42));
+ *x.get_or_try_to_insert_with(fails).unwrap() = 2;
+ assert_eq!(x, Some(2));
+ *x.get_or_try_to_insert_with(succeeds).unwrap() = 3;
+ assert_eq!(x, Some(3));
+ x = None;
+ *x.get_or_try_to_insert_with(succeeds).unwrap() = 5;
+ assert_eq!(x, Some(5));
+ }
+}
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index 870b7fc..7b58205 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -15,12 +15,13 @@
//! This module implements utility functions used by the Keystore 2.0 service
//! implementation.
-use crate::error::Error;
+use crate::error::{map_binder_status, Error, ErrorCode};
use crate::permission;
use crate::permission::{KeyPerm, KeyPermSet, KeystorePerm};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- KeyCharacteristics::KeyCharacteristics, SecurityLevel::SecurityLevel, Tag::Tag,
+ KeyCharacteristics::KeyCharacteristics, Tag::Tag,
};
+use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_security_apc::aidl::android::security::apc::{
IProtectedConfirmation::{FLAG_UI_OPTION_INVERTED, FLAG_UI_OPTION_MAGNIFIED},
ResponseCode::ResponseCode as ApcResponseCode,
@@ -88,6 +89,34 @@
})
}
+/// This function checks whether a given tag corresponds to one of the device identifiers that
+/// can be attested.
+pub fn is_device_id_attestation_tag(tag: Tag) -> bool {
+ matches!(tag, Tag::ATTESTATION_ID_IMEI | Tag::ATTESTATION_ID_MEID | Tag::ATTESTATION_ID_SERIAL)
+}
+
+/// This function checks whether the calling app has the Android permissions needed to attest
+/// device identifiers. It returns an error if the permissions cannot be verified or if the caller
+/// doesn't have the right permissions, and returns `Ok(())` otherwise.
+pub fn check_device_attestation_permissions() -> anyhow::Result<()> {
+ let permission_controller: binder::Strong<dyn IPermissionController::IPermissionController> =
+ binder::get_interface("permission")?;
+
+ let binder_result = permission_controller.checkPermission(
+ "android.permission.READ_PRIVILEGED_PHONE_STATE",
+ ThreadState::get_calling_pid(),
+ ThreadState::get_calling_uid() as i32,
+ );
+ let has_permissions = map_binder_status(binder_result)
+ .context("In check_device_attestation_permissions: checkPermission failed")?;
+ match has_permissions {
+ true => Ok(()),
+ false => Err(Error::Km(ErrorCode::CANNOT_ATTEST_IDS)).context(concat!(
+ "In check_device_attestation_permissions: ",
+ "caller does not have the permission to attest device IDs"
+ )),
+ }
+}
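+
+// Illustrative sketch (not part of the original change): how a caller might combine the two
+// helpers above when vetting key parameters; `params` is a hypothetical slice of
+// KeyParameter values.
+//
+//     if params.iter().any(|kp| is_device_id_attestation_tag(kp.tag)) {
+//         check_device_attestation_permissions()
+//             .context("Caller may not attest device identifiers.")?;
+//     }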
+
/// Thread safe wrapper around SpIBinder. It is safe to have SpIBinder smart pointers to the
/// same object in multiple threads, but cloning a SpIBinder is not thread safe.
/// Keystore frequently hands out binder tokens to the security level interface. If this
@@ -103,7 +132,7 @@
}
/// Clones the owned SpIBinder and attempts to convert it into the requested interface.
- pub fn get_interface<T: FromIBinder + ?Sized>(&self) -> anyhow::Result<Box<T>> {
+ pub fn get_interface<T: FromIBinder + ?Sized>(&self) -> anyhow::Result<binder::Strong<T>> {
// We can use unwrap here because we never panic when locked, so the mutex
// can never be poisoned.
let lock = self.0.lock().unwrap();
@@ -131,10 +160,6 @@
.flat_map(|aidl_key_char| {
let sec_level = aidl_key_char.securityLevel;
aidl_key_char.authorizations.into_iter().map(move |aidl_kp| {
- let sec_level = match (aidl_kp.tag, sec_level) {
- (Tag::ORIGIN, SecurityLevel::SOFTWARE) => SecurityLevel::TRUSTED_ENVIRONMENT,
- _ => sec_level,
- };
crate::key_parameter::KeyParameter::new(aidl_kp.into(), sec_level)
})
})
@@ -190,10 +215,28 @@
}
/// AID offset for uid space partitioning.
-/// TODO: Replace with bindgen generated from libcutils. b/175619259
-pub const AID_USER_OFFSET: u32 = 100000;
+pub const AID_USER_OFFSET: u32 = cutils_bindgen::AID_USER_OFFSET;
/// Extracts the android user from the given uid.
pub fn uid_to_android_user(uid: u32) -> u32 {
- uid / AID_USER_OFFSET
+ // Safety: No memory access
+ unsafe { cutils_bindgen::multiuser_get_user_id(uid) }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use anyhow::Result;
+
+ #[test]
+ fn check_device_attestation_permissions_test() -> Result<()> {
+ check_device_attestation_permissions().or_else(|error| {
+ match error.root_cause().downcast_ref::<Error>() {
+ // Expected: the context for this test might not be allowed to attest device IDs.
+ Some(Error::Km(ErrorCode::CANNOT_ATTEST_IDS)) => Ok(()),
+ // Other errors are unexpected
+ _ => Err(error),
+ }
+ })
+ }
}
diff --git a/keystore2/src/vintf/Android.bp b/keystore2/src/vintf/Android.bp
new file mode 100644
index 0000000..feec8ae
--- /dev/null
+++ b/keystore2/src/vintf/Android.bp
@@ -0,0 +1,80 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
+rust_library {
+ name: "libkeystore2_vintf_rust",
+ crate_name: "keystore2_vintf",
+ srcs: ["lib.rs"],
+ rustlibs: [
+ "libkeystore2_vintf_bindgen",
+ ],
+ shared_libs: [
+ "libkeystore2_vintf_cpp",
+ "libvintf",
+ ],
+}
+
+cc_library {
+ name: "libkeystore2_vintf_cpp",
+ srcs: [
+ "vintf.cpp",
+ ],
+ shared_libs: [
+ "libvintf",
+ ],
+}
+
+rust_bindgen {
+ name: "libkeystore2_vintf_bindgen",
+ wrapper_src: "vintf.hpp",
+ crate_name: "keystore2_vintf_bindgen",
+ source_stem: "bindings",
+ host_supported: true,
+ shared_libs: ["libvintf"],
+ bindgen_flags: [
+ "--size_t-is-usize",
+ "--whitelist-function", "getHalNames",
+ "--whitelist-function", "getHalNamesAndVersions",
+ "--whitelist-function", "getHidlInstances",
+ "--whitelist-function", "getAidlInstances",
+ "--whitelist-function", "freeNames",
+ ],
+}
+
+rust_test {
+ name: "keystore2_vintf_test",
+ crate_name: "keystore2_vintf_test",
+ srcs: ["lib.rs"],
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ rustlibs: [
+ "libkeystore2_vintf_bindgen",
+ ],
+ static_libs: [
+ "libkeystore2_vintf_cpp",
+ ],
+ shared_libs: [
+ "libc++",
+ "libvintf",
+ ],
+}
diff --git a/keystore2/src/vintf/lib.rs b/keystore2/src/vintf/lib.rs
new file mode 100644
index 0000000..8730a3e
--- /dev/null
+++ b/keystore2/src/vintf/lib.rs
@@ -0,0 +1,127 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Bindings for getting the list of HALs.
+
+use keystore2_vintf_bindgen::{
+ freeNames, getAidlInstances, getHalNames, getHalNamesAndVersions, getHidlInstances,
+};
+use std::ffi::{CStr, CString};
+use std::os::raw::c_char;
+use std::str::Utf8Error;
+
+/// A struct that contains a list of HALs (optionally with version numbers).
+/// To use it, call as_vec to get a Vec view of the data it contains.
+pub struct HalNames {
+ data: *mut *mut c_char,
+ len: usize,
+}
+
+impl Drop for HalNames {
+ fn drop(&mut self) {
+ // Safety: The memory is allocated by our C shim so it must free it as well.
+ unsafe { freeNames(self.data, self.len) }
+ }
+}
+
+impl<'a> HalNames {
+ /// Get a Vec view of the list of HALs.
+ pub fn as_vec(&'a self) -> Result<Vec<&'a str>, Utf8Error> {
+ // Safety: self.data contains self.len C strings.
+ // The lifetimes ensure that the HalNames (and hence the strings) live
+ // at least as long as the returned vector.
+ unsafe { (0..self.len).map(|i| CStr::from_ptr(*self.data.add(i)).to_str()) }.collect()
+ }
+}
+
+/// Gets all HAL names.
+/// Note that this is not a zero-cost shim: it will make copies of the strings.
+pub fn get_hal_names() -> HalNames {
+ let mut len: usize = 0;
+ // Safety: We'll wrap this in HalNames to free the memory it allocates.
+ // It stores the size of the array it returns in len.
+ let raw_strs = unsafe { getHalNames(&mut len) };
+ HalNames { data: raw_strs, len }
+}
+
+/// Gets all HAL names and versions.
+/// Note that this is not a zero-cost shim: it will make copies of the strings.
+pub fn get_hal_names_and_versions() -> HalNames {
+ let mut len: usize = 0;
+ // Safety: We'll wrap this in HalNames to free the memory it allocates.
+ // It stores the size of the array it returns in len.
+ let raw_strs = unsafe { getHalNamesAndVersions(&mut len) };
+ HalNames { data: raw_strs, len }
+}
+
+/// Gets the instances of the given package, version, and interface tuple.
+/// Note that this is not a zero-cost shim: it will make copies of the strings.
+pub fn get_hidl_instances(
+ package: &str,
+ major_version: usize,
+ minor_version: usize,
+ interface_name: &str,
+) -> HalNames {
+ let mut len: usize = 0;
+ let packages = CString::new(package).expect("Failed to make CString from package.");
+ let interface_name =
+ CString::new(interface_name).expect("Failed to make CString from interface_name.");
+ // Safety: We'll wrap this in HalNames to free the memory it allocates.
+ // It stores the size of the array it returns in len.
+ let raw_strs = unsafe {
+ getHidlInstances(
+ &mut len,
+ packages.as_ptr(),
+ major_version,
+ minor_version,
+ interface_name.as_ptr(),
+ )
+ };
+ HalNames { data: raw_strs, len }
+}
+
+/// Gets the instances of the given package, version, and interface tuple.
+/// Note that this is not a zero-cost shim: it will make copies of the strings.
+pub fn get_aidl_instances(package: &str, version: usize, interface_name: &str) -> HalNames {
+ let mut len: usize = 0;
+ let packages = CString::new(package).expect("Failed to make CString from package.");
+ let interface_name =
+ CString::new(interface_name).expect("Failed to make CString from interface_name.");
+ // Safety: We'll wrap this in HalNames to free the memory it allocates.
+ // It stores the size of the array it returns in len.
+ let raw_strs =
+ unsafe { getAidlInstances(&mut len, packages.as_ptr(), version, interface_name.as_ptr()) };
+ HalNames { data: raw_strs, len }
+}
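+
+// Illustrative sketch (not part of the original change): a typical lookup of declared AIDL
+// instances; the package, version, and interface strings are examples only.
+//
+//     let instances = get_aidl_instances("android.hardware.security.keymint", 1, "IKeyMintDevice");
+//     let names: Vec<&str> = instances.as_vec()?;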
+
+#[cfg(test)]
+mod tests {
+
+ use super::*;
+
+ #[test]
+ fn test() -> Result<(), Utf8Error> {
+ let result = get_hal_names();
+ let names = result.as_vec()?;
+ assert_ne!(names.len(), 0);
+
+ let result = get_hal_names_and_versions();
+ let names_and_versions = result.as_vec()?;
+ assert_ne!(names_and_versions.len(), 0);
+
+ assert!(names_and_versions.len() >= names.len());
+
+ Ok(())
+ }
+}
diff --git a/keystore2/src/vintf/vintf.cpp b/keystore2/src/vintf/vintf.cpp
new file mode 100644
index 0000000..e407efa
--- /dev/null
+++ b/keystore2/src/vintf/vintf.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vintf.hpp"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <vintf/HalManifest.h>
+#include <vintf/VintfObject.h>
+
+// Converts a set<string> into a C-style array of C strings.
+static char** convert(const std::set<std::string>& names) {
+ char** ret = new char*[names.size()];
+ char** ptr = ret;
+ for (const auto& name : names) {
+ *(ptr++) = strdup(name.c_str());
+ }
+ return ret;
+}
+
+char** getHalNames(size_t* len) {
+ auto manifest = android::vintf::VintfObject::GetDeviceHalManifest();
+ const auto names = manifest->getHalNames();
+ *len = names.size();
+ return convert(names);
+}
+
+char** getHalNamesAndVersions(size_t* len) {
+ auto manifest = android::vintf::VintfObject::GetDeviceHalManifest();
+ const auto names = manifest->getHalNamesAndVersions();
+ *len = names.size();
+ return convert(names);
+}
+
+char** getHidlInstances(size_t* len, const char* package, size_t major_version,
+ size_t minor_version, const char* interfaceName) {
+ android::vintf::Version version(major_version, minor_version);
+ auto manifest = android::vintf::VintfObject::GetDeviceHalManifest();
+ const auto names = manifest->getHidlInstances(package, version, interfaceName);
+ *len = names.size();
+ return convert(names);
+}
+
+char** getAidlInstances(size_t* len, const char* package, size_t version,
+ const char* interfaceName) {
+ auto manifest = android::vintf::VintfObject::GetDeviceHalManifest();
+ const auto names = manifest->getAidlInstances(package, version, interfaceName);
+ *len = names.size();
+ return convert(names);
+}
+
+void freeNames(char** names, size_t len) {
+ for (size_t i = 0; i < len; i++) {
+ free(names[i]);
+ }
+ delete[] names;
+}
diff --git a/keystore2/src/vintf/vintf.hpp b/keystore2/src/vintf/vintf.hpp
new file mode 100644
index 0000000..091e8e8
--- /dev/null
+++ b/keystore2/src/vintf/vintf.hpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VINTF_H__
+#define __VINTF_H__
+
+#include <stddef.h>
+
+extern "C" {
+
+char** getHalNames(size_t* len);
+char** getHalNamesAndVersions(size_t* len);
+char** getHidlInstances(size_t* len, const char* package, size_t major_version,
+ size_t minor_version, const char* interfaceName);
+char** getAidlInstances(size_t* len, const char* package, size_t version,
+ const char* interfaceName);
+void freeNames(char** names, size_t len);
+}
+
+#endif // __VINTF_H__
diff --git a/keystore2/system_property/Android.bp b/keystore2/system_property/Android.bp
new file mode 100644
index 0000000..5a13c90
--- /dev/null
+++ b/keystore2/system_property/Android.bp
@@ -0,0 +1,52 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
+rust_bindgen {
+ name: "libkeystore2_system_property_bindgen",
+ wrapper_src: "system_property_bindgen.hpp",
+ crate_name: "keystore2_system_property_bindgen",
+ source_stem: "bindings",
+
+ bindgen_flags: [
+ "--size_t-is-usize",
+ "--whitelist-function=__system_property_find",
+ "--whitelist-function=__system_property_read_callback",
+ "--whitelist-function=__system_property_wait",
+ ],
+}
+
+rust_library {
+ name: "libkeystore2_system_property-rust",
+ crate_name: "keystore2_system_property",
+ srcs: [
+ "lib.rs",
+ ],
+ rustlibs: [
+ "libanyhow",
+ "libkeystore2_system_property_bindgen",
+ "libthiserror",
+ ],
+ shared_libs: [
+ "libbase",
+ ],
+}
diff --git a/keystore2/system_property/lib.rs b/keystore2/system_property/lib.rs
new file mode 100644
index 0000000..f14cf0e
--- /dev/null
+++ b/keystore2/system_property/lib.rs
@@ -0,0 +1,148 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This crate provides the PropertyWatcher type, which watches for changes
+//! in Android system properties.
+
+use std::os::raw::c_char;
+use std::ptr::null_mut;
+use std::{
+ ffi::{c_void, CStr, CString},
+ str::Utf8Error,
+};
+use thiserror::Error;
+
+/// Errors this crate can generate
+#[derive(Error, Debug)]
+pub enum PropertyWatcherError {
+ /// We can't watch for a property whose name contains a NUL character.
+ #[error("Cannot convert name to C string")]
+ BadNameError(#[from] std::ffi::NulError),
+ /// We can only watch for properties that exist when the watcher is created.
+ #[error("System property is absent")]
+ SystemPropertyAbsent,
+ /// __system_property_wait timed out despite being given no timeout.
+ #[error("Wait failed")]
+ WaitFailed,
+ /// read callback was not called
+ #[error("__system_property_read_callback did not call callback")]
+ ReadCallbackNotCalled,
+ /// read callback gave us a NULL pointer
+ #[error("__system_property_read_callback gave us a NULL pointer instead of a string")]
+ MissingCString,
+ /// read callback gave us a bad C string
+ #[error("__system_property_read_callback gave us a non-UTF8 C string")]
+ BadCString(#[from] Utf8Error),
+ /// read callback returned an error
+ #[error("Callback failed")]
+ CallbackError(#[from] anyhow::Error),
+}
+
+/// Result type specific for this crate.
+pub type Result<T> = std::result::Result<T, PropertyWatcherError>;
+
+/// PropertyWatcher takes the name of an Android system property such
+/// as `keystore.boot_level`; it can report the current value of this
+/// property, or wait for it to change.
+pub struct PropertyWatcher {
+ prop_info: *const keystore2_system_property_bindgen::prop_info,
+ serial: keystore2_system_property_bindgen::__uint32_t,
+}
+
+impl PropertyWatcher {
+ /// Create a PropertyWatcher for the named system property.
+ pub fn new(name: &str) -> Result<Self> {
+ let cstr = CString::new(name)?;
+ // Unsafe FFI call. We generate the CStr in this function
+ // and so ensure it is valid during call.
+ // Returned pointer is valid for the lifetime of the program.
+ let prop_info =
+ unsafe { keystore2_system_property_bindgen::__system_property_find(cstr.as_ptr()) };
+ if prop_info.is_null() {
+ Err(PropertyWatcherError::SystemPropertyAbsent)
+ } else {
+ Ok(Self { prop_info, serial: 0 })
+ }
+ }
+
+ fn read_raw(&self, mut f: impl FnMut(Option<&CStr>, Option<&CStr>)) {
+ // Unsafe function that converts the values passed to us by
+ // __system_property_read_callback to Rust form
+ // and passes them to the inner callback.
+ unsafe extern "C" fn callback(
+ res_p: *mut c_void,
+ name: *const c_char,
+ value: *const c_char,
+ _: keystore2_system_property_bindgen::__uint32_t,
+ ) {
+ let name = if name.is_null() { None } else { Some(CStr::from_ptr(name)) };
+ let value = if value.is_null() { None } else { Some(CStr::from_ptr(value)) };
+ let f = &mut *res_p.cast::<&mut dyn FnMut(Option<&CStr>, Option<&CStr>)>();
+ f(name, value);
+ }
+
+ let mut f: &mut dyn FnMut(Option<&CStr>, Option<&CStr>) = &mut f;
+
+ // Unsafe block for FFI call. We convert the closure
+ // to a void pointer, and cast it back in our callback.
+ unsafe {
+ keystore2_system_property_bindgen::__system_property_read_callback(
+ self.prop_info,
+ Some(callback),
+ &mut f as *mut _ as *mut c_void,
+ )
+ }
+ }
+
+ /// Call the passed function, passing it the name and current value
+ /// of this system property. See documentation for
+ /// `__system_property_read_callback` for details.
+ pub fn read<T, F>(&self, mut f: F) -> Result<T>
+ where
+ F: FnMut(&str, &str) -> anyhow::Result<T>,
+ {
+ let mut result = Err(PropertyWatcherError::ReadCallbackNotCalled);
+ self.read_raw(|name, value| {
+ // Use a wrapping closure as an ersatz try block.
+ result = (|| {
+ let name = name.ok_or(PropertyWatcherError::MissingCString)?.to_str()?;
+ let value = value.ok_or(PropertyWatcherError::MissingCString)?.to_str()?;
+ f(name, value).map_err(PropertyWatcherError::CallbackError)
+ })()
+ });
+ result
+ }
+
+ /// Wait for the system property to change. This
+ /// records the serial number of the last change, so
+ /// race conditions are avoided.
+ pub fn wait(&mut self) -> Result<()> {
+ let mut new_serial = self.serial;
+ // Unsafe block to call __system_property_wait.
+ // All arguments are private to PropertyWatcher so we
+ // can be confident they are valid.
+ if !unsafe {
+ keystore2_system_property_bindgen::__system_property_wait(
+ self.prop_info,
+ self.serial,
+ &mut new_serial,
+ null_mut(),
+ )
+ } {
+ return Err(PropertyWatcherError::WaitFailed);
+ }
+ self.serial = new_serial;
+ Ok(())
+ }
+}
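+
+// Illustrative sketch (not part of the original change): a minimal watch loop, assuming the
+// watched property already exists; the property name and the use of the value are examples only.
+//
+//     let mut watcher = PropertyWatcher::new("keystore.boot_level")?;
+//     loop {
+//         let value = watcher.read(|_name, value| Ok(value.to_owned()))?;
+//         // ... react to the new value ...
+//         watcher.wait()?;
+//     }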
diff --git a/keystore2/system_property/system_property_bindgen.hpp b/keystore2/system_property/system_property_bindgen.hpp
new file mode 100644
index 0000000..e3c1ade
--- /dev/null
+++ b/keystore2/system_property/system_property_bindgen.hpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "sys/system_properties.h"
diff --git a/keystore2/vpnprofilestore/Android.bp b/keystore2/vpnprofilestore/Android.bp
new file mode 100644
index 0000000..7ddf0d6
--- /dev/null
+++ b/keystore2/vpnprofilestore/Android.bp
@@ -0,0 +1,57 @@
+// Copyright 2021, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
+rust_library {
+ name: "libvpnprofilestore-rust",
+ crate_name: "vpnprofilestore",
+ srcs: [
+ "lib.rs",
+ ],
+ rustlibs: [
+ "android.security.vpnprofilestore-rust",
+ "libanyhow",
+ "libbinder_rs",
+ "libkeystore2",
+ "liblog_rust",
+ "librusqlite",
+ "libthiserror",
+ ],
+}
+
+rust_test {
+ name: "vpnprofilestore_test",
+ crate_name: "vpnprofilestore",
+ srcs: ["lib.rs"],
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ rustlibs: [
+ "android.security.vpnprofilestore-rust",
+ "libanyhow",
+ "libbinder_rs",
+ "libkeystore2",
+ "libkeystore2_test_utils",
+ "liblog_rust",
+ "librusqlite",
+ "libthiserror",
+ ],
+}
diff --git a/keystore2/vpnprofilestore/lib.rs b/keystore2/vpnprofilestore/lib.rs
new file mode 100644
index 0000000..f92eacd
--- /dev/null
+++ b/keystore2/vpnprofilestore/lib.rs
@@ -0,0 +1,443 @@
+// Copyright 2020, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implements the android.security.vpnprofilestore interface.
+
+use android_security_vpnprofilestore::aidl::android::security::vpnprofilestore::{
+ IVpnProfileStore::BnVpnProfileStore, IVpnProfileStore::IVpnProfileStore,
+ IVpnProfileStore::ERROR_PROFILE_NOT_FOUND, IVpnProfileStore::ERROR_SYSTEM_ERROR,
+};
+use android_security_vpnprofilestore::binder::{Result as BinderResult, Status as BinderStatus};
+use anyhow::{Context, Result};
+use binder::{ExceptionCode, Strong, ThreadState};
+use keystore2::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
+use rusqlite::{
+ params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
+};
+use std::{
+ collections::HashSet,
+ path::{Path, PathBuf},
+};
+
+struct DB {
+ conn: Connection,
+}
+
+impl DB {
+ fn new(db_file: &Path) -> Result<Self> {
+ let mut db = Self {
+ conn: Connection::open(db_file).context("Failed to initialize SQLite connection.")?,
+ };
+ db.init_tables().context("Trying to initialize vpnprofilestore db.")?;
+ Ok(db)
+ }
+
+ fn with_transaction<T, F>(&mut self, behavior: TransactionBehavior, f: F) -> Result<T>
+ where
+ F: Fn(&Transaction) -> Result<T>,
+ {
+ loop {
+ match self
+ .conn
+ .transaction_with_behavior(behavior)
+ .context("In with_transaction.")
+ .and_then(|tx| f(&tx).map(|result| (result, tx)))
+ .and_then(|(result, tx)| {
+ tx.commit().context("In with_transaction: Failed to commit transaction.")?;
+ Ok(result)
+ }) {
+ Ok(result) => break Ok(result),
+ Err(e) => {
+ if Self::is_locked_error(&e) {
+ std::thread::sleep(std::time::Duration::from_micros(500));
+ continue;
+ } else {
+ return Err(e).context("In with_transaction.");
+ }
+ }
+ }
+ }
+ }
+
+ fn is_locked_error(e: &anyhow::Error) -> bool {
+ matches!(e.root_cause().downcast_ref::<rusqlite::ffi::Error>(),
+ Some(rusqlite::ffi::Error {
+ code: rusqlite::ErrorCode::DatabaseBusy,
+ ..
+ })
+ | Some(rusqlite::ffi::Error {
+ code: rusqlite::ErrorCode::DatabaseLocked,
+ ..
+ }))
+ }
+
+ fn init_tables(&mut self) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "CREATE TABLE IF NOT EXISTS profiles (
+ owner INTEGER,
+ alias BLOB,
+ profile BLOB,
+ UNIQUE(owner, alias));",
+ NO_PARAMS,
+ )
+ .context("Failed to initialize \"profiles\" table.")?;
+ Ok(())
+ })
+ }
+
+ fn list(&mut self, caller_uid: u32) -> Result<Vec<String>> {
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ let mut stmt = tx
+ .prepare("SELECT alias FROM profiles WHERE owner = ? ORDER BY alias ASC;")
+ .context("In list: Failed to prepare statement.")?;
+
+ let aliases = stmt
+ .query_map(params![caller_uid], |row| row.get(0))?
+ .collect::<rusqlite::Result<Vec<String>>>()
+ .context("In list: query_map failed.");
+ aliases
+ })
+ }
+
+ fn put(&mut self, caller_uid: u32, alias: &str, profile: &[u8]) -> Result<()> {
+ self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "INSERT OR REPLACE INTO profiles (owner, alias, profile) values (?, ?, ?)",
+ params![caller_uid, alias, profile,],
+ )
+ .context("In put: Failed to insert or replace.")?;
+ Ok(())
+ })
+ }
+
+ fn get(&mut self, caller_uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
+ self.with_transaction(TransactionBehavior::Deferred, |tx| {
+ tx.query_row(
+ "SELECT profile FROM profiles WHERE owner = ? AND alias = ?;",
+ params![caller_uid, alias],
+ |row| row.get(0),
+ )
+ .optional()
+ .context("In get: failed loading profile.")
+ })
+ }
+
+ fn remove(&mut self, caller_uid: u32, alias: &str) -> Result<bool> {
+ let removed = self.with_transaction(TransactionBehavior::Immediate, |tx| {
+ tx.execute(
+ "DELETE FROM profiles WHERE owner = ? AND alias = ?;",
+ params![caller_uid, alias],
+ )
+ .context("In remove: Failed to delete row.")
+ })?;
+ Ok(removed == 1)
+ }
+}
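A minimal sketch of how the helper methods above are driven, not part of the patch; the database path, owner uid, and alias are illustrative only.

    // Sketch only: every call below runs inside with_transaction, which retries
    // while SQLite reports DatabaseBusy or DatabaseLocked.
    fn db_sketch() -> Result<()> {
        let mut db = DB::new(Path::new("/data/local/tmp/vpnprofilestore.sqlite"))?;
        db.put(10001, "corp-vpn", &[0xde, 0xad, 0xbe, 0xef])?;
        assert_eq!(
            db.get(10001, "corp-vpn")?.as_deref(),
            Some(&[0xde_u8, 0xad, 0xbe, 0xef][..])
        );
        assert_eq!(db.list(10001)?, vec!["corp-vpn".to_string()]);
        assert!(db.remove(10001, "corp-vpn")?);
        Ok(())
    }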
+
+/// This is the main VpnProfileStore error type. It wraps binder exceptions and
+/// VpnProfileStore errors.
+#[derive(Debug, thiserror::Error, PartialEq)]
+pub enum Error {
+ /// Wraps a VpnProfileStore error code.
+ #[error("Error::Error({0:?})")]
+ Error(i32),
+ /// Wraps a Binder exception code other than a service specific exception.
+ #[error("Binder exception code {0:?}, {1:?}")]
+ Binder(ExceptionCode, i32),
+}
+
+impl Error {
+ /// Short hand for `Error::Error(ERROR_SYSTEM_ERROR)`
+ pub fn sys() -> Self {
+ Error::Error(ERROR_SYSTEM_ERROR)
+ }
+
+ /// Short hand for `Error::Error(ERROR_PROFILE_NOT_FOUND)`
+ pub fn not_found() -> Self {
+ Error::Error(ERROR_PROFILE_NOT_FOUND)
+ }
+}
+
+/// This function should be used by vpnprofilestore service calls to translate error conditions
+/// into service specific exceptions.
+///
+/// All error conditions get logged by this function.
+///
+/// `Error::Error(x)` variants get mapped onto a service specific error code of `x`.
+///
+/// All non `Error` error conditions get mapped onto `ERROR_SYSTEM_ERROR`.
+///
+/// `handle_ok` will be called if `result` is `Ok(value)` where `value` will be passed
+/// as argument to `handle_ok`. `handle_ok` must generate a `BinderResult<T>`, but it
+/// typically returns Ok(value).
+fn map_or_log_err<T, U, F>(result: Result<U>, handle_ok: F) -> BinderResult<T>
+where
+ F: FnOnce(U) -> BinderResult<T>,
+{
+ result.map_or_else(
+ |e| {
+ log::error!("{:#?}", e);
+ let root_cause = e.root_cause();
+ let rc = match root_cause.downcast_ref::<Error>() {
+ Some(Error::Error(e)) => *e,
+ Some(Error::Binder(_, _)) | None => ERROR_SYSTEM_ERROR,
+ };
+ Err(BinderStatus::new_service_specific_error(rc, None))
+ },
+ handle_ok,
+ )
+}
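As a concrete illustration, not part of the patch and assuming binder's `Status::service_specific_error` accessor, an error chain whose root cause is `Error::not_found()` comes back to the client as that service-specific code:

    // Sketch only: how a failed lookup propagates through map_or_log_err.
    fn map_or_log_err_sketch() {
        let failed: Result<()> = Err(Error::not_found()).context("In get: No such profile.");
        let status: BinderResult<()> = map_or_log_err(failed, Ok);
        // The root cause is Error::Error(ERROR_PROFILE_NOT_FOUND), so that is the
        // code the binder client observes.
        assert_eq!(status.unwrap_err().service_specific_error(), ERROR_PROFILE_NOT_FOUND);
    }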
+
+/// Implements IVpnProfileStore AIDL interface.
+pub struct VpnProfileStore {
+ db_path: PathBuf,
+ async_task: AsyncTask,
+}
+
+struct AsyncState {
+ recently_imported: HashSet<(u32, String)>,
+ legacy_loader: LegacyBlobLoader,
+ db_path: PathBuf,
+}
+
+impl VpnProfileStore {
+ /// Creates a new VpnProfileStore instance.
+ pub fn new_native_binder(path: &Path) -> Strong<dyn IVpnProfileStore> {
+ let mut db_path = path.to_path_buf();
+ db_path.push("vpnprofilestore.sqlite");
+
+ let result = Self { db_path, async_task: Default::default() };
+ result.init_shelf(path);
+ BnVpnProfileStore::new_binder(result)
+ }
+
+ fn open_db(&self) -> Result<DB> {
+ DB::new(&self.db_path).context("In open_db: Failed to open db.")
+ }
+
+ fn get(&self, alias: &str) -> Result<Vec<u8>> {
+ let mut db = self.open_db().context("In get.")?;
+ let calling_uid = ThreadState::get_calling_uid();
+
+ if let Some(profile) =
+ db.get(calling_uid, alias).context("In get: Trying to load profile from DB.")?
+ {
+ return Ok(profile);
+ }
+ if self.get_legacy(calling_uid, alias).context("In get: Trying to migrate legacy blob.")? {
+ // If we were able to migrate a legacy blob try again.
+ if let Some(profile) =
+ db.get(calling_uid, alias).context("In get: Trying to load profile from DB.")?
+ {
+ return Ok(profile);
+ }
+ }
+ Err(Error::not_found()).context("In get: No such profile.")
+ }
+
+ fn put(&self, alias: &str, profile: &[u8]) -> Result<()> {
+ let calling_uid = ThreadState::get_calling_uid();
+ // To avoid stale legacy profiles, make sure they are migrated before
+ // replacing them.
+ let _ = self.get_legacy(calling_uid, alias);
+ let mut db = self.open_db().context("In put.")?;
+ db.put(calling_uid, alias, profile).context("In put: Trying to insert profile into DB.")
+ }
+
+ fn remove(&self, alias: &str) -> Result<()> {
+ let calling_uid = ThreadState::get_calling_uid();
+ let mut db = self.open_db().context("In remove.")?;
+ // To avoid stale legacy profiles, make sure they are migrated before
+ // removing them.
+ let _ = self.get_legacy(calling_uid, alias);
+ let removed = db
+ .remove(calling_uid, alias)
+ .context("In remove: Trying to remove profile from DB.")?;
+ if removed {
+ Ok(())
+ } else {
+ Err(Error::not_found()).context("In remove: No such profile.")
+ }
+ }
+
+ fn list(&self, prefix: &str) -> Result<Vec<String>> {
+ let mut db = self.open_db().context("In list.")?;
+ let calling_uid = ThreadState::get_calling_uid();
+ let mut result = self.list_legacy(calling_uid).context("In list.")?;
+ result
+ .append(&mut db.list(calling_uid).context("In list: Trying to get list of profiles.")?);
+ result = result.into_iter().filter(|s| s.starts_with(prefix)).collect();
+ result.sort_unstable();
+ result.dedup();
+ Ok(result)
+ }
+
+ fn init_shelf(&self, path: &Path) {
+ let mut db_path = path.to_path_buf();
+ self.async_task.queue_hi(move |shelf| {
+ let legacy_loader = LegacyBlobLoader::new(&db_path);
+ db_path.push("vpnprofilestore.sqlite");
+
+ shelf.put(AsyncState { legacy_loader, db_path, recently_imported: Default::default() });
+ })
+ }
+
+ fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Result<T>
+ where
+ F: FnOnce(&mut AsyncState) -> Result<T> + Send + 'static,
+ {
+ let (sender, receiver) = std::sync::mpsc::channel::<Result<T>>();
+ self.async_task.queue_hi(move |shelf| {
+ let state = shelf.get_downcast_mut::<AsyncState>().expect("Failed to get shelf.");
+ sender.send(f(state)).expect("Failed to send result.");
+ });
+ receiver.recv().context("In do_serialized: Failed to receive result.")?
+ }
+
+ fn list_legacy(&self, uid: u32) -> Result<Vec<String>> {
+ self.do_serialized(move |state| {
+ state
+ .legacy_loader
+ .list_vpn_profiles(uid)
+ .context("Trying to list legacy vnp profiles.")
+ })
+ .context("In list_legacy.")
+ }
+
+ fn get_legacy(&self, uid: u32, alias: &str) -> Result<bool> {
+ let alias = alias.to_string();
+ self.do_serialized(move |state| {
+ if state.recently_imported.contains(&(uid, alias.clone())) {
+ return Ok(true);
+ }
+ let mut db = DB::new(&state.db_path).context("In get_legacy: Failed to open db.")?;
+ let migrated =
+ Self::migrate_one_legacy_profile(uid, &alias, &state.legacy_loader, &mut db)
+ .context("Trying to migrate legacy vpn profile.")?;
+ if migrated {
+ state.recently_imported.insert((uid, alias));
+ }
+ Ok(migrated)
+ })
+ .context("In get_legacy.")
+ }
+
+ fn migrate_one_legacy_profile(
+ uid: u32,
+ alias: &str,
+ legacy_loader: &LegacyBlobLoader,
+ db: &mut DB,
+ ) -> Result<bool> {
+ let blob = legacy_loader
+ .read_vpn_profile(uid, alias)
+ .context("In migrate_one_legacy_profile: Trying to read legacy vpn profile.")?;
+ if let Some(profile) = blob {
+ db.put(uid, alias, &profile)
+ .context("In migrate_one_legacy_profile: Trying to insert profile into DB.")?;
+ legacy_loader
+ .remove_vpn_profile(uid, alias)
+ .context("In migrate_one_legacy_profile: Trying to delete legacy profile.")?;
+ Ok(true)
+ } else {
+ Ok(false)
+ }
+ }
+}
+
+impl binder::Interface for VpnProfileStore {}
+
+impl IVpnProfileStore for VpnProfileStore {
+ fn get(&self, alias: &str) -> BinderResult<Vec<u8>> {
+ map_or_log_err(self.get(alias), Ok)
+ }
+ fn put(&self, alias: &str, profile: &[u8]) -> BinderResult<()> {
+ map_or_log_err(self.put(alias, profile), Ok)
+ }
+ fn remove(&self, alias: &str) -> BinderResult<()> {
+ map_or_log_err(self.remove(alias), Ok)
+ }
+ fn list(&self, prefix: &str) -> BinderResult<Vec<String>> {
+ map_or_log_err(self.list(prefix), Ok)
+ }
+}
+
+#[cfg(test)]
+mod db_test {
+ use super::*;
+ use keystore2_test_utils::TempDir;
+
+ static TEST_BLOB1: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+ static TEST_BLOB2: &[u8] = &[2, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+ static TEST_BLOB3: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+ static TEST_BLOB4: &[u8] = &[3, 2, 3, 4, 5, 6, 7, 8, 9, 0];
+
+ #[test]
+ fn test_profile_db() {
+ let test_dir = TempDir::new("profiledb_test_").expect("Failed to create temp dir.");
+ let mut db =
+ DB::new(&test_dir.build().push("vpnprofile.sqlite")).expect("Failed to open database.");
+
+ // Insert three profiles for owner 2.
+ db.put(2, "test1", TEST_BLOB1).expect("Failed to insert test1.");
+ db.put(2, "test2", TEST_BLOB2).expect("Failed to insert test2.");
+ db.put(2, "test3", TEST_BLOB3).expect("Failed to insert test3.");
+
+ // Check list returns all inserted aliases.
+ assert_eq!(
+ vec!["test1".to_string(), "test2".to_string(), "test3".to_string(),],
+ db.list(2).expect("Failed to list profiles.")
+ );
+
+ // There should be no profiles for owner 1.
+ assert_eq!(Vec::<String>::new(), db.list(1).expect("Failed to list profiles."));
+
+ // Check the content of the three entries.
+ assert_eq!(
+ Some(TEST_BLOB1),
+ db.get(2, "test1").expect("Failed to get profile.").as_deref()
+ );
+ assert_eq!(
+ Some(TEST_BLOB2),
+ db.get(2, "test2").expect("Failed to get profile.").as_deref()
+ );
+ assert_eq!(
+ Some(TEST_BLOB3),
+ db.get(2, "test3").expect("Failed to get profile.").as_deref()
+ );
+
+ // Remove test2 and check that it is no longer retrievable.
+ assert!(db.remove(2, "test2").expect("Failed to remove profile."));
+ assert!(db.get(2, "test2").expect("Failed to get profile.").is_none());
+
+ // test2 should now no longer be in the list.
+ assert_eq!(
+ vec!["test1".to_string(), "test3".to_string(),],
+ db.list(2).expect("Failed to list profiles.")
+ );
+
+ // Put on existing alias replaces it.
+ // Verify test1 is TEST_BLOB1.
+ assert_eq!(
+ Some(TEST_BLOB1),
+ db.get(2, "test1").expect("Failed to get profile.").as_deref()
+ );
+ db.put(2, "test1", TEST_BLOB4).expect("Failed to replace test1.");
+ // Verify test1 is TEST_BLOB4.
+ assert_eq!(
+ Some(TEST_BLOB4),
+ db.get(2, "test1").expect("Failed to get profile.").as_deref()
+ );
+ }
+}
diff --git a/ondevice-signing/Android.bp b/ondevice-signing/Android.bp
index 8da28f2..2e5e02e 100644
--- a/ondevice-signing/Android.bp
+++ b/ondevice-signing/Android.bp
@@ -14,10 +14,18 @@
// List of clang-tidy checks that are reported as errors.
// Please keep this list ordered lexicographically.
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
tidy_errors = [
"cert-err34-c",
"google-default-arguments",
- "google-explicit-constructor",
"google-runtime-int",
"google-runtime-member-string-references",
"misc-move-const-arg",
@@ -62,7 +70,7 @@
tidy_checks: tidy_errors,
tidy_checks_as_errors: tidy_errors,
tidy_flags: [
- "-format-style='file'",
+ "-format-style=file",
],
}
@@ -78,17 +86,24 @@
"CertUtils.cpp",
"Keymaster.cpp",
"KeymasterSigningKey.cpp",
+ "KeystoreKey.cpp",
"VerityUtils.cpp",
],
+ header_libs: ["odrefresh_headers"],
+
static_libs: [
"libmini_keyctl_static", // TODO need static?
"libc++fs",
+ "lib_odsign_proto",
],
shared_libs: [
"android.hardware.keymaster@4.1",
+ "android.system.keystore2-V1-cpp",
+ "android.hardware.security.keymint-V1-cpp",
"libbase",
+ "libbinder",
"libcrypto",
"libcrypto_utils",
"libfsverity",
@@ -97,6 +112,7 @@
"libkeymaster4support", // For authorization_set
"libkeymaster4_1support",
"libkeyutils",
+ "libprotobuf-cpp-full",
"libutils",
],
}
diff --git a/ondevice-signing/CertUtils.cpp b/ondevice-signing/CertUtils.cpp
index 6b24391..b0b75a6 100644
--- a/ondevice-signing/CertUtils.cpp
+++ b/ondevice-signing/CertUtils.cpp
@@ -25,6 +25,9 @@
#include <fcntl.h>
#include <vector>
+
+#include "KeyConstants.h"
+
const char kBasicConstraints[] = "CA:TRUE";
const char kKeyUsage[] = "critical,keyCertSign,cRLSign,digitalSignature";
const char kSubjectKeyIdentifier[] = "hash";
@@ -52,6 +55,33 @@
return true;
}
+Result<bssl::UniquePtr<RSA>> getRsa(const std::vector<uint8_t>& publicKey) {
+ bssl::UniquePtr<RSA> rsaPubkey(RSA_new());
+ rsaPubkey->n = BN_new();
+ rsaPubkey->e = BN_new();
+
+ BN_bin2bn(publicKey.data(), publicKey.size(), rsaPubkey->n);
+ BN_set_word(rsaPubkey->e, kRsaKeyExponent);
+
+ return rsaPubkey;
+}
+
+Result<void> verifySignature(const std::string& message, const std::string& signature,
+ const std::vector<uint8_t>& publicKey) {
+ auto rsaKey = getRsa(publicKey);
+ uint8_t hashBuf[SHA256_DIGEST_LENGTH];
+ SHA256(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(message.c_str())),
+ message.length(), hashBuf);
+
+ bool success = RSA_verify(NID_sha256, hashBuf, sizeof(hashBuf),
+ (const uint8_t*)signature.c_str(), signature.length(), rsaKey->get());
+
+ if (!success) {
+ return Error() << "Failed to verify signature.";
+ }
+ return {};
+}
+
Result<void> createSelfSignedCertificate(
const std::vector<uint8_t>& publicKey,
const std::function<Result<std::string>(const std::string&)>& signFunction,
@@ -66,8 +96,13 @@
X509_gmtime_adj(X509_get_notBefore(x509.get()), 0);
X509_gmtime_adj(X509_get_notAfter(x509.get()), kCertLifetimeSeconds);
- auto pubKeyData = publicKey.data();
- EVP_PKEY* public_key = d2i_PUBKEY(nullptr, &pubKeyData, publicKey.size());
+ // "publicKey" corresponds to the raw public key bytes - need to create
+ // a new RSA key with the correct exponent.
+ auto rsaPubkey = getRsa(publicKey);
+
+ EVP_PKEY* public_key = EVP_PKEY_new();
+ EVP_PKEY_assign_RSA(public_key, rsaPubkey->release());
+
if (!X509_set_pubkey(x509.get(), public_key)) {
return Error() << "Unable to set x509 public key";
}
@@ -112,11 +147,14 @@
x509->signature->flags &= ~(ASN1_STRING_FLAG_BITS_LEFT | 0x07);
x509->signature->flags |= ASN1_STRING_FLAG_BITS_LEFT;
- auto f = fopen(path.c_str(), "wb");
- // TODO error checking
+ auto f = fopen(path.c_str(), "wbe");
+ if (f == nullptr) {
+ return Error() << "Failed to open " << path;
+ }
i2d_X509_fp(f, x509.get());
fclose(f);
+ EVP_PKEY_free(public_key);
return {};
}
@@ -142,17 +180,33 @@
return pubKey;
}
-Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::vector<uint8_t>& keyData) {
+Result<std::vector<uint8_t>>
+extractPublicKeyFromSubjectPublicKeyInfo(const std::vector<uint8_t>& keyData) {
auto keyDataBytes = keyData.data();
EVP_PKEY* public_key = d2i_PUBKEY(nullptr, &keyDataBytes, keyData.size());
return extractPublicKey(public_key);
}
+Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::vector<uint8_t>& keyData) {
+ auto keyDataBytes = keyData.data();
+ bssl::UniquePtr<X509> decoded_cert(d2i_X509(nullptr, &keyDataBytes, keyData.size()));
+ if (decoded_cert.get() == nullptr) {
+ return Error() << "Failed to decode X509 certificate.";
+ }
+ bssl::UniquePtr<EVP_PKEY> decoded_pkey(X509_get_pubkey(decoded_cert.get()));
+
+ return extractPublicKey(decoded_pkey.get());
+}
+
Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path) {
X509* cert;
- auto f = fopen(path.c_str(), "r");
+ auto f = fopen(path.c_str(), "re");
+ if (f == nullptr) {
+ return Error() << "Failed to open " << path;
+ }
if (!d2i_X509_fp(f, &cert)) {
+ fclose(f);
return Error() << "Unable to decode x509 cert at " << path;
}
diff --git a/ondevice-signing/CertUtils.h b/ondevice-signing/CertUtils.h
index d9172d0..66dff04 100644
--- a/ondevice-signing/CertUtils.h
+++ b/ondevice-signing/CertUtils.h
@@ -25,5 +25,11 @@
android::base::Result<std::vector<uint8_t>> createPkcs7(const std::vector<uint8_t>& signedData);
android::base::Result<std::vector<uint8_t>>
-extractPublicKeyFromX509(const std::vector<uint8_t>& path);
+extractPublicKeyFromX509(const std::vector<uint8_t>& x509);
+android::base::Result<std::vector<uint8_t>>
+extractPublicKeyFromSubjectPublicKeyInfo(const std::vector<uint8_t>& subjectKeyInfo);
android::base::Result<std::vector<uint8_t>> extractPublicKeyFromX509(const std::string& path);
+
+android::base::Result<void> verifySignature(const std::string& message,
+ const std::string& signature,
+ const std::vector<uint8_t>& publicKey);
diff --git a/ondevice-signing/KeyConstants.h b/ondevice-signing/KeyConstants.h
new file mode 100644
index 0000000..9e1a513
--- /dev/null
+++ b/ondevice-signing/KeyConstants.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+static constexpr int kRsaKeySize = 2048;
+static constexpr int kRsaKeyExponent = 65537;
diff --git a/ondevice-signing/Keymaster.cpp b/ondevice-signing/Keymaster.cpp
index d43828a..6cfb565 100644
--- a/ondevice-signing/Keymaster.cpp
+++ b/ondevice-signing/Keymaster.cpp
@@ -66,9 +66,12 @@
}
}
+ if (devToUse == nullptr) {
+ LOG(WARNING) << "Didn't find a keymaster to use.";
+ }
mDevice = devToUse;
- return true;
+ return mDevice != nullptr;
}
std::optional<Keymaster> Keymaster::getInstance() {
diff --git a/ondevice-signing/KeymasterSigningKey.cpp b/ondevice-signing/KeymasterSigningKey.cpp
index f35f92b..dc3ef8a 100644
--- a/ondevice-signing/KeymasterSigningKey.cpp
+++ b/ondevice-signing/KeymasterSigningKey.cpp
@@ -33,45 +33,21 @@
using android::base::Result;
using android::base::unique_fd;
+const std::string kSigningKeyBlob = "/data/misc/odsign/key.blob";
+
KeymasterSigningKey::KeymasterSigningKey() {}
-Result<KeymasterSigningKey> KeymasterSigningKey::loadFromBlobAndVerify(const std::string& path) {
- KeymasterSigningKey signingKey;
+Result<std::unique_ptr<KeymasterSigningKey>>
+KeymasterSigningKey::loadFromBlobAndVerify(const std::string& path) {
+ auto signingKey = std::make_unique<KeymasterSigningKey>();
- auto status = signingKey.initializeFromKeyblob(path);
+ auto status = signingKey->initializeFromKeyblob(path);
if (!status.ok()) {
return status.error();
}
- return std::move(signingKey);
-}
-
-Result<KeymasterSigningKey> KeymasterSigningKey::createNewKey() {
- KeymasterSigningKey signingKey;
-
- auto status = signingKey.createSigningKey();
-
- if (!status.ok()) {
- return status.error();
- }
-
- return std::move(signingKey);
-}
-
-Result<void> KeymasterSigningKey::createSigningKey() {
- KeymasterSigningKey signingKey;
- mKeymaster = Keymaster::getInstance();
-
- auto keyBlob = mKeymaster->createKey();
-
- if (!keyBlob.ok()) {
- return keyBlob.error();
- }
-
- mVerifiedKeyBlob.assign(keyBlob->begin(), keyBlob->end());
-
- return {};
+ return signingKey;
}
Result<void> KeymasterSigningKey::saveKeyblob(const std::string& path) const {
@@ -89,31 +65,73 @@
}
}
-Result<std::vector<uint8_t>> KeymasterSigningKey::getPublicKey() const {
- auto publicKeyX509 = mKeymaster->extractPublicKey(mVerifiedKeyBlob);
- if (!publicKeyX509.ok()) {
- return publicKeyX509.error();
+Result<void> KeymasterSigningKey::createSigningKey() {
+ KeymasterSigningKey signingKey;
+ auto keymaster = Keymaster::getInstance();
+ if (!keymaster.has_value()) {
+ return Error() << "Failed to initialize keymaster.";
}
- return extractPublicKeyFromX509(publicKeyX509.value());
+ mKeymaster = keymaster;
+
+ auto keyBlob = mKeymaster->createKey();
+
+ if (!keyBlob.ok()) {
+ return keyBlob.error();
+ }
+
+ mVerifiedKeyBlob.assign(keyBlob->begin(), keyBlob->end());
+
+ return {};
}
-Result<void> KeymasterSigningKey::createX509Cert(const std::string& outPath) const {
- auto publicKey = mKeymaster->extractPublicKey(mVerifiedKeyBlob);
+Result<std::unique_ptr<KeymasterSigningKey>> KeymasterSigningKey::createAndPersistNewKey() {
+ auto signingKey = std::make_unique<KeymasterSigningKey>();
+ auto status = signingKey->createSigningKey();
+
+ if (!status.ok()) {
+ return status.error();
+ }
+
+ status = signingKey->saveKeyblob(kSigningKeyBlob);
+ if (!status.ok()) {
+ return status.error();
+ }
+
+ return signingKey;
+}
+
+Result<SigningKey*> KeymasterSigningKey::getInstance() {
+ auto key = loadFromBlobAndVerify(kSigningKeyBlob);
+
+ if (!key.ok()) {
+ key = createAndPersistNewKey();
+ if (!key.ok()) {
+ return key.error();
+ }
+ }
+
+ return key->release();
+}
+
+Result<std::vector<uint8_t>> KeymasterSigningKey::getPublicKey() const {
+ auto publicKey = mKeymaster->extractPublicKey(mVerifiedKeyBlob);
if (!publicKey.ok()) {
return publicKey.error();
}
- auto keymasterSignFunction = [&](const std::string& to_be_signed) {
- return this->sign(to_be_signed);
- };
- createSelfSignedCertificate(*publicKey, keymasterSignFunction, outPath);
- return {};
+ // Keymaster returns the public key not in a full X509 cert, but just the
+ // "SubjectPublicKeyInfo"
+ return extractPublicKeyFromSubjectPublicKeyInfo(publicKey.value());
}
Result<void> KeymasterSigningKey::initializeFromKeyblob(const std::string& path) {
- mKeymaster = Keymaster::getInstance();
std::string keyBlobData;
+ auto keymaster = Keymaster::getInstance();
+ if (!keymaster.has_value()) {
+ return Error() << "Failed to initialize keymaster.";
+ }
+ mKeymaster = keymaster;
bool result = ReadFileToString(path, &keyBlobData);
if (!result) {
diff --git a/ondevice-signing/KeymasterSigningKey.h b/ondevice-signing/KeymasterSigningKey.h
index 7631059..e66781f 100644
--- a/ondevice-signing/KeymasterSigningKey.h
+++ b/ondevice-signing/KeymasterSigningKey.h
@@ -23,30 +23,36 @@
#include <utils/StrongPointer.h>
#include "Keymaster.h"
+#include "SigningKey.h"
-class KeymasterSigningKey {
+class KeymasterSigningKey : public SigningKey {
using KmDevice = ::android::hardware::keymaster::V4_1::IKeymasterDevice;
public:
+ friend std::unique_ptr<KeymasterSigningKey> std::make_unique<KeymasterSigningKey>();
+ virtual ~KeymasterSigningKey(){};
+
// Allow the key to be moved around
KeymasterSigningKey& operator=(KeymasterSigningKey&& other) = default;
KeymasterSigningKey(KeymasterSigningKey&& other) = default;
- static android::base::Result<KeymasterSigningKey>
- loadFromBlobAndVerify(const std::string& path);
- static android::base::Result<KeymasterSigningKey> createNewKey();
+ static android::base::Result<SigningKey*> getInstance();
- /* Sign a message with an initialized signing key */
- android::base::Result<std::string> sign(const std::string& message) const;
- android::base::Result<void> saveKeyblob(const std::string& path) const;
- android::base::Result<std::vector<uint8_t>> getPublicKey() const;
- android::base::Result<void> createX509Cert(const std::string& path) const;
+ virtual android::base::Result<std::string> sign(const std::string& message) const;
+ virtual android::base::Result<std::vector<uint8_t>> getPublicKey() const;
private:
KeymasterSigningKey();
+ static android::base::Result<std::unique_ptr<KeymasterSigningKey>> createAndPersistNewKey();
+ static android::base::Result<std::unique_ptr<KeymasterSigningKey>>
+ loadFromBlobAndVerify(const std::string& path);
+
android::base::Result<void> createSigningKey();
android::base::Result<void> initializeFromKeyblob(const std::string& path);
+ android::base::Result<void> saveKeyblob(const std::string& path) const;
+
+ static android::base::Result<KeymasterSigningKey> createNewKey();
std::optional<Keymaster> mKeymaster;
std::vector<uint8_t> mVerifiedKeyBlob;
diff --git a/ondevice-signing/KeystoreKey.cpp b/ondevice-signing/KeystoreKey.cpp
new file mode 100644
index 0000000..840b683
--- /dev/null
+++ b/ondevice-signing/KeystoreKey.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <binder/IServiceManager.h>
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "CertUtils.h"
+#include "KeyConstants.h"
+#include "KeystoreKey.h"
+
+using android::defaultServiceManager;
+using android::IServiceManager;
+using android::sp;
+using android::String16;
+
+using android::hardware::security::keymint::Algorithm;
+using android::hardware::security::keymint::Digest;
+using android::hardware::security::keymint::KeyParameter;
+using android::hardware::security::keymint::KeyParameterValue;
+using android::hardware::security::keymint::KeyPurpose;
+using android::hardware::security::keymint::PaddingMode;
+using android::hardware::security::keymint::SecurityLevel;
+using android::hardware::security::keymint::Tag;
+
+using android::system::keystore2::CreateOperationResponse;
+using android::system::keystore2::Domain;
+using android::system::keystore2::KeyDescriptor;
+using android::system::keystore2::KeyEntryResponse;
+using android::system::keystore2::KeyMetadata;
+
+using android::base::Error;
+using android::base::Result;
+
+using android::base::unique_fd;
+
+// Keystore boot level that the odsign key uses
+static const int kOdsignBootLevel = 30;
+
+static KeyDescriptor getKeyDescriptor() {
+ // AIDL parcelable objects don't have constructors.
+ static KeyDescriptor descriptor;
+ static std::once_flag flag;
+ std::call_once(flag, [&]() {
+ descriptor.domain = Domain::SELINUX;
+ descriptor.alias = String16("ondevice-signing");
+ descriptor.nspace = 101; // odsign_key
+ });
+
+ return descriptor;
+}
+
+KeystoreKey::KeystoreKey() {}
+
+Result<KeyMetadata> KeystoreKey::createNewKey(const KeyDescriptor& descriptor) {
+ std::vector<KeyParameter> params;
+
+ KeyParameter algo;
+ algo.tag = Tag::ALGORITHM;
+ algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::RSA);
+ params.push_back(algo);
+
+ KeyParameter key_size;
+ key_size.tag = Tag::KEY_SIZE;
+ key_size.value = KeyParameterValue::make<KeyParameterValue::integer>(kRsaKeySize);
+ params.push_back(key_size);
+
+ KeyParameter digest;
+ digest.tag = Tag::DIGEST;
+ digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
+ params.push_back(digest);
+
+ KeyParameter padding;
+ padding.tag = Tag::PADDING;
+ padding.value =
+ KeyParameterValue::make<KeyParameterValue::paddingMode>(PaddingMode::RSA_PKCS1_1_5_SIGN);
+ params.push_back(padding);
+
+ KeyParameter exponent;
+ exponent.tag = Tag::RSA_PUBLIC_EXPONENT;
+ exponent.value = KeyParameterValue::make<KeyParameterValue::longInteger>(kRsaKeyExponent);
+ params.push_back(exponent);
+
+ KeyParameter purpose;
+ purpose.tag = Tag::PURPOSE;
+ purpose.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
+ params.push_back(purpose);
+
+ KeyParameter auth;
+ auth.tag = Tag::NO_AUTH_REQUIRED;
+ auth.value = KeyParameterValue::make<KeyParameterValue::boolValue>(true);
+ params.push_back(auth);
+
+ KeyParameter boot_level;
+ boot_level.tag = Tag::MAX_BOOT_LEVEL;
+ boot_level.value = KeyParameterValue::make<KeyParameterValue::integer>(kOdsignBootLevel);
+ params.push_back(boot_level);
+
+ KeyMetadata metadata;
+ auto status = mSecurityLevel->generateKey(descriptor, {}, params, 0, {}, &metadata);
+ if (!status.isOk()) {
+ return Error() << "Failed to create new key";
+ }
+
+ return metadata;
+}
+
+bool KeystoreKey::initialize() {
+ sp<IServiceManager> sm = defaultServiceManager();
+ if (sm == nullptr) {
+ return false;
+ }
+ auto service = sm->getService(String16("android.system.keystore2.IKeystoreService/default"));
+ if (service == nullptr) {
+ return false;
+ }
+ mService = interface_cast<android::system::keystore2::IKeystoreService>(service);
+ if (mService == nullptr) {
+ return false;
+ }
+
+ auto status = mService->getSecurityLevel(SecurityLevel::STRONGBOX, &mSecurityLevel);
+ if (!status.isOk()) {
+ status = mService->getSecurityLevel(SecurityLevel::TRUSTED_ENVIRONMENT, &mSecurityLevel);
+ if (!status.isOk()) {
+ return false;
+ }
+ }
+
+ auto descriptor = getKeyDescriptor();
+ // See if we can fetch an existing key
+ KeyEntryResponse keyEntryResponse;
+ LOG(INFO) << "Trying to retrieve existing keystore key...";
+ status = mService->getKeyEntry(descriptor, &keyEntryResponse);
+ if (!status.isOk()) {
+ LOG(INFO) << "Existing keystore key not found, creating new key";
+ auto newKeyStatus = createNewKey(descriptor);
+ if (!newKeyStatus.ok()) {
+ LOG(ERROR) << "Failed to create new key";
+ return false;
+ }
+ mKeyMetadata = *newKeyStatus;
+ } else {
+ mKeyMetadata = keyEntryResponse.metadata;
+ }
+
+ LOG(INFO) << "Initialized Keystore key.";
+ return true;
+}
+
+Result<SigningKey*> KeystoreKey::getInstance() {
+ static KeystoreKey keystoreKey;
+
+ if (!keystoreKey.initialize()) {
+ return Error() << "Failed to initialize keystore key.";
+ } else {
+ return &keystoreKey;
+ }
+}
+
+static std::vector<KeyParameter> getSignOpParameters() {
+ std::vector<KeyParameter> opParameters;
+
+ KeyParameter algo;
+ algo.tag = Tag::ALGORITHM;
+ algo.value = KeyParameterValue::make<KeyParameterValue::algorithm>(Algorithm::RSA);
+ opParameters.push_back(algo);
+
+ KeyParameter digest;
+ digest.tag = Tag::DIGEST;
+ digest.value = KeyParameterValue::make<KeyParameterValue::digest>(Digest::SHA_2_256);
+ opParameters.push_back(digest);
+
+ KeyParameter padding;
+ padding.tag = Tag::PADDING;
+ padding.value =
+ KeyParameterValue::make<KeyParameterValue::paddingMode>(PaddingMode::RSA_PKCS1_1_5_SIGN);
+ opParameters.push_back(padding);
+
+ KeyParameter purpose;
+ purpose.tag = Tag::PURPOSE;
+ purpose.value = KeyParameterValue::make<KeyParameterValue::keyPurpose>(KeyPurpose::SIGN);
+ opParameters.push_back(purpose);
+
+ return opParameters;
+}
+
+Result<std::string> KeystoreKey::sign(const std::string& message) const {
+ static auto opParameters = getSignOpParameters();
+
+ CreateOperationResponse opResponse;
+
+ auto status =
+ mSecurityLevel->createOperation(getKeyDescriptor(), opParameters, false, &opResponse);
+ if (!status.isOk()) {
+ return Error() << "Failed to create keystore signing operation: "
+ << status.serviceSpecificErrorCode();
+ }
+ auto operation = opResponse.iOperation;
+
+ std::optional<std::vector<uint8_t>> out;
+ status = operation->update({message.begin(), message.end()}, &out);
+ if (!status.isOk()) {
+ return Error() << "Failed to call keystore update operation.";
+ }
+
+ std::optional<std::vector<uint8_t>> signature;
+ status = operation->finish({}, {}, &signature);
+ if (!status.isOk()) {
+ return Error() << "Failed to call keystore finish operation.";
+ }
+
+ if (!signature.has_value()) {
+ return Error() << "Didn't receive a signature from keystore finish operation.";
+ }
+
+ std::string result{signature.value().begin(), signature.value().end()};
+
+ return result;
+}
+
+Result<std::vector<uint8_t>> KeystoreKey::getPublicKey() const {
+ return extractPublicKeyFromX509(mKeyMetadata.certificate.value());
+}
diff --git a/ondevice-signing/KeystoreKey.h b/ondevice-signing/KeystoreKey.h
new file mode 100644
index 0000000..6b9cb57
--- /dev/null
+++ b/ondevice-signing/KeystoreKey.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <optional>
+
+#include <android-base/macros.h>
+#include <android-base/result.h>
+#include <android-base/unique_fd.h>
+
+#include <utils/StrongPointer.h>
+
+#include <android/system/keystore2/IKeystoreService.h>
+
+#include "SigningKey.h"
+
+class KeystoreKey : public SigningKey {
+ using IKeystoreService = ::android::system::keystore2::IKeystoreService;
+ using IKeystoreSecurityLevel = ::android::system::keystore2::IKeystoreSecurityLevel;
+ using KeyDescriptor = ::android::system::keystore2::KeyDescriptor;
+ using KeyMetadata = ::android::system::keystore2::KeyMetadata;
+
+ public:
+ virtual ~KeystoreKey(){};
+ static android::base::Result<SigningKey*> getInstance();
+
+ virtual android::base::Result<std::string> sign(const std::string& message) const;
+ virtual android::base::Result<std::vector<uint8_t>> getPublicKey() const;
+
+ private:
+ KeystoreKey();
+ bool initialize();
+ android::base::Result<KeyMetadata> createNewKey(const KeyDescriptor& descriptor);
+
+ android::sp<IKeystoreService> mService;
+ android::sp<IKeystoreSecurityLevel> mSecurityLevel;
+ KeyMetadata mKeyMetadata;
+};
diff --git a/ondevice-signing/SigningKey.h b/ondevice-signing/SigningKey.h
new file mode 100644
index 0000000..89294fc
--- /dev/null
+++ b/ondevice-signing/SigningKey.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/macros.h>
+#include <android-base/result.h>
+
+class SigningKey {
+ public:
+ virtual ~SigningKey(){};
+ /* Sign a message with an initialized signing key */
+ virtual android::base::Result<std::string> sign(const std::string& message) const = 0;
+ /* Retrieve the associated public key */
+ virtual android::base::Result<std::vector<uint8_t>> getPublicKey() const = 0;
+};
diff --git a/ondevice-signing/VerityUtils.cpp b/ondevice-signing/VerityUtils.cpp
index 579d3d8..cab92e2 100644
--- a/ondevice-signing/VerityUtils.cpp
+++ b/ondevice-signing/VerityUtils.cpp
@@ -15,12 +15,15 @@
*/
#include <filesystem>
+#include <map>
+#include <span>
#include <string>
#include <fcntl.h>
#include <linux/fs.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <sys/wait.h>
#include <android-base/logging.h>
#include <android-base/unique_fd.h>
@@ -28,13 +31,17 @@
#include <linux/fsverity.h>
#include "CertUtils.h"
-#include "KeymasterSigningKey.h"
+#include "SigningKey.h"
+
+#define FS_VERITY_MAX_DIGEST_SIZE 64
using android::base::ErrnoError;
using android::base::Error;
using android::base::Result;
using android::base::unique_fd;
+static const char* kFsVerityInitPath = "/system/bin/fsverity_init";
+
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define cpu_to_le16(v) ((__force __le16)(uint16_t)(v))
#define le16_to_cpu(v) ((__force uint16_t)(__le16)(v))
@@ -50,17 +57,31 @@
__u8 digest[];
};
+static std::string toHex(std::span<uint8_t> data) {
+ std::stringstream ss;
+ for (auto it = data.begin(); it != data.end(); ++it) {
+ ss << std::setfill('0') << std::setw(2) << std::hex << static_cast<unsigned>(*it);
+ }
+ return ss.str();
+}
+
static int read_callback(void* file, void* buf, size_t count) {
int* fd = (int*)file;
if (TEMP_FAILURE_RETRY(read(*fd, buf, count)) < 0) return errno ? -errno : -EIO;
return 0;
}
-static Result<std::vector<uint8_t>> createDigest(const std::string& path) {
+Result<std::vector<uint8_t>> createDigest(const std::string& path) {
struct stat filestat;
unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
+ if (fd < 0) {
+ return ErrnoError() << "Failed to open " << path;
+ }
- stat(path.c_str(), &filestat);
+ int ret = stat(path.c_str(), &filestat);
+ if (ret < 0) {
+ return ErrnoError() << "Failed to stat " << path;
+ }
struct libfsverity_merkle_tree_params params = {
.version = 1,
.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
@@ -69,23 +90,45 @@
};
struct libfsverity_digest* digest;
- libfsverity_compute_digest(&fd, &read_callback, ¶ms, &digest);
-
- return std::vector<uint8_t>(&digest->digest[0], &digest->digest[32]);
+ ret = libfsverity_compute_digest(&fd, &read_callback, ¶ms, &digest);
+ if (ret < 0) {
+ return ErrnoError() << "Failed to compute fs-verity digest for " << path;
+ }
+ std::vector<uint8_t> digestVector(&digest->digest[0], &digest->digest[32]);
+ free(digest);
+ return digestVector;
}
-static Result<std::vector<uint8_t>> signDigest(const KeymasterSigningKey& key,
+namespace {
+template <typename T> struct DeleteAsPODArray {
+ void operator()(T* x) {
+ if (x) {
+ x->~T();
+ delete[](uint8_t*) x;
+ }
+ }
+};
+} // namespace
+
+template <typename T> using trailing_unique_ptr = std::unique_ptr<T, DeleteAsPODArray<T>>;
+
+template <typename T>
+static trailing_unique_ptr<T> makeUniqueWithTrailingData(size_t trailing_data_size) {
+ // Allocate room for the object itself plus its trailing flexible-array data.
+ uint8_t* memory = new uint8_t[sizeof(T) + trailing_data_size];
+ T* ptr = new (memory) T;
+ return trailing_unique_ptr<T>{ptr};
+}
+
+static Result<std::vector<uint8_t>> signDigest(const SigningKey& key,
const std::vector<uint8_t>& digest) {
- struct fsverity_signed_digest* d = NULL;
- size_t signed_digest_size = sizeof(*d) + digest.size();
- d = (struct fsverity_signed_digest*)malloc(signed_digest_size);
+ auto d = makeUniqueWithTrailingData<fsverity_signed_digest>(digest.size());
memcpy(d->magic, "FSVerity", 8);
d->digest_algorithm = cpu_to_le16(FS_VERITY_HASH_ALG_SHA256);
d->digest_size = cpu_to_le16(digest.size());
memcpy(d->digest, digest.data(), digest.size());
- auto signed_digest = key.sign(std::string((char*)d, signed_digest_size));
+ auto signed_digest = key.sign(std::string((char*)d.get(), sizeof(*d) + digest.size()));
if (!signed_digest.ok()) {
return signed_digest.error();
}
@@ -93,7 +136,7 @@
return std::vector<uint8_t>(signed_digest->begin(), signed_digest->end());
}
-Result<void> enableFsVerity(const std::string& path, const KeymasterSigningKey& key) {
+Result<std::string> enableFsVerity(const std::string& path, const SigningKey& key) {
auto digest = createDigest(path);
if (!digest.ok()) {
return digest.error();
@@ -120,10 +163,13 @@
return ErrnoError() << "Failed to call FS_IOC_ENABLE_VERITY on " << path;
}
- return {};
+ // Return the root hash as a hex string
+ return toHex(digest.value());
}
-Result<void> addFilesToVerityRecursive(const std::string& path, const KeymasterSigningKey& key) {
+Result<std::map<std::string, std::string>> addFilesToVerityRecursive(const std::string& path,
+ const SigningKey& key) {
+ std::map<std::string, std::string> digests;
std::error_code ec;
auto it = std::filesystem::recursive_directory_iterator(path, ec);
@@ -136,14 +182,18 @@
if (!result.ok()) {
return result.error();
}
+ digests[it->path()] = *result;
}
++it;
}
+ if (ec) {
+ return Error() << "Failed to iterate " << path << ": " << ec;
+ }
- return {};
+ return digests;
}
-Result<bool> isFileInVerity(const std::string& path) {
+Result<std::string> isFileInVerity(const std::string& path) {
unsigned int flags;
unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC)));
@@ -155,11 +205,21 @@
if (ret < 0) {
return ErrnoError() << "Failed to FS_IOC_GETFLAGS for " << path;
}
+ if (!(flags & FS_VERITY_FL)) {
+ return Error() << "File is not in fs-verity: " << path;
+ }
- return (flags & FS_VERITY_FL);
+ auto d = makeUniqueWithTrailingData<fsverity_digest>(FS_VERITY_MAX_DIGEST_SIZE);
+ d->digest_size = FS_VERITY_MAX_DIGEST_SIZE;
+ ret = ioctl(fd, FS_IOC_MEASURE_VERITY, d.get());
+ if (ret < 0) {
+ return ErrnoError() << "Failed to FS_IOC_MEASURE_VERITY for " << path;
+ }
+ return toHex({&d->digest[0], &d->digest[d->digest_size]});
}
-Result<void> verifyAllFilesInVerity(const std::string& path) {
+Result<std::map<std::string, std::string>> verifyAllFilesInVerity(const std::string& path) {
+ std::map<std::string, std::string> digests;
std::error_code ec;
auto it = std::filesystem::recursive_directory_iterator(path, ec);
@@ -172,12 +232,50 @@
if (!result.ok()) {
return result.error();
}
- if (!*result) {
- return Error() << "File " << it->path() << " not in fs-verity";
- }
+ digests[it->path()] = *result;
} // TODO reject other types besides dirs?
++it;
}
+ if (ec) {
+ return Error() << "Failed to iterate " << path << ": " << ec;
+ }
+
+ return digests;
+}
+
+Result<void> addCertToFsVerityKeyring(const std::string& path) {
+ const char* const argv[] = {kFsVerityInitPath, "--load-extra-key", "fsv_ods"};
+
+ int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
+ pid_t pid = fork();
+ if (pid == 0) {
+ dup2(fd, STDIN_FILENO);
+ close(fd);
+ int argc = arraysize(argv);
+ char* argv_child[argc + 1];
+ memcpy(argv_child, argv, argc * sizeof(char*));
+ argv_child[argc] = nullptr;
+ execvp(argv_child[0], const_cast<char**>(argv_child));
+ PLOG(ERROR) << "exec in ForkExecvp";
+ _exit(EXIT_FAILURE);
+ } else {
+ close(fd);
+ }
+ if (pid == -1) {
+ return ErrnoError() << "Failed to fork.";
+ }
+ int status;
+ if (waitpid(pid, &status, 0) == -1) {
+ return ErrnoError() << "waitpid() failed.";
+ }
+ if (!WIFEXITED(status)) {
+ return Error() << kFsVerityInitPath << ": abnormal process exit";
+ }
+ if (WEXITSTATUS(status) != 0) {
+ // Report the child's exit code rather than the raw wait status.
+ return Error() << kFsVerityInitPath << " exited with " << WEXITSTATUS(status);
+ }
return {};
}
diff --git a/ondevice-signing/VerityUtils.h b/ondevice-signing/VerityUtils.h
index 1eca5a6..84af319 100644
--- a/ondevice-signing/VerityUtils.h
+++ b/ondevice-signing/VerityUtils.h
@@ -18,8 +18,11 @@
#include <android-base/result.h>
-#include "KeymasterSigningKey.h"
+#include "SigningKey.h"
-android::base::Result<void> verifyAllFilesInVerity(const std::string& path);
-android::base::Result<void> addFilesToVerityRecursive(const std::string& path,
- const KeymasterSigningKey& key);
+android::base::Result<void> addCertToFsVerityKeyring(const std::string& path);
+android::base::Result<std::vector<uint8_t>> createDigest(const std::string& path);
+android::base::Result<std::map<std::string, std::string>>
+verifyAllFilesInVerity(const std::string& path);
+android::base::Result<std::map<std::string, std::string>>
+addFilesToVerityRecursive(const std::string& path, const SigningKey& key);
diff --git a/ondevice-signing/odsign_main.cpp b/ondevice-signing/odsign_main.cpp
index efe7d35..6cab8b6 100644
--- a/ondevice-signing/odsign_main.cpp
+++ b/ondevice-signing/odsign_main.cpp
@@ -16,82 +16,57 @@
#include <fcntl.h>
#include <filesystem>
+#include <fstream>
#include <iomanip>
#include <iostream>
+#include <iterator>
#include <sys/stat.h>
#include <sys/types.h>
-#include <sys/wait.h>
#include <unistd.h>
#include <android-base/file.h>
#include <android-base/logging.h>
+#include <android-base/properties.h>
#include <android-base/scopeguard.h>
#include <logwrap/logwrap.h>
+#include <odrefresh/odrefresh.h>
#include "CertUtils.h"
#include "KeymasterSigningKey.h"
+#include "KeystoreKey.h"
#include "VerityUtils.h"
+#include "odsign_info.pb.h"
+
using android::base::ErrnoError;
using android::base::Error;
using android::base::Result;
+using android::base::SetProperty;
+
+using OdsignInfo = ::odsign::proto::OdsignInfo;
const std::string kSigningKeyBlob = "/data/misc/odsign/key.blob";
const std::string kSigningKeyCert = "/data/misc/odsign/key.cert";
+const std::string kOdsignInfo = "/data/misc/odsign/odsign.info";
+const std::string kOdsignInfoSignature = "/data/misc/odsign/odsign.info.signature";
-const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/system";
+const std::string kArtArtifactsDir = "/data/misc/apexdata/com.android.art/dalvik-cache";
static const char* kOdrefreshPath = "/apex/com.android.art/bin/odrefresh";
-static const char* kFsVerityInitPath = "/system/bin/fsverity_init";
+static const char* kFsVerityProcPath = "/proc/sys/fs/verity";
static const bool kForceCompilation = false;
+static const bool kUseKeystore = true;
-Result<void> addCertToFsVerityKeyring(const std::string& path) {
- const char* const argv[] = {kFsVerityInitPath, "--load-extra-key", "fsv_ods"};
+static const char* kOdsignVerificationDoneProp = "odsign.verification.done";
+static const char* kOdsignKeyDoneProp = "odsign.key.done";
- int fd = open(path.c_str(), O_RDONLY);
- pid_t pid = fork();
- if (pid == 0) {
- dup2(fd, STDIN_FILENO);
- close(fd);
- int argc = arraysize(argv);
- char* argv_child[argc + 1];
- memcpy(argv_child, argv, argc * sizeof(char*));
- argv_child[argc] = nullptr;
- execvp(argv_child[0], const_cast<char**>(argv_child));
- PLOG(ERROR) << "exec in ForkExecvp";
- _exit(EXIT_FAILURE);
- } else {
- close(fd);
- }
- if (pid == -1) {
- return ErrnoError() << "Failed to fork.";
- }
- int status;
- if (waitpid(pid, &status, 0) == -1) {
- return ErrnoError() << "waitpid() failed.";
- }
- if (!WIFEXITED(status)) {
- return Error() << kFsVerityInitPath << ": abnormal process exit";
- }
- if (WEXITSTATUS(status)) {
- if (status != 0) {
- return Error() << kFsVerityInitPath << " exited with " << status;
- }
- }
+static const char* kOdsignVerificationStatusProp = "odsign.verification.success";
+static const char* kOdsignVerificationStatusValid = "1";
+static const char* kOdsignVerificationStatusError = "0";
- return {};
-}
-
-Result<KeymasterSigningKey> loadAndVerifyExistingKey() {
- if (access(kSigningKeyBlob.c_str(), F_OK) < 0) {
- return ErrnoError() << "Key blob not found: " << kSigningKeyBlob;
- }
- return KeymasterSigningKey::loadFromBlobAndVerify(kSigningKeyBlob);
-}
-
-Result<void> verifyAndLoadExistingCert(const KeymasterSigningKey& key) {
+Result<void> verifyExistingCert(const SigningKey& key) {
if (access(kSigningKeyCert.c_str(), F_OK) < 0) {
return ErrnoError() << "Key certificate not found: " << kSigningKeyCert;
}
@@ -109,119 +84,323 @@
<< " does not match signing public key.";
}
- auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert);
- if (!cert_add_result.ok()) {
- return cert_add_result.error();
- }
-
// At this point, we know the cert matches
return {};
}
-Result<KeymasterSigningKey> createAndPersistKey(const std::string& path) {
- auto key = KeymasterSigningKey::createNewKey();
+Result<void> createX509Cert(const SigningKey& key, const std::string& outPath) {
+ auto publicKey = key.getPublicKey();
- if (!key.ok()) {
- return key.error();
+ if (!publicKey.ok()) {
+ return publicKey.error();
}
- auto result = key->saveKeyblob(path);
+ auto keymasterSignFunction = [&](const std::string& to_be_signed) {
+ return key.sign(to_be_signed);
+ };
+ createSelfSignedCertificate(*publicKey, keymasterSignFunction, outPath);
+ return {};
+}
+
+art::odrefresh::ExitCode compileArtifacts(bool force) {
+ const char* const argv[] = {kOdrefreshPath, force ? "--force-compile" : "--compile"};
+ const int exit_code =
+ logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr);
+ return static_cast<art::odrefresh::ExitCode>(exit_code);
+}
+
+static std::string toHex(const std::vector<uint8_t>& digest) {
+ std::stringstream ss;
+ for (auto it = digest.begin(); it != digest.end(); ++it) {
+ ss << std::setfill('0') << std::setw(2) << std::hex << static_cast<unsigned>(*it);
+ }
+ return ss.str();
+}
+
+Result<std::map<std::string, std::string>> computeDigests(const std::string& path) {
+ std::error_code ec;
+ std::map<std::string, std::string> digests;
+
+ auto it = std::filesystem::recursive_directory_iterator(path, ec);
+ auto end = std::filesystem::recursive_directory_iterator();
+
+ while (!ec && it != end) {
+ if (it->is_regular_file()) {
+ auto digest = createDigest(it->path());
+ if (!digest.ok()) {
+ return Error() << "Failed to compute digest for " << it->path();
+ }
+ digests[it->path()] = toHex(*digest);
+ }
+ ++it;
+ }
+ if (ec) {
+ return Error() << "Failed to iterate " << path << ": " << ec;
+ }
+
+ return digests;
+}
+
+Result<void> verifyDigests(const std::map<std::string, std::string>& digests,
+ const std::map<std::string, std::string>& trusted_digests) {
+ for (const auto& path_digest : digests) {
+ auto path = path_digest.first;
+ auto digest = path_digest.second;
+ if ((trusted_digests.count(path) == 0)) {
+ return Error() << "Couldn't find digest for " << path;
+ }
+ if (trusted_digests.at(path) != digest) {
+ return Error() << "Digest mismatch for " << path;
+ }
+ }
+
+ // All digests matched!
+ if (digests.size() > 0) {
+ LOG(INFO) << "All root hashes match.";
+ }
+ return {};
+}
+
+Result<void> verifyIntegrityFsVerity(const std::map<std::string, std::string>& trusted_digests) {
+ // Just verify that the files are in verity, and get their digests
+ auto result = verifyAllFilesInVerity(kArtArtifactsDir);
if (!result.ok()) {
return result.error();
}
- return key;
+ return verifyDigests(*result, trusted_digests);
}
-bool compileArtifacts(bool force) {
- const char* const argv[] = {kOdrefreshPath, force ? "--force-compile" : "--compile"};
+Result<void> verifyIntegrityNoFsVerity(const std::map<std::string, std::string>& trusted_digests) {
+ // On these devices, just compute the digests, and verify they match the ones we trust
+ auto result = computeDigests(kArtArtifactsDir);
+ if (!result.ok()) {
+ return result.error();
+ }
- return logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr) ==
- 0;
+ return verifyDigests(*result, trusted_digests);
}
-bool validateArtifacts() {
- const char* const argv[] = {kOdrefreshPath, "--check"};
+Result<OdsignInfo> getOdsignInfo(const SigningKey& key) {
+ std::string persistedSignature;
+ OdsignInfo odsignInfo;
- return logwrap_fork_execvp(arraysize(argv), argv, nullptr, false, LOG_ALOG, false, nullptr) ==
- 0;
+ if (!android::base::ReadFileToString(kOdsignInfoSignature, &persistedSignature)) {
+ return ErrnoError() << "Failed to read " << kOdsignInfoSignature;
+ }
+
+ std::fstream odsign_info(kOdsignInfo, std::ios::in | std::ios::binary);
+ if (!odsign_info) {
+ return Error() << "Failed to open " << kOdsignInfo;
+ }
+ odsign_info.seekg(0);
+ // Read the full file so the signature can be verified over its contents
+ std::string odsign_info_str((std::istreambuf_iterator<char>(odsign_info)),
+ std::istreambuf_iterator<char>());
+
+ auto publicKey = key.getPublicKey();
+ auto signResult = verifySignature(odsign_info_str, persistedSignature, *publicKey);
+ if (!signResult.ok()) {
+ return Error() << kOdsignInfoSignature << " does not match.";
+ } else {
+ LOG(INFO) << kOdsignInfoSignature << " matches.";
+ }
+
+ odsign_info.seekg(0);
+ if (!odsignInfo.ParseFromIstream(&odsign_info)) {
+ return Error() << "Failed to parse " << kOdsignInfo;
+ }
+
+ LOG(INFO) << "Loaded " << kOdsignInfo;
+ return odsignInfo;
+}
+
+Result<void> persistDigests(const std::map<std::string, std::string>& digests,
+ const SigningKey& key) {
+ OdsignInfo signInfo;
+ google::protobuf::Map<std::string, std::string> proto_hashes(digests.begin(), digests.end());
+ auto map = signInfo.mutable_file_hashes();
+ *map = proto_hashes;
+
+ std::fstream odsign_info(kOdsignInfo,
+ std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary);
+ if (!signInfo.SerializeToOstream(&odsign_info)) {
+ return Error() << "Failed to persist root hashes in " << kOdsignInfo;
+ }
+
+ // Sign the serialized digests with our key, and write the signature to storage
+ odsign_info.seekg(0, std::ios::beg);
+ std::string odsign_info_str((std::istreambuf_iterator<char>(odsign_info)),
+ std::istreambuf_iterator<char>());
+ auto signResult = key.sign(odsign_info_str);
+ if (!signResult.ok()) {
+ return Error() << "Failed to sign " << kOdsignInfo;
+ }
+ android::base::WriteStringToFile(*signResult, kOdsignInfoSignature);
+ return {};
+}
+
+static int removeArtifacts() {
+ std::error_code ec;
+ auto num_removed = std::filesystem::remove_all(kArtArtifactsDir, ec);
+ if (ec) {
+ LOG(ERROR) << "Can't remove " << kArtArtifactsDir << ": " << ec.message();
+ return 0;
+ } else {
+ if (num_removed > 0) {
+ LOG(INFO) << "Removed " << num_removed << " entries from " << kArtArtifactsDir;
+ }
+ return num_removed;
+ }
+}
+
+static Result<void> verifyArtifacts(const SigningKey& key, bool supportsFsVerity) {
+ auto signInfo = getOdsignInfo(key);
+ // Tell init we're done with the key; this is a boot time optimization
+ // in particular for the no fs-verity case, where we need to do a
+ // costly verification. If the files haven't been tampered with, which
+ // should be the common path, the verification will succeed, and we won't
+ // need the key anymore. If it turns out the artifacts are invalid (e.g. not
+ // in fs-verity) or the hash doesn't match, we won't be able to generate
+ // new artifacts without the key, so in those cases, remove the artifacts,
+ // and use JIT zygote for the current boot. We should recover automatically
+ // by the next boot.
+ SetProperty(kOdsignKeyDoneProp, "1");
+ if (!signInfo.ok()) {
+ return Error() << signInfo.error().message();
+ }
+ std::map<std::string, std::string> trusted_digests(signInfo->file_hashes().begin(),
+ signInfo->file_hashes().end());
+ Result<void> integrityStatus;
+
+ if (supportsFsVerity) {
+ integrityStatus = verifyIntegrityFsVerity(trusted_digests);
+ } else {
+ integrityStatus = verifyIntegrityNoFsVerity(trusted_digests);
+ }
+ if (!integrityStatus.ok()) {
+ return Error() << integrityStatus.error().message();
+ }
+
+ return {};
}
int main(int /* argc */, char** /* argv */) {
- auto removeArtifacts = []() {
- std::error_code ec;
- auto num_removed = std::filesystem::remove_all(kArtArtifactsDir, ec);
- if (ec) {
- // TODO can't remove artifacts, signal Zygote shouldn't use them
- LOG(ERROR) << "Can't remove " << kArtArtifactsDir << ": " << ec.message();
- } else {
- LOG(INFO) << "Removed " << num_removed << " entries from " << kArtArtifactsDir;
- }
+ auto errorScopeGuard = []() {
+ // In case we hit any error, remove the artifacts and tell Zygote not to use anything
+ removeArtifacts();
+ // Tell init we don't need to use our key anymore
+ SetProperty(kOdsignKeyDoneProp, "1");
+ // Tell init we're done with verification, and that it was an error
+ SetProperty(kOdsignVerificationDoneProp, "1");
+ SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusError);
};
- // Make sure we delete the artifacts in all early (error) exit paths
- auto scope_guard = android::base::make_scope_guard(removeArtifacts);
+ auto scope_guard = android::base::make_scope_guard(errorScopeGuard);
- auto key = loadAndVerifyExistingKey();
- if (!key.ok()) {
- LOG(WARNING) << key.error().message();
-
- key = createAndPersistKey(kSigningKeyBlob);
- if (!key.ok()) {
- LOG(ERROR) << "Failed to create or persist new key: " << key.error().message();
- return -1;
- }
- } else {
- LOG(INFO) << "Found and verified existing key: " << kSigningKeyBlob;
+ if (!android::base::GetBoolProperty("ro.apex.updatable", false)) {
+ LOG(INFO) << "Device doesn't support updatable APEX, exiting.";
+ return 0;
}
- auto existing_cert = verifyAndLoadExistingCert(key.value());
- if (!existing_cert.ok()) {
- LOG(WARNING) << existing_cert.error().message();
-
- // Try to create a new cert
- auto new_cert = key->createX509Cert(kSigningKeyCert);
- if (!new_cert.ok()) {
- LOG(ERROR) << "Failed to create X509 certificate: " << new_cert.error().message();
- // TODO apparently the key become invalid - delete the blob / cert
+ SigningKey* key;
+ if (kUseKeystore) {
+ auto keystoreResult = KeystoreKey::getInstance();
+ if (!keystoreResult.ok()) {
+ LOG(ERROR) << "Could not create keystore key: " << keystoreResult.error().message();
return -1;
}
+ key = keystoreResult.value();
+ } else {
+ // TODO - keymaster will go away
+ auto keymasterResult = KeymasterSigningKey::getInstance();
+ if (!keymasterResult.ok()) {
+ LOG(ERROR) << "Failed to create keymaster key: " << keymasterResult.error().message();
+ return -1;
+ }
+ key = keymasterResult.value();
+ }
+
+ bool supportsFsVerity = access(kFsVerityProcPath, F_OK) == 0;
+ if (!supportsFsVerity) {
+ LOG(INFO) << "Device doesn't support fsverity. Falling back to full verification.";
+ }
+
+ if (supportsFsVerity) {
+ auto existing_cert = verifyExistingCert(*key);
+ if (!existing_cert.ok()) {
+ LOG(WARNING) << existing_cert.error().message();
+
+ // Try to create a new cert
+ auto new_cert = createX509Cert(*key, kSigningKeyCert);
+ if (!new_cert.ok()) {
+ LOG(ERROR) << "Failed to create X509 certificate: " << new_cert.error().message();
+ // TODO apparently the key became invalid - delete the blob / cert
+ return -1;
+ }
+ } else {
+ LOG(INFO) << "Found and verified existing public key certificate: " << kSigningKeyCert;
+ }
auto cert_add_result = addCertToFsVerityKeyring(kSigningKeyCert);
if (!cert_add_result.ok()) {
LOG(ERROR) << "Failed to add certificate to fs-verity keyring: "
<< cert_add_result.error().message();
return -1;
}
- } else {
- LOG(INFO) << "Found and verified existing public key certificate: " << kSigningKeyCert;
}
- auto verityStatus = verifyAllFilesInVerity(kArtArtifactsDir);
- if (!verityStatus.ok()) {
- LOG(WARNING) << verityStatus.error().message() << ", removing " << kArtArtifactsDir;
- removeArtifacts();
- }
-
- bool artifactsValid = validateArtifacts();
-
- if (!artifactsValid || kForceCompilation) {
- removeArtifacts();
-
- LOG(INFO) << "Starting compilation... ";
- bool ret = compileArtifacts(kForceCompilation);
- LOG(INFO) << "Compilation done, returned " << ret;
-
- verityStatus = addFilesToVerityRecursive(kArtArtifactsDir, key.value());
-
- if (!verityStatus.ok()) {
- LOG(ERROR) << "Failed to add " << verityStatus.error().message();
+ art::odrefresh::ExitCode odrefresh_status = compileArtifacts(kForceCompilation);
+ if (odrefresh_status == art::odrefresh::ExitCode::kOkay) {
+ LOG(INFO) << "odrefresh said artifacts are VALID";
+ // A post-condition of validating artifacts is that if the ones on /system
+ // are used, kArtArtifactsDir is removed. Conversely, if kArtArtifactsDir
+ // exists, those are artifacts that will be used, and we should verify them.
+ int err = access(kArtArtifactsDir.c_str(), F_OK);
+ // If we receive any error other than ENOENT, be suspicious
+ bool artifactsPresent = (err == 0) || (err < 0 && errno != ENOENT);
+ if (artifactsPresent) {
+ auto verificationResult = verifyArtifacts(*key, supportsFsVerity);
+ if (!verificationResult.ok()) {
+ LOG(ERROR) << verificationResult.error().message();
+ return -1;
+ }
+ }
+ } else if (odrefresh_status == art::odrefresh::ExitCode::kCompilationSuccess ||
+ odrefresh_status == art::odrefresh::ExitCode::kCompilationFailed) {
+ const bool compiled_all = odrefresh_status == art::odrefresh::ExitCode::kCompilationSuccess;
+ LOG(INFO) << "odrefresh compiled " << (compiled_all ? "all" : "partial")
+ << " artifacts, returned " << odrefresh_status;
+ Result<std::map<std::string, std::string>> digests;
+ if (supportsFsVerity) {
+ digests = addFilesToVerityRecursive(kArtArtifactsDir, *key);
+ } else {
+ // If we can't use verity, just compute the root hashes and store
+ // those, so we can reverify them at the next boot.
+ digests = computeDigests(kArtArtifactsDir);
+ }
+ if (!digests.ok()) {
+ LOG(ERROR) << digests.error().message();
return -1;
}
+ auto persistStatus = persistDigests(*digests, *key);
+ if (!persistStatus.ok()) {
+ LOG(ERROR) << persistStatus.error().message();
+ return -1;
+ }
+ } else if (odrefresh_status == art::odrefresh::ExitCode::kCleanupFailed) {
+ LOG(ERROR) << "odrefresh failed cleaning up existing artifacts";
+ return -1;
+ } else {
+ LOG(ERROR) << "odrefresh exited unexpectedly, returned " << odrefresh_status;
+ return -1;
}
- // TODO we want to make sure Zygote only picks up the artifacts if we deemed
- // everything was ok here. We could use a sysprop, or some other mechanism?
LOG(INFO) << "On-device signing done.";
scope_guard.Disable();
+ // At this point, we're done with the key for sure
+ SetProperty(kOdsignKeyDoneProp, "1");
+ // And we did a successful verification
+ SetProperty(kOdsignVerificationDoneProp, "1");
+ SetProperty(kOdsignVerificationStatusProp, kOdsignVerificationStatusValid);
return 0;
}
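
Note: createDigest() and toHex() are referenced above but are not part of this hunk. The fragment below is only an illustrative sketch of what such helpers could look like, assuming a SHA-256 digest computed over the whole file with OpenSSL's EVP API; it is not the implementation used by this change.

// Illustrative only: plausible stand-ins for createDigest()/toHex(),
// assuming SHA-256 over the file contents via OpenSSL's EVP interface.
#include <openssl/evp.h>

#include <cstdint>
#include <fstream>
#include <optional>
#include <string>
#include <vector>

static std::optional<std::vector<uint8_t>> sketchCreateDigest(const std::string& path) {
    std::ifstream in(path, std::ios::binary);
    if (!in) return std::nullopt;

    EVP_MD_CTX* ctx = EVP_MD_CTX_new();
    if (ctx == nullptr) return std::nullopt;
    EVP_DigestInit_ex(ctx, EVP_sha256(), nullptr);

    char buf[4096];
    // Hash the file in fixed-size chunks; the final partial read is still consumed.
    while (in.read(buf, sizeof(buf)) || in.gcount() > 0) {
        EVP_DigestUpdate(ctx, buf, static_cast<size_t>(in.gcount()));
    }

    std::vector<uint8_t> digest(EVP_MAX_MD_SIZE);
    unsigned int len = 0;
    EVP_DigestFinal_ex(ctx, digest.data(), &len);
    EVP_MD_CTX_free(ctx);
    digest.resize(len);
    return digest;
}

static std::string sketchToHex(const std::vector<uint8_t>& bytes) {
    static const char kHexChars[] = "0123456789abcdef";
    std::string out;
    for (uint8_t b : bytes) {
        out.push_back(kHexChars[b >> 4]);
        out.push_back(kHexChars[b & 0x0f]);
    }
    return out;
}
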
diff --git a/ondevice-signing/proto/Android.bp b/ondevice-signing/proto/Android.bp
new file mode 100644
index 0000000..fd48f31
--- /dev/null
+++ b/ondevice-signing/proto/Android.bp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_library_static {
+ name: "lib_odsign_proto",
+ host_supported: true,
+ proto: {
+ export_proto_headers: true,
+ type: "full",
+ },
+ srcs: ["odsign_info.proto"],
+}
diff --git a/ondevice-signing/proto/odsign_info.proto b/ondevice-signing/proto/odsign_info.proto
new file mode 100644
index 0000000..9d49c6c
--- /dev/null
+++ b/ondevice-signing/proto/odsign_info.proto
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+package odsign.proto;
+
+message OdsignInfo {
+ // Map of artifact files to their hashes
+ map<string, string> file_hashes = 1;
+}
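
Note: this message is consumed from C++ through the standard protobuf-generated API (odsign_info.pb.h). The fragment below is a minimal round-trip sketch mirroring what persistDigests() and getOdsignInfo() do above, minus the signing step; the path and hash values are placeholders, not real artifact names.

// Minimal, illustrative round trip for OdsignInfo; placeholder data only.
#include <map>
#include <string>

#include <google/protobuf/map.h>
#include "odsign_info.pb.h"  // generated from odsign_info.proto

static bool odsignInfoRoundTrip() {
    std::map<std::string, std::string> digests = {
        {"example.art", "<hex digest placeholder>"},
    };

    // Populate the map field the same way persistDigests() does above.
    odsign::proto::OdsignInfo info;
    google::protobuf::Map<std::string, std::string> proto_hashes(digests.begin(), digests.end());
    *info.mutable_file_hashes() = proto_hashes;

    std::string serialized;
    if (!info.SerializeToString(&serialized)) return false;

    odsign::proto::OdsignInfo parsed;
    if (!parsed.ParseFromString(serialized)) return false;

    // file_hashes() behaves like an unordered map of path -> digest.
    return parsed.file_hashes().size() == digests.size();
}
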
diff --git a/provisioner/Android.bp b/provisioner/Android.bp
index c1c8d15..d3f06fe 100644
--- a/provisioner/Android.bp
+++ b/provisioner/Android.bp
@@ -14,6 +14,15 @@
// limitations under the License.
//
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "system_security_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["system_security_license"],
+}
+
aidl_interface {
name: "android.security.provisioner",
unstable: true,